diff --git a/CHANGELOG.md b/CHANGELOG.md index c9f0fbeeb84..a854b5f95a4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,29 @@ -v1.8.4 [unreleased] +v1.8.5 [unreleased] +------------------- + +### Features + +- [#20917](https://github.com/influxdata/influxdb/pull/20917): feat(inspect): Add report-disk for disk usage by measurement +- [#20118](https://github.com/influxdata/influxdb/pull/20118): feat: Optimize shard lookups in groups containing only one shard. Thanks @StoneYunZhao! +- [#20910](https://github.com/influxdata/influxdb/pull/20910): feat: Make meta queries respect QueryTimeout values +- [#20989](https://github.com/influxdata/influxdb/pull/20989): feat: influx_inspect export to standard out +- [#21021](https://github.com/influxdata/influxdb/pull/21021): feat: Log query text for POST requests + +### Bugfixes + +- [#21053](https://github.com/influxdata/influxdb/pull/21053): fix: help text for influx_inspect +- [#20101](https://github.com/influxdata/influxdb/pull/20101): fix(write): Successful writes increment write error statistics incorrectly. +- [#20276](https://github.com/influxdata/influxdb/pull/20276): fix(error): "unsupported value: +Inf" error not handled gracefully. +- [#20277](https://github.com/influxdata/influxdb/pull/20277): fix(query): Group By queries with offset that crosses a DST boundary can fail. +- [#20295](https://github.com/influxdata/influxdb/pull/20295): fix: cp.Mux.Serve() closes all net.Listener instances silently on error. +- [#19832](https://github.com/influxdata/influxdb/pull/19832): fix(prometheus): regexp handling should comply with PromQL. 
+- [#20432](https://github.com/influxdata/influxdb/pull/20432): fix(error): SELECT INTO doesn't return error with unsupported value +- [#20033](https://github.com/influxdata/influxdb/pull/20033): fix(tsm1): "snapshot in progress" error during backup +- [#20909](https://github.com/influxdata/influxdb/pull/20909): fix(tsm1): data race when accessing tombstone stats +- [#20912](https://github.com/influxdata/influxdb/pull/20912): fix(tsdb): minimize lock contention when adding new fields or measurements +- [#20914](https://github.com/influxdata/influxdb/pull/20914): fix: infinite recursion bug (#20862) + +v1.8.4 [2021-01-27] ------------------- ### Bugfixes diff --git a/Dockerfile_build_ubuntu64_git b/Dockerfile_build_ubuntu64_git index a4e9b4e46ed..59a1d2808ad 100644 --- a/Dockerfile_build_ubuntu64_git +++ b/Dockerfile_build_ubuntu64_git @@ -27,7 +27,7 @@ VOLUME $PROJECT_DIR # Install go -ENV GO_VERSION 1.12 +ENV GO_VERSION 1.13 ENV GO_ARCH amd64 RUN wget --no-verbose https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \ tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \ diff --git a/_tools/tmpl/main.go b/_tools/tmpl/main.go index 191a6f3cc7e..4f44aafe093 100644 --- a/_tools/tmpl/main.go +++ b/_tools/tmpl/main.go @@ -248,4 +248,4 @@ func StripComments(raw []byte) []byte { } return buf.Bytes() -} \ No newline at end of file +} diff --git a/_tools/tmpl/main_test.go b/_tools/tmpl/main_test.go index c24fb7e0caf..cbb1f07e8fb 100644 --- a/_tools/tmpl/main_test.go +++ b/_tools/tmpl/main_test.go @@ -54,4 +54,4 @@ func TestStripComments(t *testing.T) { } }) } -} \ No newline at end of file +} diff --git a/cluster/points_writer.go b/cluster/points_writer.go index 2ae1102a828..222dad685f6 100644 --- a/cluster/points_writer.go +++ b/cluster/points_writer.go @@ -212,7 +212,7 @@ func (w *PointsWriter) MapShards(wp *WritePointsRequest) (*ShardMapping, error) mapping := NewShardMapping() for _, p := 
range wp.Points { sg := timeRanges[p.Time().Truncate(rp.ShardGroupDuration)] - sh := sg.ShardFor(p.HashID()) + sh := sg.ShardFor(p) mapping.MapPoint(&sh, p) } return mapping, nil diff --git a/cmd/influx_inspect/export/export.go b/cmd/influx_inspect/export/export.go index 841328edd9d..59b3c6a6178 100644 --- a/cmd/influx_inspect/export/export.go +++ b/cmd/influx_inspect/export/export.go @@ -44,6 +44,8 @@ type Command struct { walFiles map[string][]string } +const stdoutMark = "-" + // NewCommand returns a new instance of Command. func NewCommand() *Command { return &Command{ @@ -56,13 +58,18 @@ func NewCommand() *Command { } } +// Are we writing to standard out? +func (cmd *Command) usingStdOut() bool { + return cmd.out == stdoutMark +} + // Run executes the command. func (cmd *Command) Run(args ...string) error { var start, end string fs := flag.NewFlagSet("export", flag.ExitOnError) fs.StringVar(&cmd.dataDir, "datadir", os.Getenv("HOME")+"/.influxdb/data", "Data storage path") fs.StringVar(&cmd.walDir, "waldir", os.Getenv("HOME")+"/.influxdb/wal", "WAL storage path") - fs.StringVar(&cmd.out, "out", os.Getenv("HOME")+"/.influxdb/export", "Destination file to export to") + fs.StringVar(&cmd.out, "out", os.Getenv("HOME")+"/.influxdb/export", "'-' for standard out or the destination file to export to") fs.StringVar(&cmd.database, "database", "", "Optional: the database to export") fs.StringVar(&cmd.retentionPolicy, "retention", "", "Optional: the retention policy to export (requires -database)") fs.StringVar(&start, "start", "", "Optional: the start time to export (RFC3339 format)") @@ -205,23 +212,29 @@ func (cmd *Command) writeDDL(mw io.Writer, w io.Writer) error { func (cmd *Command) writeDML(mw io.Writer, w io.Writer) error { fmt.Fprintln(mw, "# DML") + var msgOut io.Writer + if cmd.usingStdOut() { + msgOut = cmd.Stderr + } else { + msgOut = cmd.Stdout + } for key := range cmd.manifest { keys := strings.Split(key, string(os.PathSeparator)) fmt.Fprintf(mw, "# 
CONTEXT-DATABASE:%s\n", keys[0]) fmt.Fprintf(mw, "# CONTEXT-RETENTION-POLICY:%s\n", keys[1]) if files, ok := cmd.tsmFiles[key]; ok { - fmt.Fprintf(cmd.Stdout, "writing out tsm file data for %s...", key) + fmt.Fprintf(msgOut, "writing out tsm file data for %s...", key) if err := cmd.writeTsmFiles(mw, w, files); err != nil { return err } - fmt.Fprintln(cmd.Stdout, "complete.") + fmt.Fprintln(msgOut, "complete.") } if _, ok := cmd.walFiles[key]; ok { - fmt.Fprintf(cmd.Stdout, "writing out wal file data for %s...", key) + fmt.Fprintf(msgOut, "writing out wal file data for %s...", key) if err := cmd.writeWALFiles(mw, w, cmd.walFiles[key], key); err != nil { return err } - fmt.Fprintln(cmd.Stdout, "complete.") + fmt.Fprintln(msgOut, "complete.") } } @@ -254,20 +267,24 @@ func (cmd *Command) writeFull(mw io.Writer, w io.Writer) error { } func (cmd *Command) write() error { - // open our output file and create an output buffer - f, err := os.Create(cmd.out) - if err != nil { - return err + var w io.Writer + if cmd.usingStdOut() { + w = cmd.Stdout + } else { + // open our output file and create an output buffer + f, err := os.Create(cmd.out) + if err != nil { + return err + } + defer f.Close() + w = f } - defer f.Close() - // Because calling (*os.File).Write is relatively expensive, // and we don't *need* to sync to disk on every written line of export, // use a sized buffered writer so that we only sync the file every megabyte. 
- bw := bufio.NewWriterSize(f, 1024*1024) + bw := bufio.NewWriterSize(w, 1024*1024) defer bw.Flush() - - var w io.Writer = bw + w = bw if cmd.compress { gzw := gzip.NewWriter(w) diff --git a/cmd/influx_inspect/help/help.go b/cmd/influx_inspect/help/help.go index 58833eedabf..ae68298c4e9 100644 --- a/cmd/influx_inspect/help/help.go +++ b/cmd/influx_inspect/help/help.go @@ -37,7 +37,8 @@ The commands are: export exports raw data from a shard to line protocol buildtsi generates tsi1 indexes from tsm1 data help display this help message - report displays a shard level report + report displays a shard level cardinality report + report-disk displays a shard level disk usage report verify verifies integrity of TSM files verify-seriesfile verifies integrity of the Series file diff --git a/cmd/influx_inspect/main.go b/cmd/influx_inspect/main.go index 47d0c209a54..74dd3d82821 100644 --- a/cmd/influx_inspect/main.go +++ b/cmd/influx_inspect/main.go @@ -16,6 +16,7 @@ import ( "github.com/influxdata/influxdb/cmd/influx_inspect/export" "github.com/influxdata/influxdb/cmd/influx_inspect/help" "github.com/influxdata/influxdb/cmd/influx_inspect/report" + "github.com/influxdata/influxdb/cmd/influx_inspect/reportdisk" "github.com/influxdata/influxdb/cmd/influx_inspect/reporttsi" "github.com/influxdata/influxdb/cmd/influx_inspect/verify/seriesfile" "github.com/influxdata/influxdb/cmd/influx_inspect/verify/tombstone" @@ -98,6 +99,11 @@ func (m *Main) Run(args ...string) error { if err := name.Run(args...); err != nil { return fmt.Errorf("report: %s", err) } + case "report-disk": + name := reportdisk.NewCommand() + if err := name.Run(args...); err != nil { + return fmt.Errorf("report: %s", err) + } case "reporttsi": name := reporttsi.NewCommand() if err := name.Run(args...); err != nil { diff --git a/cmd/influx_inspect/report/report.go b/cmd/influx_inspect/report/report.go index 5b6c1f137f6..62470bb71bc 100644 --- 
a/cmd/influx_inspect/report/report.go +++ b/cmd/influx_inspect/report/report.go @@ -15,6 +15,7 @@ import ( "time" "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/pkg/reporthelper" "github.com/influxdata/influxdb/tsdb/engine/tsm1" "github.com/retailnext/hllpp" ) @@ -60,7 +61,7 @@ func (cmd *Command) Run(args ...string) error { cmd.dir = fs.Arg(0) - err := cmd.isShardDir(cmd.dir) + err := reporthelper.IsShardDir(cmd.dir) if cmd.detailed && err != nil { return fmt.Errorf("-detailed only supported for shard dirs") } @@ -79,8 +80,8 @@ func (cmd *Command) Run(args ...string) error { minTime, maxTime := int64(math.MaxInt64), int64(math.MinInt64) var fileCount int - if err := cmd.walkShardDirs(cmd.dir, func(db, rp, id, path string) error { - if cmd.pattern != "" && strings.Contains(path, cmd.pattern) { + if err := reporthelper.WalkShardDirs(cmd.dir, func(db, rp, id, path string) error { + if cmd.pattern != "" && !strings.Contains(path, cmd.pattern) { return nil } @@ -218,64 +219,6 @@ func sortKeys(vals map[string]counter) (keys []string) { return keys } -func (cmd *Command) isShardDir(dir string) error { - name := filepath.Base(dir) - if id, err := strconv.Atoi(name); err != nil || id < 1 { - return fmt.Errorf("not a valid shard dir: %v", dir) - } - - return nil -} - -func (cmd *Command) walkShardDirs(root string, fn func(db, rp, id, path string) error) error { - type location struct { - db, rp, id, path string - } - - var dirs []location - if err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - if info.IsDir() { - return nil - } - - if filepath.Ext(info.Name()) == "."+tsm1.TSMFileExtension { - shardDir := filepath.Dir(path) - - if err := cmd.isShardDir(shardDir); err != nil { - return err - } - absPath, err := filepath.Abs(path) - if err != nil { - return err - } - parts := strings.Split(absPath, string(filepath.Separator)) - db, rp, id := 
parts[len(parts)-4], parts[len(parts)-3], parts[len(parts)-2] - dirs = append(dirs, location{db: db, rp: rp, id: id, path: path}) - return nil - } - return nil - }); err != nil { - return err - } - - sort.Slice(dirs, func(i, j int) bool { - a, _ := strconv.Atoi(dirs[i].id) - b, _ := strconv.Atoi(dirs[j].id) - return a < b - }) - - for _, shard := range dirs { - if err := fn(shard.db, shard.rp, shard.id, shard.path); err != nil { - return err - } - } - return nil -} - // printUsage prints the usage message to STDERR. func (cmd *Command) printUsage() { usage := `Displays shard level report. diff --git a/cmd/influx_inspect/reportdisk/reportdisk.go b/cmd/influx_inspect/reportdisk/reportdisk.go new file mode 100644 index 00000000000..f3896aa4e80 --- /dev/null +++ b/cmd/influx_inspect/reportdisk/reportdisk.go @@ -0,0 +1,311 @@ +// Package reportdisk reports statistics about TSM files. +package reportdisk + +import ( + "encoding/json" + "flag" + "fmt" + "io" + "os" + "sort" + "strconv" + "strings" + "time" + + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/pkg/reporthelper" + "github.com/influxdata/influxdb/tsdb/engine/tsm1" +) + +// Command represents the program execution for "influx_inspect report-disk". +type Command struct { + Stderr io.Writer + Stdout io.Writer + + dir string + pattern string + detailed bool +} + +// NewCommand returns a new instance of Command. +func NewCommand() *Command { + return &Command{ + Stderr: os.Stderr, + Stdout: os.Stdout, + } +} + +// Run executes the command. 
+func (cmd *Command) Run(args ...string) error { + fs := flag.NewFlagSet("report", flag.ExitOnError) + fs.StringVar(&cmd.pattern, "pattern", "", "Include only files matching a pattern") + fs.BoolVar(&cmd.detailed, "detailed", false, "Report disk size by measurement") + + fs.SetOutput(cmd.Stdout) + fs.Usage = cmd.printUsage + + if err := fs.Parse(args); err != nil { + return err + } + + cmd.dir = fs.Arg(0) + + start := time.Now() + + shardSizes := ShardSizes{} + if err := reporthelper.WalkShardDirs(cmd.dir, func(db, rp, id, path string) error { + if cmd.pattern != "" && !strings.Contains(path, cmd.pattern) { + return nil + } + + stat, err := os.Stat(path) + if err != nil { + fmt.Fprintf(cmd.Stderr, "error: %s: %v. Skipping.\n", path, err) + return nil + } + + shardSizes.AddTsmFileWithSize(db, rp, id, stat.Size()) + return nil + }); err != nil { + return err + } + + measurementSizes := MeasurementSizes{} + if cmd.detailed { + processedFiles := 0 + progress := NewProgressReporter(cmd.Stderr) + + if err := reporthelper.WalkShardDirs(cmd.dir, func(db, rp, id, path string) error { + if cmd.pattern != "" && !strings.Contains(path, cmd.pattern) { + return nil + } + file, err := os.OpenFile(path, os.O_RDONLY, 0600) + if err != nil { + fmt.Fprintf(cmd.Stderr, "error: %s: %v. Skipping.\n", path, err) + return nil + } + + progress.Report(fmt.Sprintf("TSM files inspected: %d\t/%d", processedFiles, shardSizes.files)) + processedFiles++ + + reader, err := tsm1.NewTSMReader(file) + if err != nil { + fmt.Fprintf(cmd.Stderr, "error: %s: %v. 
Skipping.\n", file.Name(), err) + return nil + } + + keyNum := reader.KeyCount() + for i := 0; i < keyNum; i++ { + key, _ := reader.KeyAt(i) + series, _ := tsm1.SeriesAndFieldFromCompositeKey(key) + measurement := models.ParseName(series) + var size int64 + for _, entry := range reader.Entries(key) { + size += int64(entry.Size) + } + measurementSizes.AddSize(db, rp, string(measurement), size) + } + + return nil + }); err != nil { + return err + } + progress.Report(fmt.Sprintf("TSM files inspected: %d\t/%d", processedFiles, shardSizes.files)) + } + fmt.Fprintf(cmd.Stderr, "\nCompleted in %s\n", time.Since(start)) + + sanitize := func(s string) []byte { + b, _ := json.Marshal(s) // json shouldn't be throwing errors when marshalling a string + return b + } + + fmt.Fprintf(cmd.Stdout, `{ + "Summary": {"shards": %d, "tsm_files": %d, "total_tsm_size": %d }, + "Shard": [`, shardSizes.shards, shardSizes.files, shardSizes.totalSize) + + first := true + shardSizes.ForEach(func(db, rp, id string, detail ShardDetails) { + var shardString []byte + if s, err := strconv.ParseInt(id, 10, 64); err != nil && strconv.FormatInt(s, 10) == id { + shardString = []byte(id) + } else { + shardString = sanitize(id) + } + if !first { + fmt.Fprint(cmd.Stdout, ",") + } + first = false + fmt.Fprintf(cmd.Stdout, ` + {"db": %s, "rp": %s, "shard": %s, "tsm_files": %d, "size": %d}`, + sanitize(db), sanitize(rp), shardString, detail.files, detail.size) + }) + + if cmd.detailed { + fmt.Fprintf(cmd.Stdout, ` + ], + "Measurement": [`) + + first = true + measurementSizes.ForEach(func(db, rp, measurement string, detail MeasurementDetails) { + if !first { + fmt.Fprint(cmd.Stdout, ",") + } + first = false + fmt.Fprintf(cmd.Stdout, ` + {"db": %s, "rp": %s, "measurement": %s, "size": %d}`, + sanitize(db), sanitize(rp), sanitize(measurement), detail.size) + }) + + } + fmt.Fprintf(cmd.Stdout, ` + ] +} +`, + ) + + return nil +} + +// printUsage prints the usage message to STDERR. 
+func (cmd *Command) printUsage() { + usage := `Displays report of disk usage. + +Usage: influx_inspect report [flags] + + -pattern + Include only files matching a pattern. + -detailed + Report disk usage by measurement. + Defaults to "false". +` + + fmt.Fprintf(cmd.Stdout, usage) +} + +type ShardDetails struct { + size int64 + files int64 +} + +type ShardSizes struct { + m map[string]map[string]map[string]*ShardDetails + files int64 + shards int64 + totalSize int64 +} + +func (s *ShardSizes) AddTsmFileWithSize(db, rp, id string, size int64) { + if s.m == nil { + s.m = make(map[string]map[string]map[string]*ShardDetails) + } + if _, ok := s.m[db]; !ok { + s.m[db] = make(map[string]map[string]*ShardDetails) + } + if _, ok := s.m[db][rp]; !ok { + s.m[db][rp] = make(map[string]*ShardDetails) + } + if _, ok := s.m[db][rp][id]; !ok { + s.m[db][rp][id] = &ShardDetails{} + s.shards += 1 + } + s.m[db][rp][id].size += size + s.m[db][rp][id].files += 1 + s.files += 1 + s.totalSize += size +} + +func (s *ShardSizes) ForEach(f func(db, rp, id string, detail ShardDetails)) { + dbKeys := make([]string, 0, len(s.m)) + for db, _ := range s.m { + dbKeys = append(dbKeys, db) + } + sort.Strings(dbKeys) + for _, db := range dbKeys { + rpKeys := make([]string, 0, len(s.m[db])) + for rp, _ := range s.m[db] { + rpKeys = append(rpKeys, rp) + } + sort.Strings(rpKeys) + for _, rp := range rpKeys { + idKeys := make([]string, 0, len(s.m[db][rp])) + for id, _ := range s.m[db][rp] { + idKeys = append(idKeys, id) + } + sort.Strings(idKeys) + for _, id := range idKeys { + f(db, rp, id, *s.m[db][rp][id]) + } + } + } +} + +type MeasurementDetails struct { + size int64 +} + +type MeasurementSizes struct { + m map[string]map[string]map[string]*MeasurementDetails +} + +func (s *MeasurementSizes) AddSize(db, rp, measurement string, size int64) { + if s.m == nil { + s.m = make(map[string]map[string]map[string]*MeasurementDetails) + } + if _, ok := s.m[db]; !ok { + s.m[db] = 
make(map[string]map[string]*MeasurementDetails) + } + if _, ok := s.m[db][rp]; !ok { + s.m[db][rp] = make(map[string]*MeasurementDetails) + } + if _, ok := s.m[db][rp][measurement]; !ok { + s.m[db][rp][measurement] = &MeasurementDetails{} + } + s.m[db][rp][measurement].size += size +} + +func (s *MeasurementSizes) ForEach(f func(db, rp, measurement string, detail MeasurementDetails)) { + dbKeys := make([]string, 0, len(s.m)) + for db, _ := range s.m { + dbKeys = append(dbKeys, db) + } + sort.Strings(dbKeys) + for _, db := range dbKeys { + rpKeys := make([]string, 0, len(s.m[db])) + for rp, _ := range s.m[db] { + rpKeys = append(rpKeys, rp) + } + sort.Strings(rpKeys) + for _, rp := range rpKeys { + mKeys := make([]string, 0, len(s.m[db][rp])) + for m, _ := range s.m[db][rp] { + mKeys = append(mKeys, m) + } + sort.Strings(mKeys) + for _, m := range mKeys { + f(db, rp, m, *s.m[db][rp][m]) + } + } + } +} + +type ProgressReporter struct { + maxLength int + w io.Writer +} + +func NewProgressReporter(w io.Writer) *ProgressReporter { + return &ProgressReporter{w: w} +} + +func (p *ProgressReporter) Report(line string) { + if p.maxLength == 0 { + fmt.Fprintf(p.w, "\n") + p.maxLength = 1 + } + for len(line) < p.maxLength { + line += " " + } + p.maxLength = len(line) + p.maxLength++ + fmt.Fprint(p.w, line+"\r") +} diff --git a/cmd/influx_inspect/reportdisk/reportdisk_test.go b/cmd/influx_inspect/reportdisk/reportdisk_test.go new file mode 100644 index 00000000000..42e3d4ecaa4 --- /dev/null +++ b/cmd/influx_inspect/reportdisk/reportdisk_test.go @@ -0,0 +1,3 @@ +package reportdisk_test + +// TODO: write some tests diff --git a/cmd/influx_tools/internal/format/binary/binary.pb.go b/cmd/influx_tools/internal/format/binary/binary.pb.go index 3d037e61ffc..77af7512811 100644 --- a/cmd/influx_tools/internal/format/binary/binary.pb.go +++ b/cmd/influx_tools/internal/format/binary/binary.pb.go @@ -21,14 +21,19 @@ */ package binary -import proto "github.com/gogo/protobuf/proto" 
-import fmt "fmt" -import math "math" -import _ "github.com/gogo/protobuf/gogoproto" +import ( + fmt "fmt" -import time "time" + proto "github.com/gogo/protobuf/proto" -import io "io" + math "math" + + _ "github.com/gogo/protobuf/gogoproto" + + time "time" + + io "io" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal diff --git a/cmd/influx_tsm/tsdb/internal/meta.pb.go b/cmd/influx_tsm/tsdb/internal/meta.pb.go index 1397774b836..ba05a9119ec 100644 --- a/cmd/influx_tsm/tsdb/internal/meta.pb.go +++ b/cmd/influx_tsm/tsdb/internal/meta.pb.go @@ -15,9 +15,13 @@ It has these top-level messages: */ package internal -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" + + proto "github.com/gogo/protobuf/proto" + + math "math" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal diff --git a/cmd/influxd/backup_util/backup_util.go b/cmd/influxd/backup_util/backup_util.go index 2632da46966..a8fc5ce1a77 100644 --- a/cmd/influxd/backup_util/backup_util.go +++ b/cmd/influxd/backup_util/backup_util.go @@ -6,15 +6,15 @@ import ( "encoding/json" "fmt" "io" + "io/ioutil" "os" + "path/filepath" "sort" "strings" "github.com/gogo/protobuf/proto" internal "github.com/influxdata/influxdb/cmd/influxd/backup_util/internal" "github.com/influxdata/influxdb/services/snapshotter" - "io/ioutil" - "path/filepath" ) //go:generate protoc --gogo_out=. 
internal/data.proto diff --git a/cmd/influxd/backup_util/internal/data.pb.go b/cmd/influxd/backup_util/internal/data.pb.go index f6762af1b4a..edcd3451836 100644 --- a/cmd/influxd/backup_util/internal/data.pb.go +++ b/cmd/influxd/backup_util/internal/data.pb.go @@ -13,9 +13,13 @@ It has these top-level messages: */ package backup_util -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" + + proto "github.com/gogo/protobuf/proto" + + math "math" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal diff --git a/cmd/influxd/run/server.go b/cmd/influxd/run/server.go index 9356b38d60e..eb4ea97e068 100644 --- a/cmd/influxd/run/server.go +++ b/cmd/influxd/run/server.go @@ -1,6 +1,7 @@ package run import ( + "context" "crypto/tls" "fmt" "github.com/influxdata/influxdb/cluster" @@ -226,6 +227,8 @@ func NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) { MaxSelectPointN: c.Coordinator.MaxSelectPointN, MaxSelectSeriesN: c.Coordinator.MaxSelectSeriesN, MaxSelectBucketsN: c.Coordinator.MaxSelectBucketsN, + StrictErrorHandling: s.TSDBStore.EngineOptions.Config.StrictErrorHandling, + } s.QueryExecutor.TaskManager.QueryTimeout = time.Duration(c.Coordinator.QueryTimeout) s.QueryExecutor.TaskManager.LogQueriesAfter = time.Duration(c.Coordinator.LogQueriesAfter) @@ -551,14 +554,16 @@ func (s *Server) reportServer() { for _, db := range dbs { name := db.Name - n, err := s.TSDBStore.SeriesCardinality(name) + // Use the context.Background() to avoid timing out on this. + n, err := s.TSDBStore.SeriesCardinality(context.Background(), name) if err != nil { s.Logger.Error(fmt.Sprintf("Unable to get series cardinality for database %s: %v", name, err)) } else { numSeries += n } - n, err = s.TSDBStore.MeasurementsCardinality(name) + // Use the context.Background() to avoid timing out on this. 
+ n, err = s.TSDBStore.MeasurementsCardinality(context.Background(), name) if err != nil { s.Logger.Error(fmt.Sprintf("Unable to get measurement cardinality for database %s: %v", name, err)) } else { diff --git a/coordinator/points_writer.go b/coordinator/points_writer.go index 7c8410216a1..d05bb621fd4 100644 --- a/coordinator/points_writer.go +++ b/coordinator/points_writer.go @@ -234,7 +234,7 @@ func (w *PointsWriter) MapShards(wp *WritePointsRequest) (*ShardMapping, error) continue } - sh := sg.ShardFor(p.HashID()) + sh := sg.ShardFor(p) mapping.MapPoint(&sh, p) } return mapping, nil @@ -447,11 +447,10 @@ func (w *PointsWriter) writeToShardWithContext(ctx context.Context, shard *meta. atomic.AddInt64(&w.stats.WriteErr, 1) return err } - } else { + } else if err != nil { atomic.AddInt64(&w.stats.WriteErr, 1) return err } - atomic.AddInt64(&w.stats.WriteOK, 1) return nil } diff --git a/coordinator/statement_executor.go b/coordinator/statement_executor.go index df19f345fcf..6d5083bad45 100644 --- a/coordinator/statement_executor.go +++ b/coordinator/statement_executor.go @@ -51,6 +51,9 @@ type StatementExecutor struct { WritePointsInto(*IntoWriteRequest) error } + // Disallow INF values in SELECT INTO and other previously ignored errors + StrictErrorHandling bool + // Select statement limits MaxSelectPointN int MaxSelectSeriesN int @@ -58,10 +61,10 @@ type StatementExecutor struct { } // ExecuteStatement executes the given statement with the given execution context. -func (e *StatementExecutor) ExecuteStatement(stmt influxql.Statement, ctx *query.ExecutionContext) error { +func (e *StatementExecutor) ExecuteStatement(ctx *query.ExecutionContext, stmt influxql.Statement) error { // Select statements are handled separately so that they can be streamed. 
if stmt, ok := stmt.(*influxql.SelectStatement); ok { - return e.executeSelectStatement(stmt, ctx) + return e.executeSelectStatement(ctx, stmt) } var rows models.Rows @@ -142,9 +145,9 @@ func (e *StatementExecutor) ExecuteStatement(stmt influxql.Statement, ctx *query err = e.executeDropUserStatement(stmt) case *influxql.ExplainStatement: if stmt.Analyze { - rows, err = e.executeExplainAnalyzeStatement(stmt, ctx) + rows, err = e.executeExplainAnalyzeStatement(ctx, stmt) } else { - rows, err = e.executeExplainStatement(stmt, ctx) + rows, err = e.executeExplainStatement(ctx, stmt) } case *influxql.GrantStatement: if ctx.ReadOnly { @@ -169,19 +172,19 @@ func (e *StatementExecutor) ExecuteStatement(stmt influxql.Statement, ctx *query case *influxql.ShowContinuousQueriesStatement: rows, err = e.executeShowContinuousQueriesStatement(stmt) case *influxql.ShowDatabasesStatement: - rows, err = e.executeShowDatabasesStatement(stmt, ctx) + rows, err = e.executeShowDatabasesStatement(ctx, stmt) case *influxql.ShowDiagnosticsStatement: rows, err = e.executeShowDiagnosticsStatement(stmt) case *influxql.ShowGrantsForUserStatement: rows, err = e.executeShowGrantsForUserStatement(stmt) case *influxql.ShowMeasurementsStatement: - return e.executeShowMeasurementsStatement(stmt, ctx) + return e.executeShowMeasurementsStatement(ctx, stmt) case *influxql.ShowMeasurementCardinalityStatement: - rows, err = e.executeShowMeasurementCardinalityStatement(stmt) + rows, err = e.executeShowMeasurementCardinalityStatement(ctx, stmt) case *influxql.ShowRetentionPoliciesStatement: rows, err = e.executeShowRetentionPoliciesStatement(stmt) case *influxql.ShowSeriesCardinalityStatement: - rows, err = e.executeShowSeriesCardinalityStatement(stmt) + rows, err = e.executeShowSeriesCardinalityStatement(ctx, stmt) case *influxql.ShowShardsStatement: rows, err = e.executeShowShardsStatement(stmt) case *influxql.ShowShardGroupsStatement: @@ -191,9 +194,9 @@ func (e *StatementExecutor) ExecuteStatement(stmt 
influxql.Statement, ctx *query case *influxql.ShowSubscriptionsStatement: rows, err = e.executeShowSubscriptionsStatement(stmt) case *influxql.ShowTagKeysStatement: - return e.executeShowTagKeys(stmt, ctx) + return e.executeShowTagKeys(ctx, stmt) case *influxql.ShowTagValuesStatement: - return e.executeShowTagValues(stmt, ctx) + return e.executeShowTagValues(ctx, stmt) case *influxql.ShowUsersStatement: rows, err = e.executeShowUsersStatement(stmt) case *influxql.SetPasswordUserStatement: @@ -203,7 +206,7 @@ func (e *StatementExecutor) ExecuteStatement(stmt influxql.Statement, ctx *query err = e.executeSetPasswordUserStatement(stmt) case *influxql.ShowQueriesStatement, *influxql.KillQueryStatement: // Send query related statements to the task manager. - return e.TaskManager.ExecuteStatement(stmt, ctx) + return e.TaskManager.ExecuteStatement(ctx, stmt) default: return query.ErrInvalidQuery } @@ -408,7 +411,7 @@ func (e *StatementExecutor) executeDropUserStatement(q *influxql.DropUserStateme return e.MetaClient.DropUser(q.Name) } -func (e *StatementExecutor) executeExplainStatement(q *influxql.ExplainStatement, ctx *query.ExecutionContext) (models.Rows, error) { +func (e *StatementExecutor) executeExplainStatement(ctx *query.ExecutionContext, q *influxql.ExplainStatement) (models.Rows, error) { opt := query.SelectOptions{ NodeID: ctx.ExecutionOptions.NodeID, MaxSeriesN: e.MaxSelectSeriesN, @@ -439,7 +442,7 @@ func (e *StatementExecutor) executeExplainStatement(q *influxql.ExplainStatement return models.Rows{row}, nil } -func (e *StatementExecutor) executeExplainAnalyzeStatement(q *influxql.ExplainStatement, ectx *query.ExecutionContext) (models.Rows, error) { +func (e *StatementExecutor) executeExplainAnalyzeStatement(ectx *query.ExecutionContext, q *influxql.ExplainStatement) (models.Rows, error) { stmt := q.Statement t, span := tracing.NewTrace("select") ctx := tracing.NewContextWithTrace(ectx, t) @@ -538,7 +541,7 @@ func (e *StatementExecutor) 
executeSetPasswordUserStatement(q *influxql.SetPassw return e.MetaClient.UpdateUser(q.Name, q.Password) } -func (e *StatementExecutor) executeSelectStatement(stmt *influxql.SelectStatement, ctx *query.ExecutionContext) error { +func (e *StatementExecutor) executeSelectStatement(ctx *query.ExecutionContext, stmt *influxql.SelectStatement) error { cur, err := e.createIterators(ctx, stmt, ctx.ExecutionOptions) if err != nil { return err @@ -573,7 +576,7 @@ func (e *StatementExecutor) executeSelectStatement(stmt *influxql.SelectStatemen // Write points back into system for INTO statements. if stmt.Target != nil { - n, err := e.writeInto(pointsWriter, stmt, row) + n, err := e.writeInto(pointsWriter, stmt, row, e.StrictErrorHandling) if err != nil { return err } @@ -656,9 +659,9 @@ func (e *StatementExecutor) executeShowContinuousQueriesStatement(stmt *influxql return rows, nil } -func (e *StatementExecutor) executeShowDatabasesStatement(q *influxql.ShowDatabasesStatement, ctx *query.ExecutionContext) (models.Rows, error) { +func (e *StatementExecutor) executeShowDatabasesStatement(ctx *query.ExecutionContext, q *influxql.ShowDatabasesStatement) (models.Rows, error) { dis := e.MetaClient.Databases() - a := ctx.ExecutionOptions.Authorizer + a := ctx.ExecutionOptions.CoarseAuthorizer row := &models.Row{Name: "databases", Columns: []string{"name"}} for _, di := range dis { @@ -711,12 +714,12 @@ func (e *StatementExecutor) executeShowGrantsForUserStatement(q *influxql.ShowGr return []*models.Row{row}, nil } -func (e *StatementExecutor) executeShowMeasurementsStatement(q *influxql.ShowMeasurementsStatement, ctx *query.ExecutionContext) error { +func (e *StatementExecutor) executeShowMeasurementsStatement(ctx *query.ExecutionContext, q *influxql.ShowMeasurementsStatement) error { if q.Database == "" { return ErrDatabaseNameRequired } - names, err := e.TSDBStore.MeasurementNames(ctx.Authorizer, q.Database, q.Condition) + names, err := e.TSDBStore.MeasurementNames(ctx.Context, 
ctx.Authorizer, q.Database, q.Condition) if err != nil || len(names) == 0 { return ctx.Send(&query.Result{ Err: err, @@ -755,12 +758,12 @@ func (e *StatementExecutor) executeShowMeasurementsStatement(q *influxql.ShowMea }) } -func (e *StatementExecutor) executeShowMeasurementCardinalityStatement(stmt *influxql.ShowMeasurementCardinalityStatement) (models.Rows, error) { +func (e *StatementExecutor) executeShowMeasurementCardinalityStatement(ctx *query.ExecutionContext, stmt *influxql.ShowMeasurementCardinalityStatement) (models.Rows, error) { if stmt.Database == "" { return nil, ErrDatabaseNameRequired } - n, err := e.TSDBStore.MeasurementsCardinality(stmt.Database) + n, err := e.TSDBStore.MeasurementsCardinality(ctx.Context, stmt.Database) if err != nil { return nil, err } @@ -826,12 +829,12 @@ func (e *StatementExecutor) executeShowShardsStatement(stmt *influxql.ShowShards return rows, nil } -func (e *StatementExecutor) executeShowSeriesCardinalityStatement(stmt *influxql.ShowSeriesCardinalityStatement) (models.Rows, error) { +func (e *StatementExecutor) executeShowSeriesCardinalityStatement(ctx *query.ExecutionContext, stmt *influxql.ShowSeriesCardinalityStatement) (models.Rows, error) { if stmt.Database == "" { return nil, ErrDatabaseNameRequired } - n, err := e.TSDBStore.SeriesCardinality(stmt.Database) + n, err := e.TSDBStore.SeriesCardinality(ctx.Context, stmt.Database) if err != nil { return nil, err } @@ -926,7 +929,7 @@ func (e *StatementExecutor) executeShowSubscriptionsStatement(stmt *influxql.Sho return rows, nil } -func (e *StatementExecutor) executeShowTagKeys(q *influxql.ShowTagKeysStatement, ctx *query.ExecutionContext) error { +func (e *StatementExecutor) executeShowTagKeys(ctx *query.ExecutionContext, q *influxql.ShowTagKeysStatement) error { if q.Database == "" { return ErrDatabaseNameRequired } @@ -963,7 +966,7 @@ func (e *StatementExecutor) executeShowTagKeys(q *influxql.ShowTagKeysStatement, } } - tagKeys, err := 
e.TSDBStore.TagKeys(ctx.Authorizer, shardIDs, cond) + tagKeys, err := e.TSDBStore.TagKeys(ctx.Context, ctx.Authorizer, shardIDs, cond) if err != nil { return ctx.Send(&query.Result{ Err: err, @@ -1013,7 +1016,7 @@ func (e *StatementExecutor) executeShowTagKeys(q *influxql.ShowTagKeysStatement, return nil } -func (e *StatementExecutor) executeShowTagValues(q *influxql.ShowTagValuesStatement, ctx *query.ExecutionContext) error { +func (e *StatementExecutor) executeShowTagValues(ctx *query.ExecutionContext, q *influxql.ShowTagValuesStatement) error { if q.Database == "" { return ErrDatabaseNameRequired } @@ -1050,7 +1053,7 @@ func (e *StatementExecutor) executeShowTagValues(q *influxql.ShowTagValuesStatem } } - tagValues, err := e.TSDBStore.TagValues(ctx.Authorizer, shardIDs, cond) + tagValues, err := e.TSDBStore.TagValues(ctx.Context, ctx.Authorizer, shardIDs, cond) if err != nil { return ctx.Send(&query.Result{Err: err}) } @@ -1188,7 +1191,7 @@ func (w *BufferedPointsWriter) Len() int { return len(w.buf) } // Cap returns the capacity (in points) of the buffer. 
func (w *BufferedPointsWriter) Cap() int { return cap(w.buf) } -func (e *StatementExecutor) writeInto(w pointsWriter, stmt *influxql.SelectStatement, row *models.Row) (n int64, err error) { +func (e *StatementExecutor) writeInto(w pointsWriter, stmt *influxql.SelectStatement, row *models.Row, strictErrorHandling bool) (n int64, err error) { if stmt.Target.Measurement.Database == "" { return 0, errNoDatabaseInTarget } @@ -1205,7 +1208,7 @@ func (e *StatementExecutor) writeInto(w pointsWriter, stmt *influxql.SelectState name = row.Name } - points, err := convertRowToPoints(name, row) + points, err := convertRowToPoints(name, row, strictErrorHandling) if err != nil { return 0, err } @@ -1224,7 +1227,7 @@ func (e *StatementExecutor) writeInto(w pointsWriter, stmt *influxql.SelectState var errNoDatabaseInTarget = errors.New("no database in target") // convertRowToPoints will convert a query result Row into Points that can be written back in. -func convertRowToPoints(measurementName string, row *models.Row) ([]models.Point, error) { +func convertRowToPoints(measurementName string, row *models.Row, strictErrorHandling bool) ([]models.Point, error) { // figure out which parts of the result are the time and which are the fields timeIndex := -1 fieldIndexes := make(map[string]int) @@ -1256,13 +1259,16 @@ func convertRowToPoints(measurementName string, row *models.Row) ([]models.Point p, err := models.NewPoint(measurementName, models.NewTags(row.Tags), vals, v[timeIndex].(time.Time)) if err != nil { - // Drop points that can't be stored - continue + if !strictErrorHandling { + // Drop points that can't be stored + continue + } else { + return nil, err + } } points = append(points, p) } - return points, nil } @@ -1368,12 +1374,12 @@ type TSDBStore interface { DeleteSeries(database string, sources []influxql.Source, condition influxql.Expr) error DeleteShard(id uint64) error - MeasurementNames(auth query.Authorizer, database string, cond influxql.Expr) ([][]byte, error) - 
TagKeys(auth query.Authorizer, shardIDs []uint64, cond influxql.Expr) ([]tsdb.TagKeys, error) - TagValues(auth query.Authorizer, shardIDs []uint64, cond influxql.Expr) ([]tsdb.TagValues, error) + MeasurementNames(ctx context.Context, auth query.FineAuthorizer, database string, cond influxql.Expr) ([][]byte, error) + TagKeys(ctx context.Context, auth query.FineAuthorizer, shardIDs []uint64, cond influxql.Expr) ([]tsdb.TagKeys, error) + TagValues(ctx context.Context, auth query.FineAuthorizer, shardIDs []uint64, cond influxql.Expr) ([]tsdb.TagValues, error) - SeriesCardinality(database string) (int64, error) - MeasurementsCardinality(database string) (int64, error) + SeriesCardinality(ctx context.Context, database string) (int64, error) + MeasurementsCardinality(ctx context.Context, database string) (int64, error) } var _ TSDBStore = LocalTSDBStore{} diff --git a/coordinator/statement_executor_test.go b/coordinator/statement_executor_test.go index 5d64c63e98b..8855f4ac318 100644 --- a/coordinator/statement_executor_test.go +++ b/coordinator/statement_executor_test.go @@ -383,26 +383,14 @@ func TestStatementExecutor_NormalizeDeleteSeries(t *testing.T) { } } -type mockAuthorizer struct { +type mockCoarseAuthorizer struct { AuthorizeDatabaseFn func(influxql.Privilege, string) bool } -func (a *mockAuthorizer) AuthorizeDatabase(p influxql.Privilege, name string) bool { +func (a *mockCoarseAuthorizer) AuthorizeDatabase(p influxql.Privilege, name string) bool { return a.AuthorizeDatabaseFn(p, name) } -func (m *mockAuthorizer) AuthorizeQuery(database string, query *influxql.Query) error { - panic("fail") -} - -func (m *mockAuthorizer) AuthorizeSeriesRead(database string, measurement []byte, tags models.Tags) bool { - panic("fail") -} - -func (m *mockAuthorizer) AuthorizeSeriesWrite(database string, measurement []byte, tags models.Tags) bool { - panic("fail") -} - func TestQueryExecutor_ExecuteQuery_ShowDatabases(t *testing.T) { qe := query.NewExecutor() qe.StatementExecutor 
= &coordinator.StatementExecutor{ @@ -416,7 +404,7 @@ func TestQueryExecutor_ExecuteQuery_ShowDatabases(t *testing.T) { } opt := query.ExecutionOptions{ - Authorizer: &mockAuthorizer{ + CoarseAuthorizer: &mockCoarseAuthorizer{ AuthorizeDatabaseFn: func(p influxql.Privilege, name string) bool { return name == "db2" || name == "db4" }, @@ -468,11 +456,11 @@ func NewQueryExecutor() *QueryExecutor { return nil } - e.TSDBStore.MeasurementNamesFn = func(auth query.Authorizer, database string, cond influxql.Expr) ([][]byte, error) { + e.TSDBStore.MeasurementNamesFn = func(auth query.FineAuthorizer, database string, cond influxql.Expr) ([][]byte, error) { return nil, nil } - e.TSDBStore.TagValuesFn = func(_ query.Authorizer, _ []uint64, _ influxql.Expr) ([]tsdb.TagValues, error) { + e.TSDBStore.TagValuesFn = func(_ query.FineAuthorizer, _ []uint64, _ influxql.Expr) ([]tsdb.TagValues, error) { return nil, nil } diff --git a/etc/config.sample.toml b/etc/config.sample.toml index 23d29490d81..ff40e8f5d5d 100644 --- a/etc/config.sample.toml +++ b/etc/config.sample.toml @@ -67,6 +67,10 @@ # log any sensitive data contained within a query. # query-log-enabled = true + # Provides more error checking. For example, SELECT INTO will err out inserting an +/-Inf value + # rather than silently failing. + # strict-error-handling = false + # Validates incoming writes to ensure keys only have valid unicode characters. # This setting will incur a small overhead because every key must be checked. 
# validate-keys = false diff --git a/flux/stdlib/influxdata/influxdb/v1/databases.go b/flux/stdlib/influxdata/influxdb/v1/databases.go index 1779f411c51..bcada5913c6 100644 --- a/flux/stdlib/influxdata/influxdb/v1/databases.go +++ b/flux/stdlib/influxdata/influxdb/v1/databases.go @@ -9,7 +9,7 @@ import ( "github.com/influxdata/flux/execute" "github.com/influxdata/flux/memory" "github.com/influxdata/flux/plan" - "github.com/influxdata/flux/stdlib/influxdata/influxdb/v1" + v1 "github.com/influxdata/flux/stdlib/influxdata/influxdb/v1" "github.com/influxdata/flux/values" "github.com/influxdata/influxdb/flux/stdlib/influxdata/influxdb" "github.com/influxdata/influxdb/services/meta" diff --git a/go.mod b/go.mod index 236de26b2f1..b43bd00c276 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/influxdata/influxdb -go 1.12 +go 1.13 require ( cloud.google.com/go/bigtable v1.2.0 // indirect @@ -11,7 +11,7 @@ require ( github.com/boltdb/bolt v1.3.1 github.com/cespare/xxhash v1.1.0 github.com/davecgh/go-spew v1.1.1 - github.com/dgrijalva/jwt-go v3.2.0+incompatible + github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1 github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8 github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd // indirect github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 // indirect @@ -44,6 +44,7 @@ require ( github.com/smartystreets/goconvey v1.6.4 // indirect github.com/soheilhy/cmux v0.1.4 // indirect github.com/spf13/cast v1.3.0 + github.com/stretchr/testify v1.5.1 github.com/tinylib/msgp v1.0.2 github.com/willf/bitset v1.1.3 // indirect github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6 diff --git a/go.sum b/go.sum index 4a5be1cc41e..567a1bc2fc2 100644 --- a/go.sum +++ b/go.sum @@ -53,9 +53,7 @@ 
github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= @@ -84,8 +82,8 @@ github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhr github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1 h1:CaO/zOnF8VvUfEbhRatPcwKVWamvbYd8tQGRWacE9kU= +github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1/go.mod h1:+hnT3ywWDTAFrW5aE+u2Sa/wT555ZqwoCS+pk3p6ry4= 
github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8 h1:akOQj8IVgoeFfBTzGOEQakCYshWD6RNo1M5pivFXt70= github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= github.com/eclipse/paho.mqtt.golang v1.2.0 h1:1F8mhG9+aO5/xpdtFkW4SxOJB67ukuDC3t2y2qayIX0= @@ -112,7 +110,6 @@ github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6rR7UHz84= github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= @@ -159,7 +156,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/flatbuffers v1.11.0 h1:O7CEyB8Cb3/DmtxODGtLHcEvpr81Jm5qLg/hsHnxA2A= github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= @@ -195,7 +191,6 @@ github.com/hashicorp/golang-lru v0.5.0/go.mod 
h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/flux v0.65.1 h1:77BcVUCzvN5HMm8+j9PRBQ4iZcu98Dl4Y9rf+J5vhnc= github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY= @@ -216,7 +211,6 @@ github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfE github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jsternberg/zap-logfmt v1.0.0 h1:0Dz2s/eturmdUS34GM82JwNEdQ9hPoJgqptcEKcbpzY= github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= @@ -234,10 +228,8 @@ github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada h1:3L+neHp83cTje github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod 
h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= @@ -247,7 +239,6 @@ github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-runewidth v0.0.3 h1:a+kO+98RDGEfo6asOGMmpodZq4FNtnGP54yps8BzLR4= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-sqlite3 v1.11.0 h1:LDdKkqtYlom37fkvqs8rMPFKAMe8+SgjbwZ6ex1/A/Q= github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104 h1:d8RFOZ2IiFtFWBcKEHAFYJcPTf0wY5q0exFNJZVWa1U= github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= @@ -267,7 +258,6 @@ github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f h1:O62NGAXV0cNzBI6e github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= github.com/philhofer/fwd v1.0.0/go.mod 
h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= -github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= @@ -280,7 +270,6 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -296,30 +285,23 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/segmentio/kafka-go v0.2.0 h1:HtCSf6B4gN/87yc5qTl7WsxPKQIIGXLPPM1bMCPOsoY= github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= -github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= 
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod 
h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/tinylib/msgp v1.0.2 h1:DfdQrzQa7Yh2es9SuLkixqxuXS2SxsdYn0KbdrOGWD8= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= @@ -333,7 +315,6 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -348,9 +329,7 @@ go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 h1:58fnuSXlxZmFdJyvtTFVmVhcMLU6v5fEb/ok4wyqtNU= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 
h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -363,7 +342,6 @@ golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxT golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299 h1:zQpM52jfKHG6II1ISZY1ZcpygvuSFZpLwfluuF89XOg= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= @@ -378,7 +356,6 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint 
v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= @@ -405,11 +382,9 @@ golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 h1:efeOvDhwQ29Dj3SdAV/MJf8oukgn+8D8WgaCaRMchF8= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -430,10 +405,8 @@ golang.org/x/net v0.0.0-20201209123823-ac852fbbde11 h1:lwlPPsmjDKK0J6eG6xDWd5XPe golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= 
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= @@ -443,9 +416,7 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -461,13 +432,11 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200107162124-548cf772de50 h1:YvQ10rzcqWXLlJZ3XCUoO25savxmscf4+SC+ZqiCHhA= golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -492,13 +461,11 @@ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9sn golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -528,7 +495,6 @@ golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200108203644-89082a384178 h1:f5gMxb6FbpY48csegk9UPd7IAHVrBD013CU7N4pWzoE= golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -556,26 +522,21 @@ golang.org/x/tools v0.0.0-20201211185031-d93e913c1a58 h1:1Bs6RVeBFtLZ8Yi1Hk07DiO golang.org/x/tools v0.0.0-20201211185031-d93e913c1a58/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors 
v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= -gonum.org/v1/gonum v0.6.0 h1:DJy6UzXbahnGUf1ujUNkh/NEtK14qMo2nvlBPs4U5yw= gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0 h1:jbyannxz0XFD3zdjgrSUsaJbgpH4eTrkdhRChkHPfO8= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0 h1:yzlyyDW/J0w8yNFJIhiAJy4kq74S+1DOLdawELNxFMA= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= @@ -594,7 +555,6 @@ google.golang.org/appengine v1.1.0/go.mod 
h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9Ywl google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= @@ -612,7 +572,6 @@ google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f h1:2wh8dWY8959cBGQvk1RD+/eQBgRYYDaZ+hT0/zsARoA= google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= @@ -626,7 +585,6 @@ google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod 
h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -640,16 +598,13 @@ google.golang.org/genproto v0.0.0-20201211151036-40ec1c210f7a h1:GnJAhasbD8HiT8D google.golang.org/genproto v0.0.0-20201211151036-40ec1c210f7a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod 
h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= @@ -657,9 +612,6 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0 h1:raiipEjMOIC/TO2AvyTxP25XFdLxNIBwzDh3FM3XztI= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0-dev.0.20201214170045-17e2cbe88713 h1:FdVS6x+O/OkqvvLJAOmfSpvM3D+2PszOhQ+G6geXOfE= -google.golang.org/grpc v1.35.0-dev.0.20201214170045-17e2cbe88713/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.0.1 h1:M8spwkmx0pHrPq+uMdl22w5CvJ/Y+oAJTIs9oGoCpOE= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.0.1/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -674,7 +626,6 @@ google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4 google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fatih/pool.v2 v2.0.0 h1:xIFeWtxifuQJGk/IEPKsTduEKcKvPmhoiVDGpC40nKg= @@ -686,11 +637,9 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools 
v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= diff --git a/internal/authorizer.go b/internal/authorizer.go index 07847f5258e..5a2e9df1c21 100644 --- a/internal/authorizer.go +++ b/internal/authorizer.go @@ -5,7 +5,7 @@ import ( "github.com/influxdata/influxql" ) -// AuthorizerMock is a mockable implementation of a query.Authorizer. 
+// AuthorizerMock is a mockable implementation of a query.FineAuthorizer + query.CoarseAuthorizer type AuthorizerMock struct { AuthorizeDatabaseFn func(influxql.Privilege, string) bool AuthorizeQueryFn func(database string, query *influxql.Query) error @@ -36,3 +36,7 @@ func (a *AuthorizerMock) AuthorizeSeriesRead(database string, measurement []byte func (a *AuthorizerMock) AuthorizeSeriesWrite(database string, measurement []byte, tags models.Tags) bool { return a.AuthorizeSeriesWriteFn(database, measurement, tags) } + +func (a *AuthorizerMock) IsOpen() bool { + return false +} diff --git a/internal/tsdb_store.go b/internal/tsdb_store.go index e2f27ca1a85..fc3eecfd3aa 100644 --- a/internal/tsdb_store.go +++ b/internal/tsdb_store.go @@ -1,6 +1,7 @@ package internal import ( + "context" "io" "time" @@ -30,7 +31,7 @@ type TSDBStoreMock struct { ImportShardFn func(id uint64, r io.Reader) error MeasurementSeriesCountsFn func(database string) (measuments int, series int) MeasurementsCardinalityFn func(database string) (int64, error) - MeasurementNamesFn func(auth query.Authorizer, database string, cond influxql.Expr) ([][]byte, error) + MeasurementNamesFn func(auth query.FineAuthorizer, database string, cond influxql.Expr) ([][]byte, error) OpenFn func() error PathFn func() string RestoreShardFn func(id uint64, r io.Reader) error @@ -43,8 +44,8 @@ type TSDBStoreMock struct { ShardRelativePathFn func(id uint64) (string, error) ShardsFn func(ids []uint64) []*tsdb.Shard StatisticsFn func(tags map[string]string) []models.Statistic - TagKeysFn func(auth query.Authorizer, shardIDs []uint64, cond influxql.Expr) ([]tsdb.TagKeys, error) - TagValuesFn func(auth query.Authorizer, shardIDs []uint64, cond influxql.Expr) ([]tsdb.TagValues, error) + TagKeysFn func(auth query.FineAuthorizer, shardIDs []uint64, cond influxql.Expr) ([]tsdb.TagKeys, error) + TagValuesFn func(auth query.FineAuthorizer, shardIDs []uint64, cond influxql.Expr) ([]tsdb.TagValues, error) WithLoggerFn func(log 
*zap.Logger) WriteToShardFn func(shardID uint64, points []models.Point) error } @@ -92,13 +93,13 @@ func (s *TSDBStoreMock) ExpandSources(sources influxql.Sources) (influxql.Source func (s *TSDBStoreMock) ImportShard(id uint64, r io.Reader) error { return s.ImportShardFn(id, r) } -func (s *TSDBStoreMock) MeasurementNames(auth query.Authorizer, database string, cond influxql.Expr) ([][]byte, error) { +func (s *TSDBStoreMock) MeasurementNames(ctx context.Context, auth query.FineAuthorizer, database string, cond influxql.Expr) ([][]byte, error) { return s.MeasurementNamesFn(auth, database, cond) } func (s *TSDBStoreMock) MeasurementSeriesCounts(database string) (measuments int, series int) { return s.MeasurementSeriesCountsFn(database) } -func (s *TSDBStoreMock) MeasurementsCardinality(database string) (int64, error) { +func (s *TSDBStoreMock) MeasurementsCardinality(ctx context.Context, database string) (int64, error) { return s.MeasurementsCardinalityFn(database) } func (s *TSDBStoreMock) Open() error { @@ -110,7 +111,7 @@ func (s *TSDBStoreMock) Path() string { func (s *TSDBStoreMock) RestoreShard(id uint64, r io.Reader) error { return s.RestoreShardFn(id, r) } -func (s *TSDBStoreMock) SeriesCardinality(database string) (int64, error) { +func (s *TSDBStoreMock) SeriesCardinality(ctx context.Context, database string) (int64, error) { return s.SeriesCardinalityFn(database) } func (s *TSDBStoreMock) SetShardEnabled(shardID uint64, enabled bool) error { @@ -137,10 +138,10 @@ func (s *TSDBStoreMock) Shards(ids []uint64) []*tsdb.Shard { func (s *TSDBStoreMock) Statistics(tags map[string]string) []models.Statistic { return s.StatisticsFn(tags) } -func (s *TSDBStoreMock) TagKeys(auth query.Authorizer, shardIDs []uint64, cond influxql.Expr) ([]tsdb.TagKeys, error) { +func (s *TSDBStoreMock) TagKeys(ctx context.Context, auth query.FineAuthorizer, shardIDs []uint64, cond influxql.Expr) ([]tsdb.TagKeys, error) { return s.TagKeysFn(auth, shardIDs, cond) } -func (s 
*TSDBStoreMock) TagValues(auth query.Authorizer, shardIDs []uint64, cond influxql.Expr) ([]tsdb.TagValues, error) { +func (s *TSDBStoreMock) TagValues(ctx context.Context, auth query.FineAuthorizer, shardIDs []uint64, cond influxql.Expr) ([]tsdb.TagValues, error) { return s.TagValuesFn(auth, shardIDs, cond) } func (s *TSDBStoreMock) WithLogger(log *zap.Logger) { diff --git a/pkg/reporthelper/walkshards.go b/pkg/reporthelper/walkshards.go new file mode 100644 index 00000000000..bb3e879a963 --- /dev/null +++ b/pkg/reporthelper/walkshards.go @@ -0,0 +1,71 @@ +// Package report reports statistics about TSM files. +package reporthelper + +import ( + "fmt" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + + "github.com/influxdata/influxdb/tsdb/engine/tsm1" +) + +func IsShardDir(dir string) error { + name := filepath.Base(dir) + if id, err := strconv.Atoi(name); err != nil || id < 1 { + return fmt.Errorf("not a valid shard dir: %v", dir) + } + + return nil +} + +func WalkShardDirs(root string, fn func(db, rp, id, path string) error) error { + type location struct { + db, rp, id, path string + } + + var dirs []location + if err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + if info.IsDir() { + return nil + } + + if filepath.Ext(info.Name()) == "."+tsm1.TSMFileExtension { + shardDir := filepath.Dir(path) + + if err := IsShardDir(shardDir); err != nil { + return err + } + absPath, err := filepath.Abs(path) + if err != nil { + return err + } + parts := strings.Split(absPath, string(filepath.Separator)) + db, rp, id := parts[len(parts)-4], parts[len(parts)-3], parts[len(parts)-2] + dirs = append(dirs, location{db: db, rp: rp, id: id, path: path}) + return nil + } + return nil + }); err != nil { + return err + } + + sort.Slice(dirs, func(i, j int) bool { + a, _ := strconv.Atoi(dirs[i].id) + b, _ := strconv.Atoi(dirs[j].id) + return a < b + }) + + for _, shard := range dirs { + if err := 
fn(shard.db, shard.rp, shard.id, shard.path); err != nil { + return err + } + } + return nil +} diff --git a/pkg/tracing/wire/binary.pb.go b/pkg/tracing/wire/binary.pb.go index 377bea888e3..1f1ac0cd860 100644 --- a/pkg/tracing/wire/binary.pb.go +++ b/pkg/tracing/wire/binary.pb.go @@ -15,17 +15,23 @@ */ package wire -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" -import _ "github.com/gogo/protobuf/gogoproto" -import _ "github.com/gogo/protobuf/types" +import ( + fmt "fmt" -import time "time" + proto "github.com/gogo/protobuf/proto" -import github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + math "math" -import io "io" + _ "github.com/gogo/protobuf/gogoproto" + + _ "github.com/gogo/protobuf/types" + + time "time" + + github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + + io "io" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal diff --git a/prometheus/converters.go b/prometheus/converters.go index 8de16403891..71d883841d2 100644 --- a/prometheus/converters.go +++ b/prometheus/converters.go @@ -240,7 +240,9 @@ func nodeFromMatcher(m *remote.LabelMatcher) (*datatypes.Node, error) { right = &datatypes.Node{ NodeType: datatypes.NodeTypeLiteral, Value: &datatypes.Node_RegexValue{ - RegexValue: m.Value, + // To comply with PromQL, see + // https://github.com/prometheus/prometheus/blob/daf382e4a9f5ca380b2b662c8e60755a56675f14/pkg/labels/regexp.go#L30 + RegexValue: "^(?:" + m.Value + ")$", }, } } else { diff --git a/prometheus/remote/remote.pb.go b/prometheus/remote/remote.pb.go index a9e78261e14..4481f4a91fe 100644 --- a/prometheus/remote/remote.pb.go +++ b/prometheus/remote/remote.pb.go @@ -21,11 +21,15 @@ */ package remote -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import io "io" + proto 
"github.com/gogo/protobuf/proto" + + math "math" + + io "io" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal diff --git a/query/executor.go b/query/executor.go index 5f9cbeaffb2..e15ee919804 100644 --- a/query/executor.go +++ b/query/executor.go @@ -68,19 +68,33 @@ func ErrMaxConcurrentQueriesLimitExceeded(n, limit int) error { return fmt.Errorf("max-concurrent-queries limit exceeded(%d, %d)", n, limit) } -// Authorizer determines if certain operations are authorized. -type Authorizer interface { +// CoarseAuthorizer determines if certain operations are authorized at the database level. +// +// It is supported both in OSS and Enterprise. +type CoarseAuthorizer interface { // AuthorizeDatabase indicates whether the given Privilege is authorized on the database with the given name. AuthorizeDatabase(p influxql.Privilege, name string) bool +} + +type openCoarseAuthorizer struct{} - // AuthorizeQuery returns an error if the query cannot be executed - AuthorizeQuery(database string, query *influxql.Query) error +func (a openCoarseAuthorizer) AuthorizeDatabase(influxql.Privilege, string) bool { return true } +// OpenCoarseAuthorizer is a fully permissive implementation of CoarseAuthorizer. +var OpenCoarseAuthorizer openCoarseAuthorizer + +// FineAuthorizer determines if certain operations are authorized at the series level. +// +// It is only supported in InfluxDB Enterprise. In OSS it always returns true. +type FineAuthorizer interface { // AuthorizeSeriesRead determines if a series is authorized for reading AuthorizeSeriesRead(database string, measurement []byte, tags models.Tags) bool // AuthorizeSeriesWrite determines if a series is authorized for writing AuthorizeSeriesWrite(database string, measurement []byte, tags models.Tags) bool + + // IsOpen guarantees that the other methods of a FineAuthorizer always return true. + IsOpen() bool } // OpenAuthorizer is the Authorizer used when authorization is disabled. 
@@ -90,9 +104,6 @@ type openAuthorizer struct{} // OpenAuthorizer can be shared by all goroutines. var OpenAuthorizer = openAuthorizer{} -// AuthorizeDatabase returns true to allow any operation on a database. -func (a openAuthorizer) AuthorizeDatabase(influxql.Privilege, string) bool { return true } - // AuthorizeSeriesRead allows access to any series. func (a openAuthorizer) AuthorizeSeriesRead(database string, measurement []byte, tags models.Tags) bool { return true @@ -103,6 +114,8 @@ func (a openAuthorizer) AuthorizeSeriesWrite(database string, measurement []byte return true } +func (a openAuthorizer) IsOpen() bool { return true } + // AuthorizeSeriesRead allows any query to execute. func (a openAuthorizer) AuthorizeQuery(_ string, _ *influxql.Query) error { return nil } @@ -110,11 +123,8 @@ func (a openAuthorizer) AuthorizeQuery(_ string, _ *influxql.Query) error { retu // authorize anything. A nil Authorizer returns true for this function, and this // function should be preferred over directly checking if an Authorizer is nil // or not. -func AuthorizerIsOpen(a Authorizer) bool { - if u, ok := a.(interface{ AuthorizeUnrestricted() bool }); ok { - return u.AuthorizeUnrestricted() - } - return a == nil || a == OpenAuthorizer +func AuthorizerIsOpen(a FineAuthorizer) bool { + return a == nil || a.IsOpen() } // ExecutionOptions contains the options for executing a query. @@ -125,9 +135,11 @@ type ExecutionOptions struct { // The retention policy the query is running against. RetentionPolicy string - // How to determine whether the query is allowed to execute, - // what resources can be returned in SHOW queries, etc. - Authorizer Authorizer + // Authorizer handles series-level authorization + Authorizer FineAuthorizer + + // CoarseAuthorizer handles database-level authorization + CoarseAuthorizer CoarseAuthorizer // The requested maximum number of points to return in each result. 
ChunkSize int @@ -160,7 +172,7 @@ func NewContextWithIterators(ctx context.Context, itr *Iterators) context.Contex type StatementExecutor interface { // ExecuteStatement executes a statement. Results should be sent to the // results channel in the ExecutionContext. - ExecuteStatement(stmt influxql.Statement, ctx *ExecutionContext) error + ExecuteStatement(ctx *ExecutionContext, stmt influxql.Statement) error } // StatementNormalizer normalizes a statement before it is executed. @@ -332,7 +344,7 @@ LOOP: } // Send any other statements to the underlying statement executor. - err = e.StatementExecutor.ExecuteStatement(stmt, ctx) + err = e.StatementExecutor.ExecuteStatement(ctx, stmt) if err == ErrQueryInterrupted { // Query was interrupted so retrieve the real interrupt error from // the query task if there is one. diff --git a/query/executor_test.go b/query/executor_test.go index 6f7f0c1afb0..8ed49fde87e 100644 --- a/query/executor_test.go +++ b/query/executor_test.go @@ -17,7 +17,7 @@ type StatementExecutor struct { ExecuteStatementFn func(stmt influxql.Statement, ctx *query.ExecutionContext) error } -func (e *StatementExecutor) ExecuteStatement(stmt influxql.Statement, ctx *query.ExecutionContext) error { +func (e *StatementExecutor) ExecuteStatement(ctx *query.ExecutionContext, stmt influxql.Statement) error { return e.ExecuteStatementFn(stmt, ctx) } @@ -57,7 +57,7 @@ func TestQueryExecutor_KillQuery(t *testing.T) { ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error { switch stmt.(type) { case *influxql.KillQueryStatement: - return e.TaskManager.ExecuteStatement(stmt, ctx) + return e.TaskManager.ExecuteStatement(ctx, stmt) } qid <- ctx.QueryID @@ -98,7 +98,7 @@ func TestQueryExecutor_KillQuery_Zombie(t *testing.T) { ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error { switch stmt.(type) { case *influxql.KillQueryStatement, *influxql.ShowQueriesStatement: - return 
e.TaskManager.ExecuteStatement(stmt, ctx) + return e.TaskManager.ExecuteStatement(ctx, stmt) } qid <- ctx.QueryID @@ -167,7 +167,7 @@ func TestQueryExecutor_KillQuery_CloseTaskManager(t *testing.T) { ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error { switch stmt.(type) { case *influxql.KillQueryStatement, *influxql.ShowQueriesStatement: - return e.TaskManager.ExecuteStatement(stmt, ctx) + return e.TaskManager.ExecuteStatement(ctx, stmt) } qid <- ctx.QueryID @@ -224,7 +224,7 @@ func TestQueryExecutor_KillQuery_AlreadyKilled(t *testing.T) { ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error { switch stmt.(type) { case *influxql.KillQueryStatement, *influxql.ShowQueriesStatement: - return e.TaskManager.ExecuteStatement(stmt, ctx) + return e.TaskManager.ExecuteStatement(ctx, stmt) } qid <- ctx.QueryID @@ -314,7 +314,7 @@ func TestQueryExecutor_ShowQueries(t *testing.T) { ExecuteStatementFn: func(stmt influxql.Statement, ctx *query.ExecutionContext) error { switch stmt.(type) { case *influxql.ShowQueriesStatement: - return e.TaskManager.ExecuteStatement(stmt, ctx) + return e.TaskManager.ExecuteStatement(ctx, stmt) } t.Errorf("unexpected statement: %s", stmt) diff --git a/query/internal/internal.pb.go b/query/internal/internal.pb.go index dd76e1b0232..dd31790687b 100644 --- a/query/internal/internal.pb.go +++ b/query/internal/internal.pb.go @@ -19,9 +19,13 @@ It has these top-level messages: */ package query -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" + + proto "github.com/gogo/protobuf/proto" + + math "math" +) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal diff --git a/query/iterator.go b/query/iterator.go index 232d5abf21f..cb59391abd4 100644 --- a/query/iterator.go +++ b/query/iterator.go @@ -613,7 +613,7 @@ type IteratorOptions struct { InterruptCh <-chan struct{} // Authorizer can limit access to data - Authorizer Authorizer + Authorizer FineAuthorizer } // newIteratorOptionsStmt creates the iterator options from stmt. @@ -815,6 +815,8 @@ func (opt IteratorOptions) Window(t int64) (start, end int64) { start = t - dt } + start += int64(opt.Interval.Offset) + // Look for the start offset again because the first time may have been // after the offset switch. Now that we are at midnight in UTC, we can // lookup the zone offset again to get the real starting offset. @@ -826,7 +828,6 @@ func (opt IteratorOptions) Window(t int64) (start, end int64) { start += o } } - start += int64(opt.Interval.Offset) // Find the end time. if dt := int64(opt.Interval.Duration) - dt; influxql.MaxTime-dt <= t { diff --git a/query/iterator_test.go b/query/iterator_test.go index 574a4fd14b1..3cf9457787e 100644 --- a/query/iterator_test.go +++ b/query/iterator_test.go @@ -825,6 +825,61 @@ func TestFillIterator_ImplicitStartTime(t *testing.T) { } } +// A count() GROUP BY query with an offset that caused an interval +// to cross a daylight savings change inserted an extra output row +// off by one hour in a grouped count() expression. 
+// https://github.com/influxdata/influxdb/issues/20238 + +func TestGroupByIterator_DST(t *testing.T) { + inputIter := &IntegerIterator{ + Points: []query.IntegerPoint{ + {Name: "a", Tags: ParseTags("t=A"), Time: 1584345600000000000, Value: 1}, + {Name: "a", Tags: ParseTags("t=A"), Time: 1584432000000000000, Value: 2}, + {Name: "a", Tags: ParseTags("t=A"), Time: 1584518400000000000, Value: 3}, + {Name: "a", Tags: ParseTags("t=A"), Time: 1585555200000000000, Value: 4}, + }, + } + const location = "Europe/Rome" + loc, err := time.LoadLocation(location) + if err != nil { + t.Fatalf("Cannot find timezone for %s: %s", location, err) + } + opt := query.IteratorOptions{ + StartTime: mustParseTime("2020-03-15T00:00:00Z").UnixNano(), + EndTime: mustParseTime("2020-04-01T00:00:00Z").UnixNano(), + Ascending: true, + Ordered: true, + StripName: false, + Fill: influxql.NullFill, + FillValue: nil, + Dedupe: false, + Interval: query.Interval{ + Duration: 7 * 24 * time.Hour, + Offset: 4 * 24 * time.Hour, + }, + Expr: MustParseExpr("count(Value)"), + Location: loc, + } + + groupByIter, err := query.NewCallIterator(inputIter, opt) + if err != nil { + t.Fatalf("Cannot create Count and Group By iterator: %s", err) + } else { + groupByIter = query.NewFillIterator(groupByIter, MustParseExpr("count(Value)"), opt) + } + + if a, err := (Iterators{groupByIter}).ReadAll(); err != nil { + t.Fatalf("unexpected error: %s", err) + } else if !deep.Equal(a, [][]query.Point{ + {&query.IntegerPoint{Name: "a", Aggregated: 0, Time: mustParseTime("2020-03-09T00:00:00+01:00").UnixNano(), Value: 0}}, + {&query.IntegerPoint{Name: "a", Aggregated: 3, Time: mustParseTime("2020-03-16T00:00:00+01:00").UnixNano(), Value: 3}}, + {&query.IntegerPoint{Name: "a", Aggregated: 0, Time: mustParseTime("2020-03-23T00:00:00+01:00").UnixNano(), Value: 0}}, + {&query.IntegerPoint{Name: "a", Aggregated: 1, Time: mustParseTime("2020-03-30T00:00:00+02:00").UnixNano(), Value: 1}}, + }) { + t.Fatalf("unexpected points: 
%s", spew.Sdump(a)) + } +} + func TestFillIterator_DST(t *testing.T) { for _, tt := range []struct { name string diff --git a/query/select.go b/query/select.go index f070d7131b7..fcdea255be8 100644 --- a/query/select.go +++ b/query/select.go @@ -21,7 +21,7 @@ var DefaultTypeMapper = influxql.MultiTypeMapper( // SelectOptions are options that customize the select call. type SelectOptions struct { // Authorizer is used to limit access to data - Authorizer Authorizer + Authorizer FineAuthorizer // Node to exclusively read from. // If zero, all nodes are used. diff --git a/query/subquery_test.go b/query/subquery_test.go index 6589e524b2c..8789642defe 100644 --- a/query/subquery_test.go +++ b/query/subquery_test.go @@ -402,6 +402,9 @@ func (*openAuthorizer) AuthorizeSeriesRead(database string, measurement []byte, func (*openAuthorizer) AuthorizeSeriesWrite(database string, measurement []byte, tags models.Tags) bool { return true } +func (*openAuthorizer) IsOpen() bool { + return true +} // Ensure that the subquery gets passed the query authorizer. func TestSubquery_Authorizer(t *testing.T) { diff --git a/query/task_manager.go b/query/task_manager.go index f380d14e733..a46d73bc284 100644 --- a/query/task_manager.go +++ b/query/task_manager.go @@ -93,7 +93,7 @@ func NewTaskManager() *TaskManager { } // ExecuteStatement executes a statement containing one of the task management queries. 
-func (t *TaskManager) ExecuteStatement(stmt influxql.Statement, ctx *ExecutionContext) error { +func (t *TaskManager) ExecuteStatement(ctx *ExecutionContext, stmt influxql.Statement) error { switch stmt := stmt.(type) { case *influxql.ShowQueriesStatement: rows, err := t.executeShowQueriesStatement(stmt) diff --git a/releng/packages/spec/clean_install/_install_uninstall.bash b/releng/packages/spec/clean_install/_install_uninstall.bash index 4239d7f3d54..f3ef2321d10 100755 --- a/releng/packages/spec/clean_install/_install_uninstall.bash +++ b/releng/packages/spec/clean_install/_install_uninstall.bash @@ -27,7 +27,7 @@ function testInstalled() { function testUninstalled() { if command -v "$1" >/dev/null 2>&1 ; then - >&2 echo "$1 still on \$PATH after install" + >&2 echo "$1 still on \$PATH after uninstall" exit 1 fi } diff --git a/releng/packages/spec/clean_install/run.bash b/releng/packages/spec/clean_install/run.bash index 2de027d3ebf..bb070be9bc0 100755 --- a/releng/packages/spec/clean_install/run.bash +++ b/releng/packages/spec/clean_install/run.bash @@ -63,7 +63,7 @@ fi if [ -n "$IS_RPM" ]; then # Latest is the most recent LTS, and Rolling is the most recent release. 
- for BASE_IMAGE in centos:6 centos:7 ; do + for BASE_IMAGE in centos:7 centos:8 ; do if [ -n "$PKG" ]; then dockerTest "$PKG" /data.rpm -R fi diff --git a/services/continuous_querier/service_test.go b/services/continuous_querier/service_test.go index f2a199d71aa..64c12236a68 100644 --- a/services/continuous_querier/service_test.go +++ b/services/continuous_querier/service_test.go @@ -825,7 +825,7 @@ type StatementExecutor struct { ExecuteStatementFn func(stmt influxql.Statement, ctx *query.ExecutionContext) error } -func (e *StatementExecutor) ExecuteStatement(stmt influxql.Statement, ctx *query.ExecutionContext) error { +func (e *StatementExecutor) ExecuteStatement(ctx *query.ExecutionContext, stmt influxql.Statement) error { return e.ExecuteStatementFn(stmt, ctx) } diff --git a/services/httpd/handler.go b/services/httpd/handler.go index 4158af3924d..0304cab7de0 100644 --- a/services/httpd/handler.go +++ b/services/httpd/handler.go @@ -25,7 +25,7 @@ import ( httppprof "net/http/pprof" "github.com/bmizerany/pat" - "github.com/dgrijalva/jwt-go" + "github.com/dgrijalva/jwt-go/v4" "github.com/gogo/protobuf/proto" "github.com/golang/snappy" "github.com/influxdata/flux" @@ -87,6 +87,21 @@ type Route struct { HandlerFunc interface{} } +type QueryAuthorizer interface { + AuthorizeQuery(u meta.User, query *influxql.Query, database string) error + AuthorizeDatabase(u meta.User, priv influxql.Privilege, database string) error +} + +// userQueryAuthorizer binds the QueryAuthorizer with a specific user for consumption by the query engine. +type userQueryAuthorizer struct { + auth QueryAuthorizer + user meta.User +} + +func (a *userQueryAuthorizer) AuthorizeDatabase(p influxql.Privilege, name string) bool { + return a.auth.AuthorizeDatabase(a.user, p, name) == nil +} + // Handler represents an HTTP handler for the InfluxDB server. 
type Handler struct { mux *pat.PatternServeMux @@ -102,9 +117,7 @@ type Handler struct { AdminUserExists() bool } - QueryAuthorizer interface { - AuthorizeQuery(u meta.User, query *influxql.Query, database string) error - } + QueryAuthorizer QueryAuthorizer WriteAuthorizer interface { AuthorizeWrite(username, database string) error @@ -245,6 +258,7 @@ func NewHandler(c Config) *Handler { if h.Config.AuthEnabled && h.Config.PprofEnabled && h.Config.PprofAuthEnabled { authWrapper = func(handler func(http.ResponseWriter, *http.Request)) interface{} { return func(w http.ResponseWriter, r *http.Request, user meta.User) { + // TODO: This is the only place we use AuthorizeUnrestricted. It would be better to use an explicit permission if user == nil || !user.AuthorizeUnrestricted() { h.Logger.Info("Unauthorized request", zap.String("user", user.ID()), zap.String("path", r.URL.Path)) h.httpError(w, "error authorizing admin access", http.StatusForbidden) @@ -327,6 +341,10 @@ func (h *Handler) Open() { } func (h *Handler) Close() { + + // lets gracefully shut down http connections. we'll give them 10 seconds + // before we shut them down "with extreme predjudice". + if h.accessLog != nil { h.accessLog.Close() h.accessLog = nil @@ -604,14 +622,18 @@ func (h *Handler) serveQuery(w http.ResponseWriter, r *http.Request, user meta.U ChunkSize: chunkSize, ReadOnly: r.Method == "GET", NodeID: nodeID, + // Authorizer is for fine grained auth, not supported by oss. + Authorizer: query.OpenAuthorizer, } if h.Config.AuthEnabled { // The current user determines the authorized actions. - opts.Authorizer = user + opts.CoarseAuthorizer = &userQueryAuthorizer{ + auth: h.QueryAuthorizer, + user: user, + } } else { - // Auth is disabled, so allow everything. 
- opts.Authorizer = query.OpenAuthorizer + opts.CoarseAuthorizer = query.OpenCoarseAuthorizer } // Make sure if the client disconnects we signal the query to abort diff --git a/services/httpd/handler_test.go b/services/httpd/handler_test.go index f1fc62d0873..c6c758f919c 100644 --- a/services/httpd/handler_test.go +++ b/services/httpd/handler_test.go @@ -20,7 +20,7 @@ import ( "testing" "time" - "github.com/dgrijalva/jwt-go" + "github.com/dgrijalva/jwt-go/v4" "github.com/gogo/protobuf/proto" "github.com/golang/snappy" "github.com/google/go-cmp/cmp" @@ -193,7 +193,7 @@ func TestHandler_Query_Auth(t *testing.T) { h.ServeHTTP(w, req) if w.Code != http.StatusUnauthorized { t.Fatalf("unexpected status: %d: %s", w.Code, w.Body.String()) - } else if body := strings.TrimSpace(w.Body.String()); body != `{"error":"signature is invalid"}` { + } else if body := strings.TrimSpace(w.Body.String()); body != `{"error":"token signature is invalid"}` { t.Fatalf("unexpected body: %s", body) } @@ -217,7 +217,7 @@ func TestHandler_Query_Auth(t *testing.T) { h.ServeHTTP(w, req) if w.Code != http.StatusUnauthorized { t.Fatalf("unexpected status: %d: %s", w.Code, w.Body.String()) - } else if !strings.Contains(w.Body.String(), `{"error":"Token is expired`) { + } else if !strings.Contains(w.Body.String(), `{"error":"token is expired`) { t.Fatalf("unexpected body: %s", w.Body.String()) } @@ -2027,7 +2027,7 @@ type HandlerStatementExecutor struct { ExecuteStatementFn func(stmt influxql.Statement, ctx *query.ExecutionContext) error } -func (e *HandlerStatementExecutor) ExecuteStatement(stmt influxql.Statement, ctx *query.ExecutionContext) error { +func (e *HandlerStatementExecutor) ExecuteStatement(ctx *query.ExecutionContext, stmt influxql.Statement) error { return e.ExecuteStatementFn(stmt, ctx) } @@ -2040,6 +2040,10 @@ func (a *HandlerQueryAuthorizer) AuthorizeQuery(u meta.User, query *influxql.Que return a.AuthorizeQueryFn(u, query, database) } +func 
(a *HandlerQueryAuthorizer) AuthorizeDatabase(u meta.User, priv influxql.Privilege, database string) error { + panic("not implemented") +} + type HandlerPointsWriter struct { WritePointsFn func(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, user meta.User, points []models.Point) error } diff --git a/services/httpd/pprof.go b/services/httpd/pprof.go index 550e114c532..01ed2844aff 100644 --- a/services/httpd/pprof.go +++ b/services/httpd/pprof.go @@ -3,12 +3,13 @@ package httpd import ( "archive/tar" "bytes" - "compress/gzip" "fmt" "io" "net/http" httppprof "net/http/pprof" + "path" "runtime/pprof" + "runtime/trace" "sort" "strconv" "text/tabwriter" @@ -33,13 +34,6 @@ func (h *Handler) handleProfiles(w http.ResponseWriter, r *http.Request) { } } -// prof describes a profile name and a debug value, or in the case of a CPU -// profile, the number of seconds to collect the profile for. -type prof struct { - Name string - Debug int64 -} - // archiveProfilesAndQueries collects the following profiles: // - goroutine profile // - heap profile @@ -68,52 +62,177 @@ type prof struct { // as there is something there. // func (h *Handler) archiveProfilesAndQueries(w http.ResponseWriter, r *http.Request) { - var allProfs = []*prof{ - {Name: "goroutine", Debug: 1}, - {Name: "block", Debug: 1}, - {Name: "mutex", Debug: 1}, - {Name: "heap", Debug: 1}, + // prof describes a profile name and a debug value, or in the case of a CPU + // profile, the number of seconds to collect the profile for. + type prof struct { + Name string // name of profile + Duration time.Duration // duration of profile if applicable. curently only used by cpu and trace } - // Capture a CPU profile? 
- if r.FormValue("cpu") != "" { - profile := &prof{Name: "cpu"} + var profiles = []prof{ + {Name: "goroutine"}, + {Name: "block"}, + {Name: "mutex"}, + {Name: "heap"}, + {Name: "allocs"}, + {Name: "threadcreate"}, + } - // For a CPU profile we'll use the Debug field to indicate the number of - // seconds to capture the profile for. - profile.Debug, _ = strconv.ParseInt(r.FormValue("seconds"), 10, 64) - if profile.Debug <= 0 { - profile.Debug = 30 + // We parse the form here so that we can use the http.Request.Form map. + // + // Otherwise we'd have to use r.FormValue() which makes it impossible to + // distinuish between a form value that exists and has no value and one that + // does not exist at all. + if err := r.ParseForm(); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + // In the following two blocks, we check if the request should include cpu + // profiles and a trace log. + // + // Since the submitted form can contain multiple version of a variable like: + // + // http://localhost:8086?cpu=1s&cpu=30s&trace=3s&cpu=5s + // + // the question arises: which value should we use? We choose to use the LAST + // value supplied. + // + // This is an edge case but if for some reason, for example, a url is + // programatically built and multiple values are supplied, this will do what + // is expected. + // + + // last() returns either the last item from a slice of strings or an empty + // string if the supplied slice is empty or nill. + last := func(s []string) string { + if len(s) == 0 { + return "" } - allProfs = append([]*prof{profile}, allProfs...) // CPU profile first. + return s[len(s)-1] } - var ( - resp bytes.Buffer // Temporary buffer for entire archive. - buf bytes.Buffer // Temporary buffer for each profile/query result. - ) + // if trace exsits as a form value, add it to the profiles slice with the + // decoded duration. 
+ // + // Requests for a trace should look like: + // + // ?trace=10s + // + if vals, exists := r.Form["trace"]; exists { + // parse the duration encoded in the last "trace" value supplied. + val := last(vals) + duration, err := time.ParseDuration(val) + + // If we can't parse the duration or if the user supplies a negative + // number, return an appropriate error status and message. + // + // In this case it is a StatusBadRequest (400) since the problem is in the + // supplied form data. + if duration < 0 { + http.Error(w, fmt.Sprintf("negative trace durations not allowed"), http.StatusBadRequest) + return + } + + if err != nil { + http.Error(w, fmt.Sprintf("could not parse supplied duration for trace %q", val), http.StatusBadRequest) + return + } + + // Trace files can get big. Lets clamp the maximum trace duration to 45s. + if maxDuration := time.Second * 45; duration > maxDuration { + duration = maxDuration + } + profiles = append(profiles, prof{"trace", duration}) + } + + // Capturing CPU profiles is a little tricker. The preferred way to send the + // the cpu profile duration is via the supplied "cpu" variable's value. + // + // The duration should be encoded as a Go duration that can be parsed by + // time.ParseDuration(). + // + // In the past users were encouraged to assign any value to cpu and provide + // the duration in a separate "seconds" value. + // + // The code below handles both -- first it attempts to use the old method + // which would look like: + // + // ?cpu=foobar&seconds=10 + // + // Then it attempts to ascertain the duration provided with: + // + // ?cpu=10s + // + // This preserves backwards compatibility with any tools that have been + // written to gather profiles. 
+ // + if vals, exists := r.Form["cpu"]; exists { + duration := time.Second * 30 + val := last(vals) + + // getDuration is a small function literal that encapsulates the logic + // for getting the duration from either the "seconds" form value or from + // the value assigned to "cpu". + getDuration := func() (time.Duration, error) { + if seconds, exists := r.Form["seconds"]; exists { + s, err := strconv.ParseInt(last(seconds), 10, 64) + if err != nil { + return 0, err + } + return time.Second * time.Duration(s), nil + } + // see if the value of cpu is a duration like: cpu=10s + return time.ParseDuration(val) + } + + duration, err := getDuration() + if err != nil { + http.Error(w, fmt.Sprintf("could not parse supplied duration for cpu profile %q", val), http.StatusBadRequest) + return + } - gz := gzip.NewWriter(&resp) - tw := tar.NewWriter(gz) + if duration < 0 { + http.Error(w, fmt.Sprintf("negative cpu profile durations not allowed"), http.StatusBadRequest) + return + } + // prepend our profiles slice with cpu -- we want to fetch cpu profiles + // first. + profiles = append([]prof{{"cpu", duration}}, profiles...) + } + + tarball := &bytes.Buffer{} + buf := &bytes.Buffer{} // Temporary buffer for each profile/query result. + + tw := tar.NewWriter(tarball) // Collect and write out profiles. 
- for _, profile := range allProfs { - if profile.Name == "cpu" { - if err := pprof.StartCPUProfile(&buf); err != nil { + for _, profile := range profiles { + switch profile.Name { + case "cpu": + if err := pprof.StartCPUProfile(buf); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } - - sleep(w, time.Duration(profile.Debug)*time.Second) + sleep(r, profile.Duration) pprof.StopCPUProfile() - } else { + + case "trace": + if err := trace.Start(buf); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + sleep(r, profile.Duration) + trace.Stop() + + default: prof := pprof.Lookup(profile.Name) if prof == nil { http.Error(w, "unable to find profile "+profile.Name, http.StatusInternalServerError) return } - if err := prof.WriteTo(&buf, int(profile.Debug)); err != nil { + if err := prof.WriteTo(buf, 0); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -121,7 +240,7 @@ func (h *Handler) archiveProfilesAndQueries(w http.ResponseWriter, r *http.Reque // Write the profile file's header. err := tw.WriteHeader(&tar.Header{ - Name: profile.Name + ".txt", + Name: path.Join("profiles", profile.Name+".pb.gz"), Mode: 0600, Size: int64(buf.Len()), }) @@ -148,7 +267,7 @@ func (h *Handler) archiveProfilesAndQueries(w http.ResponseWriter, r *http.Reque {"diagnostics", h.showDiagnostics}, } - tabW := tabwriter.NewWriter(&buf, 8, 8, 1, '\t', 0) + tabW := tabwriter.NewWriter(buf, 8, 8, 1, '\t', 0) for _, query := range allQueries { rows, err := query.fn() if err != nil { @@ -191,7 +310,7 @@ func (h *Handler) archiveProfilesAndQueries(w http.ResponseWriter, r *http.Reque } err = tw.WriteHeader(&tar.Header{ - Name: query.name + ".txt", + Name: path.Join("profiles", query.name+".txt"), Mode: 0600, Size: int64(buf.Len()), }) @@ -211,17 +330,13 @@ func (h *Handler) archiveProfilesAndQueries(w http.ResponseWriter, r *http.Reque // Close the tar writer. 
if err := tw.Close(); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) - } - - // Close the gzip writer. - if err := gz.Close(); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) + return } // Return the gzipped archive. - w.Header().Set("Content-Disposition", "attachment; filename=profiles.tar.gz") - w.Header().Set("Content-Type", "application/gzip") - io.Copy(w, &resp) // Nothing we can really do about an error at this point. + w.Header().Set("Content-Disposition", "attachment; filename=profiles.tar") + w.Header().Set("Content-Type", "application/x-tar") + io.Copy(w, tarball) } // showShards generates the same values that a StatementExecutor would if a @@ -326,13 +441,10 @@ func joinUint64(a []uint64) string { } // Taken from net/http/pprof/pprof.go -func sleep(w http.ResponseWriter, d time.Duration) { - var clientGone <-chan bool - if cn, ok := w.(http.CloseNotifier); ok { - clientGone = cn.CloseNotify() - } +func sleep(r *http.Request, d time.Duration) { + // wait for either the timer to expire or the contex select { case <-time.After(d): - case <-clientGone: + case <-r.Context().Done(): } } diff --git a/services/httpd/response_logger.go b/services/httpd/response_logger.go index 3b1032872aa..024ba10b447 100644 --- a/services/httpd/response_logger.go +++ b/services/httpd/response_logger.go @@ -101,21 +101,56 @@ func buildLogLine(l *responseLogger, r *http.Request, start time.Time) string { userAgent := r.UserAgent() - return fmt.Sprintf(`%s - %s [%s] "%s %s %s" %s %s "%s" "%s" %s %d`, - host, - detect(username, "-"), - start.Format("02/Jan/2006:15:04:05 -0700"), - r.Method, - uri, - r.Proto, - detect(strconv.Itoa(l.Status()), "-"), - strconv.Itoa(l.Size()), - detect(referer, "-"), - detect(userAgent, "-"), - r.Header.Get("Request-Id"), - // response time, report in microseconds because this is consistent - // with apache's %D parameter in mod_log_config - int64(time.Since(start)/time.Microsecond)) + allKeyValues := 
make([]string, 0, len(r.PostForm)) + if r.Method == "POST" { + for k, values := range r.PostForm { + if k == "p" || k == "P" { + // Note: if there are multiple "p" values, they are all replaced by a single "[REDACTED]". + r.PostForm.Set(k, "[REDACTED]") + values = r.PostForm[k] + } + valuesSlice := make([]string, 0, len(values)) + for _, v := range values { + valuesSlice = append(valuesSlice, fmt.Sprintf("'%s'", v)) + } + joined := strings.Join(valuesSlice, ", ") + allKeyValues = append(allKeyValues, fmt.Sprintf("{'%s': %s}", k, joined)) + } + + return fmt.Sprintf(`%s - %s [%s] "%s %s %s %s" %s %s "%s" "%s" %s %d`, + host, + detect(username, "-"), + start.Format("02/Jan/2006:15:04:05 -0700"), + r.Method, + uri, + r.Proto, + strings.Join(allKeyValues, ", "), + detect(strconv.Itoa(l.Status()), "-"), + strconv.Itoa(l.Size()), + detect(referer, "-"), + detect(userAgent, "-"), + r.Header.Get("Request-Id"), + // response time, report in microseconds because this is consistent + // with apache's %D parameter in mod_log_config + int64(time.Since(start)/time.Microsecond)) + + } else { + return fmt.Sprintf(`%s - %s [%s] "%s %s %s" %s %s "%s" "%s" %s %d`, + host, + detect(username, "-"), + start.Format("02/Jan/2006:15:04:05 -0700"), + r.Method, + uri, + r.Proto, + detect(strconv.Itoa(l.Status()), "-"), + strconv.Itoa(l.Size()), + detect(referer, "-"), + detect(userAgent, "-"), + r.Header.Get("Request-Id"), + // response time, report in microseconds because this is consistent + // with apache's %D parameter in mod_log_config + int64(time.Since(start)/time.Microsecond)) + } } // detect detects the first presence of a non blank string and returns it diff --git a/services/httpd/response_writer.go b/services/httpd/response_writer.go index 11cc5fa5d3f..23bbdaa6af3 100644 --- a/services/httpd/response_writer.go +++ b/services/httpd/response_writer.go @@ -3,6 +3,7 @@ package httpd import ( "encoding/csv" "encoding/json" + "errors" "io" "net/http" "strconv" @@ -122,8 +123,9 @@ type 
jsonFormatter struct { Pretty bool } -func (f *jsonFormatter) WriteResponse(w io.Writer, resp Response) (err error) { +func (f *jsonFormatter) WriteResponse(w io.Writer, resp Response) error { var b []byte + var err error if f.Pretty { b, err = json.MarshalIndent(resp, "", " ") } else { @@ -131,12 +133,31 @@ func (f *jsonFormatter) WriteResponse(w io.Writer, resp Response) (err error) { } if err != nil { - _, err = io.WriteString(w, err.Error()) - } else { - _, err = w.Write(b) + unnestedErr := unnestError(err) + // ignore any errors in this section, we already have a 'real' error to return + resp := Response{Err: unnestedErr} + if f.Pretty { + b, _ = json.MarshalIndent(resp, "", " ") + } else { + b, _ = json.Marshal(resp) + } + w.Write(b) + w.Write([]byte("\n")) + return err + } + + _, err = w.Write(b) + if err != nil { + return err } + _, err = w.Write([]byte("\n")) + return err +} - w.Write([]byte("\n")) +func unnestError(err error) error { + for errNested := err; errNested != nil; errNested = errors.Unwrap(err) { + err = errNested + } return err } diff --git a/services/httpd/service.go b/services/httpd/service.go index 060b66f48b5..b2cf9498ad5 100644 --- a/services/httpd/service.go +++ b/services/httpd/service.go @@ -3,6 +3,7 @@ package httpd // import "github.com/influxdata/influxdb/services/httpd" import ( "bytes" + "context" "crypto/tls" "errors" "fmt" @@ -64,6 +65,8 @@ type Service struct { tlsConfig *tls.Config err chan error + httpServer http.Server + unixSocket bool unixSocketPerm uint32 unixSocketGroup int @@ -77,6 +80,7 @@ type Service struct { // NewService returns a new instance of Service. 
func NewService(c Config) *Service { + handler := NewHandler(c) s := &Service{ addr: c.BindAddress, https: c.HTTPSEnabled, @@ -88,8 +92,11 @@ func NewService(c Config) *Service { unixSocket: c.UnixSocketEnabled, unixSocketPerm: uint32(c.UnixSocketPermissions), bindSocket: c.BindSocket, - Handler: NewHandler(c), - Logger: zap.NewNop(), + Handler: handler, + httpServer: http.Server{ + Handler: handler, + }, + Logger: zap.NewNop(), } if s.tlsConfig == nil { s.tlsConfig = new(tls.Config) @@ -329,6 +336,13 @@ func (s *Service) register() { func (s *Service) Close() error { s.Handler.Close() + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + if err := s.httpServer.Shutdown(ctx); err != nil { + return err + } + if s.ln != nil { if err := s.ln.Close(); err != nil { return err diff --git a/services/meta/data.go b/services/meta/data.go index aa2bf5aeb13..06803d4c6f5 100644 --- a/services/meta/data.go +++ b/services/meta/data.go @@ -1363,9 +1363,16 @@ func (sgi ShardGroupInfo) clone() ShardGroupInfo { return other } -// ShardFor returns the ShardInfo for a Point hash. -func (sgi *ShardGroupInfo) ShardFor(hash uint64) ShardInfo { - return sgi.Shards[hash%uint64(len(sgi.Shards))] +type hashIDer interface { + HashID() uint64 +} + +// ShardFor returns the ShardInfo for a Point or other hashIDer. +func (sgi *ShardGroupInfo) ShardFor(p hashIDer) ShardInfo { + if len(sgi.Shards) == 1 { + return sgi.Shards[0] + } + return sgi.Shards[p.HashID()%uint64(len(sgi.Shards))] } // marshal serializes to a protobuf representation. @@ -1567,7 +1574,7 @@ func (cqi *ContinuousQueryInfo) unmarshal(pb *internal.ContinuousQueryInfo) { cqi.Query = pb.GetQuery() } -var _ query.Authorizer = (*UserInfo)(nil) +var _ query.FineAuthorizer = (*UserInfo)(nil) // UserInfo represents metadata about a user in the system. 
type UserInfo struct { @@ -1585,7 +1592,7 @@ type UserInfo struct { } type User interface { - query.Authorizer + query.FineAuthorizer ID() string AuthorizeUnrestricted() bool } @@ -1613,7 +1620,14 @@ func (u *UserInfo) AuthorizeSeriesWrite(database string, measurement []byte, tag return true } -// AuthorizeUnrestricted allows admins to shortcut access checks. +// IsOpen is a method on FineAuthorizer to indicate all fine auth is permitted and short circuit some checks. +func (u *UserInfo) IsOpen() bool { + return true +} + +// AuthorizeUnrestricted identifies the admin user +// +// Only the pprof endpoint uses this, we should prefer to have explicit permissioning instead. func (u *UserInfo) AuthorizeUnrestricted() bool { return u.Admin } diff --git a/services/meta/internal/meta.pb.go b/services/meta/internal/meta.pb.go index 403b8683179..a294853f9e6 100644 --- a/services/meta/internal/meta.pb.go +++ b/services/meta/internal/meta.pb.go @@ -55,9 +55,13 @@ It has these top-level messages: */ package meta -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" + + proto "github.com/gogo/protobuf/proto" + + math "math" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal diff --git a/services/meta/query_authorizer.go b/services/meta/query_authorizer.go index acf92a86631..741544c6441 100644 --- a/services/meta/query_authorizer.go +++ b/services/meta/query_authorizer.go @@ -48,7 +48,64 @@ func (a *QueryAuthorizer) AuthorizeQuery(u User, query *influxql.Query, database } } - return u.AuthorizeQuery(database, query) + // There is only one OSS implementation of the User interface, and the OSS QueryAuthorizer only works + // with the OSS UserInfo. There is a similar tight coupling between the Enterprise QueryAuthorizer and + // Enterprise UserInfo in closed-source code. 
+ switch user := u.(type) { + case *UserInfo: + // Admin privilege allows the user to execute all statements. + if user.Admin { + return nil + } + + // Check each statement in the query. + for _, stmt := range query.Statements { + // Get the privileges required to execute the statement. + privs, err := stmt.RequiredPrivileges() + if err != nil { + return err + } + + // Make sure the user has the privileges required to execute + // each statement. + for _, p := range privs { + if p.Admin { + // Admin privilege already checked so statement requiring admin + // privilege cannot be run. + return &ErrAuthorize{ + Query: query, + User: user.Name, + Database: database, + Message: fmt.Sprintf("statement '%s', requires admin privilege", stmt), + } + } + + // Use the db name specified by the statement or the db + // name passed by the caller if one wasn't specified by + // the statement. + db := p.Name + if db == "" { + db = database + } + if !user.AuthorizeDatabase(p.Privilege, db) { + return &ErrAuthorize{ + Query: query, + User: user.Name, + Database: database, + Message: fmt.Sprintf("statement '%s', requires %s on %s", stmt, p.Privilege.String(), db), + } + } + } + } + return nil + default: + } + return &ErrAuthorize{ + Query: query, + User: u.ID(), + Database: database, + Message: fmt.Sprintf("Invalid OSS user type %T", u), + } } func (a *QueryAuthorizer) AuthorizeDatabase(u User, priv influxql.Privilege, database string) error { @@ -59,63 +116,23 @@ func (a *QueryAuthorizer) AuthorizeDatabase(u User, priv influxql.Privilege, dat } } - if !u.AuthorizeDatabase(priv, database) { - return &ErrAuthorize{ - Database: database, - Message: fmt.Sprintf("user %q, requires %s for database %q", u.ID(), priv.String(), database), + switch user := u.(type) { + case *UserInfo: + if !user.AuthorizeDatabase(priv, database) { + return &ErrAuthorize{ + Database: database, + Message: fmt.Sprintf("user %q, requires %s for database %q", u.ID(), priv.String(), database), + } } - } - - return 
nil -} - -func (u *UserInfo) AuthorizeQuery(database string, query *influxql.Query) error { - - // Admin privilege allows the user to execute all statements. - if u.Admin { return nil + default: } - - // Check each statement in the query. - for _, stmt := range query.Statements { - // Get the privileges required to execute the statement. - privs, err := stmt.RequiredPrivileges() - if err != nil { - return err - } - - // Make sure the user has the privileges required to execute - // each statement. - for _, p := range privs { - if p.Admin { - // Admin privilege already checked so statement requiring admin - // privilege cannot be run. - return &ErrAuthorize{ - Query: query, - User: u.Name, - Database: database, - Message: fmt.Sprintf("statement '%s', requires admin privilege", stmt), - } - } - - // Use the db name specified by the statement or the db - // name passed by the caller if one wasn't specified by - // the statement. - db := p.Name - if db == "" { - db = database - } - if !u.AuthorizeDatabase(p.Privilege, db) { - return &ErrAuthorize{ - Query: query, - User: u.Name, - Database: database, - Message: fmt.Sprintf("statement '%s', requires %s on %s", stmt, p.Privilege.String(), db), - } - } - } + return &ErrAuthorize{ + Database: database, + User: u.ID(), + Message: fmt.Sprintf("Internal error - incorrect oss user type %T", u), } - return nil + } // ErrAuthorize represents an authorization error. diff --git a/services/meta/write_authorizer.go b/services/meta/write_authorizer.go index 51f3ebd038f..541671366c3 100644 --- a/services/meta/write_authorizer.go +++ b/services/meta/write_authorizer.go @@ -19,11 +19,28 @@ func NewWriteAuthorizer(c *Client) *WriteAuthorizer { // AuthorizeWrite returns nil if the user has permission to write to the database. 
func (a WriteAuthorizer) AuthorizeWrite(username, database string) error { u, err := a.Client.User(username) - if err != nil || u == nil || !u.AuthorizeDatabase(influxql.WritePrivilege, database) { + if err != nil || u == nil { return &ErrAuthorize{ Database: database, Message: fmt.Sprintf("%s not authorized to write to %s", username, database), } } + // There is only one OSS implementation of the User interface, and the OSS WriteAuthorizer only works + // with the OSS UserInfo. There is a similar tight coupling between the Enterprise WriteAuthorizer and + // Enterprise UserInfo in closed-source code. + switch user := u.(type) { + case *UserInfo: + if !user.AuthorizeDatabase(influxql.WritePrivilege, database) { + return &ErrAuthorize{ + Database: database, + Message: fmt.Sprintf("%s not authorized to write to %s", username, database), + } + } + default: + return &ErrAuthorize{ + Database: database, + Message: fmt.Sprintf("Internal error - wrong type %T for oss user", u), + } + } return nil } diff --git a/services/storage/source.pb.go b/services/storage/source.pb.go index 186b9bc50c2..a5456d73c86 100644 --- a/services/storage/source.pb.go +++ b/services/storage/source.pb.go @@ -12,12 +12,17 @@ */ package storage -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" -import _ "github.com/gogo/protobuf/gogoproto" +import ( + fmt "fmt" -import io "io" + proto "github.com/gogo/protobuf/proto" + + math "math" + + _ "github.com/gogo/protobuf/gogoproto" + + io "io" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal diff --git a/services/storage/store.go b/services/storage/store.go index ed6b64bd807..e3c1c2b6b2c 100644 --- a/services/storage/store.go +++ b/services/storage/store.go @@ -230,7 +230,7 @@ func (s *Store) TagKeys(ctx context.Context, req *datatypes.TagKeysRequest) (cur // TODO(jsternberg): Use a real authorizer. 
auth := query.OpenAuthorizer - keys, err := s.TSDBStore.TagKeys(auth, shardIDs, expr) + keys, err := s.TSDBStore.TagKeys(ctx, auth, shardIDs, expr) if err != nil { return nil, err } @@ -326,7 +326,7 @@ func (s *Store) TagValues(ctx context.Context, req *datatypes.TagValuesRequest) // TODO(jsternberg): Use a real authorizer. auth := query.OpenAuthorizer - values, err := s.TSDBStore.TagValues(auth, shardIDs, expr) + values, err := s.TSDBStore.TagValues(ctx, auth, shardIDs, expr) if err != nil { return nil, err } @@ -385,7 +385,7 @@ func (s *Store) MeasurementNames(ctx context.Context, req *MeasurementNamesReque // TODO(jsternberg): Use a real authorizer. auth := query.OpenAuthorizer - values, err := s.TSDBStore.MeasurementNames(auth, database, expr) + values, err := s.TSDBStore.MeasurementNames(ctx, auth, database, expr) if err != nil { return nil, err } diff --git a/storage/reads/datatypes/predicate.pb.go b/storage/reads/datatypes/predicate.pb.go index d7de9254473..dbe4a250bba 100644 --- a/storage/reads/datatypes/predicate.pb.go +++ b/storage/reads/datatypes/predicate.pb.go @@ -8,9 +8,11 @@ import ( fmt "fmt" _ "github.com/gogo/protobuf/gogoproto" proto "github.com/golang/protobuf/proto" - io "io" math "math" math_bits "math/bits" + io "io" + + _ "github.com/gogo/protobuf/gogoproto" ) // Reference imports to suppress errors if they are not otherwise used. 
diff --git a/storage/reads/datatypes/storage_common.pb.go b/storage/reads/datatypes/storage_common.pb.go index 02bde059514..4c9b9e81f72 100644 --- a/storage/reads/datatypes/storage_common.pb.go +++ b/storage/reads/datatypes/storage_common.pb.go @@ -7,6 +7,8 @@ import ( context "context" encoding_binary "encoding/binary" fmt "fmt" + + _ "github.com/gogo/protobuf/gogoproto" types "github.com/gogo/protobuf/types" proto "github.com/golang/protobuf/proto" @@ -17,6 +19,7 @@ import ( io "io" math "math" math_bits "math/bits" + ) // Reference imports to suppress errors if they are not otherwise used. diff --git a/stress/v2/main.go b/stress/v2/main.go index 0675809a284..ddf6ed232f6 100644 --- a/stress/v2/main.go +++ b/stress/v2/main.go @@ -6,7 +6,7 @@ import ( "time" influx "github.com/influxdata/influxdb/client/v2" - "github.com/influxdata/influxdb/stress/v2/stress_client" + stressClient "github.com/influxdata/influxdb/stress/v2/stress_client" "github.com/influxdata/influxdb/stress/v2/stressql" ) diff --git a/stress/v2/statement/exec.go b/stress/v2/statement/exec.go index b82f71c0aae..9f090bb1546 100644 --- a/stress/v2/statement/exec.go +++ b/stress/v2/statement/exec.go @@ -3,7 +3,7 @@ package statement import ( "time" - "github.com/influxdata/influxdb/stress/v2/stress_client" + stressClient "github.com/influxdata/influxdb/stress/v2/stress_client" ) // ExecStatement run outside scripts. 
This functionality is not built out diff --git a/stress/v2/statement/exec_test.go b/stress/v2/statement/exec_test.go index 06c433eac24..82e2c911c2c 100644 --- a/stress/v2/statement/exec_test.go +++ b/stress/v2/statement/exec_test.go @@ -3,7 +3,7 @@ package statement import ( "testing" - "github.com/influxdata/influxdb/stress/v2/stress_client" + stressClient "github.com/influxdata/influxdb/stress/v2/stress_client" ) func TestExecSetID(t *testing.T) { diff --git a/stress/v2/statement/go.go b/stress/v2/statement/go.go index e1d61e7e0f4..3e0c580873e 100644 --- a/stress/v2/statement/go.go +++ b/stress/v2/statement/go.go @@ -4,7 +4,7 @@ import ( "fmt" "time" - "github.com/influxdata/influxdb/stress/v2/stress_client" + stressClient "github.com/influxdata/influxdb/stress/v2/stress_client" ) // GoStatement is a Statement Implementation to allow other statements to be run concurrently diff --git a/stress/v2/statement/go_test.go b/stress/v2/statement/go_test.go index c9ebba3969b..16f1c125a33 100644 --- a/stress/v2/statement/go_test.go +++ b/stress/v2/statement/go_test.go @@ -3,7 +3,7 @@ package statement import ( "testing" - "github.com/influxdata/influxdb/stress/v2/stress_client" + stressClient "github.com/influxdata/influxdb/stress/v2/stress_client" ) func TestGoSetID(t *testing.T) { diff --git a/stress/v2/statement/influxql.go b/stress/v2/statement/influxql.go index 2a1eca2c4b2..9fb9ace3006 100644 --- a/stress/v2/statement/influxql.go +++ b/stress/v2/statement/influxql.go @@ -4,7 +4,7 @@ import ( "log" "time" - "github.com/influxdata/influxdb/stress/v2/stress_client" + stressClient "github.com/influxdata/influxdb/stress/v2/stress_client" ) // InfluxqlStatement is a Statement Implementation that allows statements that parse in InfluxQL to be passed directly to the target instance diff --git a/stress/v2/statement/influxql_test.go b/stress/v2/statement/influxql_test.go index 74c8b45077e..6504dfc2fcd 100644 --- 
a/stress/v2/statement/influxql_test.go +++ b/stress/v2/statement/influxql_test.go @@ -3,7 +3,7 @@ package statement import ( "testing" - "github.com/influxdata/influxdb/stress/v2/stress_client" + stressClient "github.com/influxdata/influxdb/stress/v2/stress_client" ) func TestInfluxQlSetID(t *testing.T) { diff --git a/stress/v2/statement/insert.go b/stress/v2/statement/insert.go index bfa0b242f2c..73e641f3c29 100644 --- a/stress/v2/statement/insert.go +++ b/stress/v2/statement/insert.go @@ -9,7 +9,7 @@ import ( "sync" "time" - "github.com/influxdata/influxdb/stress/v2/stress_client" + stressClient "github.com/influxdata/influxdb/stress/v2/stress_client" ) // InsertStatement is a Statement Implementation that creates points to be written to the target InfluxDB instance diff --git a/stress/v2/statement/insert_test.go b/stress/v2/statement/insert_test.go index 4fc04182327..9a1f22669d1 100644 --- a/stress/v2/statement/insert_test.go +++ b/stress/v2/statement/insert_test.go @@ -4,7 +4,7 @@ import ( "strings" "testing" - "github.com/influxdata/influxdb/stress/v2/stress_client" + stressClient "github.com/influxdata/influxdb/stress/v2/stress_client" ) func TestInsertSetID(t *testing.T) { diff --git a/stress/v2/statement/query.go b/stress/v2/statement/query.go index b9d9032e0f8..179626d6205 100644 --- a/stress/v2/statement/query.go +++ b/stress/v2/statement/query.go @@ -6,7 +6,7 @@ import ( "time" "github.com/influxdata/influxdb/models" - "github.com/influxdata/influxdb/stress/v2/stress_client" + stressClient "github.com/influxdata/influxdb/stress/v2/stress_client" ) // QueryStatement is a Statement Implementation to run queries on the target InfluxDB instance diff --git a/stress/v2/statement/query_test.go b/stress/v2/statement/query_test.go index b9b607f8f49..4aa3ce795d0 100644 --- a/stress/v2/statement/query_test.go +++ b/stress/v2/statement/query_test.go @@ -3,7 +3,7 @@ package statement import ( 
"testing" - "github.com/influxdata/influxdb/stress/v2/stress_client" + stressClient "github.com/influxdata/influxdb/stress/v2/stress_client" ) func TestQuerySetID(t *testing.T) { diff --git a/stress/v2/statement/set.go b/stress/v2/statement/set.go index 825c74f3b11..0af5794d6aa 100644 --- a/stress/v2/statement/set.go +++ b/stress/v2/statement/set.go @@ -4,7 +4,7 @@ import ( "fmt" "strings" - "github.com/influxdata/influxdb/stress/v2/stress_client" + stressClient "github.com/influxdata/influxdb/stress/v2/stress_client" ) // SetStatement set state variables for the test diff --git a/stress/v2/statement/set_test.go b/stress/v2/statement/set_test.go index c6c9febb4f5..88d1ea03600 100644 --- a/stress/v2/statement/set_test.go +++ b/stress/v2/statement/set_test.go @@ -4,7 +4,7 @@ import ( "fmt" "testing" - "github.com/influxdata/influxdb/stress/v2/stress_client" + stressClient "github.com/influxdata/influxdb/stress/v2/stress_client" ) func TestSetSetID(t *testing.T) { diff --git a/stress/v2/statement/statement.go b/stress/v2/statement/statement.go index 53cd40060fe..81afaf746e6 100644 --- a/stress/v2/statement/statement.go +++ b/stress/v2/statement/statement.go @@ -4,7 +4,7 @@ import ( "log" "strconv" - "github.com/influxdata/influxdb/stress/v2/stress_client" + stressClient "github.com/influxdata/influxdb/stress/v2/stress_client" ) // Statement is the common interface to shape the testing environment and prepare database requests diff --git a/stress/v2/statement/wait.go b/stress/v2/statement/wait.go index e047761d868..c0966daf517 100644 --- a/stress/v2/statement/wait.go +++ b/stress/v2/statement/wait.go @@ -4,7 +4,7 @@ import ( "fmt" "time" - "github.com/influxdata/influxdb/stress/v2/stress_client" + stressClient "github.com/influxdata/influxdb/stress/v2/stress_client" ) // WaitStatement is a Statement Implementation to prevent the test from returning to early when running GoStatements diff 
--git a/stress/v2/statement/wait_test.go b/stress/v2/statement/wait_test.go index 5ad0b32a9cc..255ea3d4dd8 100644 --- a/stress/v2/statement/wait_test.go +++ b/stress/v2/statement/wait_test.go @@ -4,7 +4,7 @@ import ( "strings" "testing" - "github.com/influxdata/influxdb/stress/v2/stress_client" + stressClient "github.com/influxdata/influxdb/stress/v2/stress_client" ) func TestWaitSetID(t *testing.T) { diff --git a/tcp/mux.go b/tcp/mux.go index 25dae90d213..38f2d9dc0c7 100644 --- a/tcp/mux.go +++ b/tcp/mux.go @@ -79,6 +79,7 @@ func (mux *Mux) Serve(ln net.Listener) error { continue } if err != nil { + mux.Logger.Printf("tcp.Mux: Listener at %s failed failed to accept a connection, closing all listeners - %s", ln.Addr(), err) // Wait for all connections to be demux mux.wg.Wait() @@ -90,7 +91,9 @@ func (mux *Mux) Serve(ln net.Listener) error { wg.Add(1) go func(ln *listener) { defer wg.Done() - ln.Close() + if err := ln.Close(); err != nil { + mux.Logger.Printf("tcp.Mux: Closing the listener at %s failed - %s", ln.Addr().String(), err) + } }(ln) } mux.mu.RUnlock() @@ -100,9 +103,10 @@ func (mux *Mux) Serve(ln net.Listener) error { dl := mux.defaultListener mux.mu.RUnlock() if dl != nil { - dl.Close() + if closeErr := dl.Close(); closeErr != nil { + mux.Logger.Printf("tcp.Mux: Closing the default listener at %s failed - %s", ln.Addr().String(), closeErr) + } } - return err } diff --git a/tests/backup_restore_test.go b/tests/backup_restore_test.go index 4d3bee6ae64..a215e7320dd 100644 --- a/tests/backup_restore_test.go +++ b/tests/backup_restore_test.go @@ -1,19 +1,18 @@ package tests import ( + "fmt" "io/ioutil" "net" "os" "path/filepath" + "strings" "testing" "time" - "fmt" - "github.com/influxdata/influxdb/cmd/influxd/backup" "github.com/influxdata/influxdb/cmd/influxd/restore" "github.com/influxdata/influxdb/toml" - "strings" ) func TestServer_BackupAndRestore(t *testing.T) { diff --git a/tests/server_concurrent_test.go 
b/tests/server_concurrent_test.go index ac9e989ee42..8b6178958c2 100644 --- a/tests/server_concurrent_test.go +++ b/tests/server_concurrent_test.go @@ -1,6 +1,7 @@ package tests import ( + "context" "fmt" "strings" "sync" @@ -95,7 +96,7 @@ func TestConcurrentServer_TagValues(t *testing.T) { ids = append(ids, si.ID) } } - srv.TSDBStore.TagValues(nil, ids, cond) + srv.TSDBStore.TagValues(context.Background(), nil, ids, cond) } var f3 = func() { s.DropDatabase("db0") } @@ -133,7 +134,7 @@ func TestConcurrentServer_ShowMeasurements(t *testing.T) { if !ok { t.Fatal("Not a local server") } - srv.TSDBStore.MeasurementNames(query.OpenAuthorizer, "db0", nil) + srv.TSDBStore.MeasurementNames(context.Background(), query.OpenAuthorizer, "db0", nil) } runTest(10*time.Second, f1, f2) diff --git a/tsdb/config.go b/tsdb/config.go index bcc4fd4e719..025a84713ce 100644 --- a/tsdb/config.go +++ b/tsdb/config.go @@ -93,6 +93,9 @@ type Config struct { // Enables unicode validation on series keys on write. ValidateKeys bool `toml:"validate-keys"` + // Enables strict error handling. For example, forces SELECT INTO to err out on INF values. 
+ StrictErrorHandling bool `toml:"strict-error-handling"` + // Query logging QueryLogEnabled bool `toml:"query-log-enabled"` @@ -155,7 +158,8 @@ func NewConfig() Config { Engine: DefaultEngine, Index: DefaultIndex, - QueryLogEnabled: true, + StrictErrorHandling: false, + QueryLogEnabled: true, CacheMaxMemorySize: toml.Size(DefaultCacheMaxMemorySize), CacheSnapshotMemorySize: toml.Size(DefaultCacheSnapshotMemorySize), @@ -229,6 +233,7 @@ func (c Config) Diagnostics() (*diagnostics.Diagnostics, error) { "dir": c.Dir, "wal-dir": c.WALDir, "wal-fsync-delay": c.WALFsyncDelay, + "strict-error-handling": c.StrictErrorHandling, "cache-max-memory-size": c.CacheMaxMemorySize, "cache-snapshot-memory-size": c.CacheSnapshotMemorySize, "cache-snapshot-write-cold-duration": c.CacheSnapshotWriteColdDuration, diff --git a/tsdb/engine.go b/tsdb/engine.go index 4f2d0a3511a..eb14c18cda8 100644 --- a/tsdb/engine.go +++ b/tsdb/engine.go @@ -41,7 +41,7 @@ type Engine interface { LoadMetadataIndex(shardID uint64, index Index) error - CreateSnapshot() (string, error) + CreateSnapshot(skipCacheOk bool) (string, error) Backup(w io.Writer, basePath string, since time.Time) error Export(w io.Writer, basePath string, start time.Time, end time.Time) error Restore(r io.Reader, basePath string) error diff --git a/tsdb/engine/tsm1/engine.go b/tsdb/engine/tsm1/engine.go index 51afc508ac6..18af444f7b8 100644 --- a/tsdb/engine/tsm1/engine.go +++ b/tsdb/engine/tsm1/engine.go @@ -87,6 +87,9 @@ const ( // deleteFlushThreshold is the size in bytes of a batch of series keys to delete. deleteFlushThreshold = 50 * 1024 * 1024 + + // DoNotCompactFile is the name of the file that disables compactions. + DoNotCompactFile = "do_not_compact" ) // Statistics gathered by the engine. @@ -909,26 +912,16 @@ func (e *Engine) Free() error { // of the files in the archive. It will force a snapshot of the WAL first // then perform the backup with a read lock against the file store. 
This means // that new TSM files will not be able to be created in this shard while the -// backup is running. For shards that are still acively getting writes, this -// could cause the WAL to backup, increasing memory usage and evenutally rejecting writes. +// backup is running. For shards that are still actively getting writes, this +// could cause the WAL to backup, increasing memory usage and eventually rejecting writes. func (e *Engine) Backup(w io.Writer, basePath string, since time.Time) error { var err error var path string - for i := 0; i < 3; i++ { - path, err = e.CreateSnapshot() - if err != nil { - switch err { - case ErrSnapshotInProgress: - backoff := time.Duration(math.Pow(32, float64(i))) * time.Millisecond - time.Sleep(backoff) - default: - return err - } - } - } - if err == ErrSnapshotInProgress { - e.logger.Warn("Snapshotter busy: Backup proceeding without snapshot contents.") + path, err = e.CreateSnapshot(true) + if err != nil { + return err } + // Remove the temporary snapshot dir defer func() { if err := os.RemoveAll(path); err != nil { @@ -945,7 +938,6 @@ func (e *Engine) timeStampFilterTarFile(start, end time.Time) func(f os.FileInfo return intar.StreamFile(fi, shardRelativePath, fullPath, tw) } - var tombstonePath string f, err := os.Open(fullPath) if err != nil { return err @@ -956,9 +948,8 @@ func (e *Engine) timeStampFilterTarFile(start, end time.Time) func(f os.FileInfo } // Grab the tombstone file if one exists. 
- if r.HasTombstones() { - tombstonePath = filepath.Base(r.TombstoneFiles()[0].Path) - return intar.StreamFile(fi, shardRelativePath, tombstonePath, tw) + if ts := r.TombstoneStats(); ts.TombstoneExists { + return intar.StreamFile(fi, shardRelativePath, filepath.Base(ts.Path), tw) } min, max := r.TimeRange() @@ -995,7 +986,7 @@ func (e *Engine) timeStampFilterTarFile(start, end time.Time) func(f os.FileInfo } func (e *Engine) Export(w io.Writer, basePath string, start time.Time, end time.Time) error { - path, err := e.CreateSnapshot() + path, err := e.CreateSnapshot(false) if err != nil { return err } @@ -1955,9 +1946,19 @@ func (e *Engine) WriteSnapshot() (err error) { } // CreateSnapshot will create a temp directory that holds -// temporary hardlinks to the underylyng shard files. -func (e *Engine) CreateSnapshot() (string, error) { - if err := e.WriteSnapshot(); err != nil { +// temporary hardlinks to the underlying shard files. +// skipCacheOk controls whether it is permissible to fail writing out +// in-memory cache data when a previous snapshot is in progress +func (e *Engine) CreateSnapshot(skipCacheOk bool) (string, error) { + err := e.WriteSnapshot() + for i := 0; (i < 3) && (err == ErrSnapshotInProgress); i += 1 { + backoff := time.Duration(math.Pow(32, float64(i))) * time.Millisecond + time.Sleep(backoff) + err = e.WriteSnapshot() + } + if (err == ErrSnapshotInProgress) && skipCacheOk { + e.logger.Warn("Snapshotter busy: proceeding without cache contents.") + } else if err != nil { return "", err } @@ -2066,6 +2067,8 @@ func (e *Engine) compact(wg *sync.WaitGroup) { t := time.NewTicker(time.Second) defer t.Stop() + var nextDisabledMsg time.Time + for { e.mu.RLock() quit := e.done @@ -2076,6 +2079,17 @@ func (e *Engine) compact(wg *sync.WaitGroup) { return case <-t.C: + // See if compactions are disabled. 
+ doNotCompactFile := filepath.Join(e.Path(), DoNotCompactFile) + _, err := os.Stat(doNotCompactFile) + if err == nil { + now := time.Now() + if now.After(nextDisabledMsg) { + e.logger.Info("TSM compaction disabled", logger.Shard(e.id), zap.String("reason", doNotCompactFile)) + nextDisabledMsg = now.Add(time.Minute * 15) + } + continue + } // Find our compaction plans level1Groups := e.CompactionPlan.PlanLevel(1) @@ -2240,7 +2254,7 @@ func (s *compactionStrategy) Apply() { // compactGroup executes the compaction strategy against a single CompactionGroup. func (s *compactionStrategy) compactGroup() { group := s.group - log, logEnd := logger.NewOperation(s.logger, "TSM compaction", "tsm1_compact_group") + log, logEnd := logger.NewOperation(s.logger, "TSM compaction", "tsm1_compact_group", logger.Shard(s.engine.id)) defer logEnd() log.Info("Beginning compaction", zap.Int("tsm1_files_n", len(group))) diff --git a/tsdb/engine/tsm1/engine_internal_test.go b/tsdb/engine/tsm1/engine_internal_test.go new file mode 100644 index 00000000000..65ebd0fdfb4 --- /dev/null +++ b/tsdb/engine/tsm1/engine_internal_test.go @@ -0,0 +1,117 @@ +package tsm1 + +import ( + "io/ioutil" + "os" + "path/filepath" + "runtime" + "testing" + "time" + + "github.com/influxdata/influxdb/logger" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/tsdb" + "github.com/influxdata/influxdb/tsdb/index/inmem" +) + +func TestEngine_ConcurrentShardSnapshots(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Skipping on windows") + } + + tmpDir, err := ioutil.TempDir("", "shard_test") + if err != nil { + t.Fatalf("error creating temporary directory: %s", err.Error()) + } + defer os.RemoveAll(tmpDir) + tmpShard := filepath.Join(tmpDir, "shard") + tmpWal := filepath.Join(tmpDir, "wal") + + sfile := NewSeriesFile(tmpDir) + defer sfile.Close() + + opts := tsdb.NewEngineOptions() + opts.Config.WALDir = filepath.Join(tmpDir, "wal") + opts.InmemIndex = 
inmem.NewIndex(filepath.Base(tmpDir), sfile) + opts.SeriesIDSets = seriesIDSets([]*tsdb.SeriesIDSet{}) + + sh := tsdb.NewShard(1, tmpShard, tmpWal, sfile, opts) + if err := sh.Open(); err != nil { + t.Fatalf("error opening shard: %s", err.Error()) + } + defer sh.Close() + + points := make([]models.Point, 0, 10000) + for i := 0; i < cap(points); i++ { + points = append(points, models.MustNewPoint( + "cpu", + models.NewTags(map[string]string{"host": "server"}), + map[string]interface{}{"value": 1.0}, + time.Unix(int64(i), 0), + )) + } + err = sh.WritePoints(points) + if err != nil { + t.Fatalf(err.Error()) + } + + engineInterface, err := sh.Engine() + if err != nil { + t.Fatalf("error retrieving shard.Engine(): %s", err.Error()) + } + + // Get the struct underlying the interface. Not a recommended practice. + realEngineStruct, ok := (engineInterface).(*Engine) + if !ok { + t.Log("Engine type does not permit simulating Cache race conditions") + return + } + // fake a race condition in snapshotting the cache. + realEngineStruct.Cache.snapshotting = true + defer func() { + realEngineStruct.Cache.snapshotting = false + }() + + snapshotFunc := func(skipCacheOk bool) { + if f, err := sh.CreateSnapshot(skipCacheOk); err == nil { + if err = os.RemoveAll(f); err != nil { + t.Fatalf("Failed to clean up in TestEngine_ConcurrentShardSnapshots: %s", err.Error()) + } + } else if err == ErrSnapshotInProgress { + if skipCacheOk { + t.Fatalf("failing to ignore this error,: %s", err.Error()) + } + } else if err != nil { + t.Fatalf("error creating shard snapshot: %s", err.Error()) + } + } + + // Permit skipping cache in the snapshot + snapshotFunc(true) + // do not permit skipping the cache in the snapshot + snapshotFunc(false) + realEngineStruct.Cache.snapshotting = false +} + +// NewSeriesFile returns a new instance of SeriesFile with a temporary file path. 
+func NewSeriesFile(tmpDir string) *tsdb.SeriesFile { + dir, err := ioutil.TempDir(tmpDir, "tsdb-series-file-") + if err != nil { + panic(err) + } + f := tsdb.NewSeriesFile(dir) + f.Logger = logger.New(os.Stdout) + if err := f.Open(); err != nil { + panic(err) + } + return f +} + +type seriesIDSets []*tsdb.SeriesIDSet + +func (a seriesIDSets) ForEach(f func(ids *tsdb.SeriesIDSet)) error { + for _, v := range a { + f(v) + } + return nil +} diff --git a/tsdb/engine/tsm1/file_store.go b/tsdb/engine/tsm1/file_store.go index e5220b74916..09c19d348a5 100644 --- a/tsdb/engine/tsm1/file_store.go +++ b/tsdb/engine/tsm1/file_store.go @@ -110,9 +110,9 @@ type TSMFile interface { // HasTombstones returns true if file contains values that have been deleted. HasTombstones() bool - // TombstoneFiles returns the tombstone filestats if there are any tombstones + // TombstoneStats returns the tombstone filestats if there are any tombstones // written for this file. - TombstoneFiles() []FileStat + TombstoneStats() TombstoneStat // Close closes the underlying file resources. Close() error @@ -121,7 +121,7 @@ type TSMFile interface { Size() uint32 // Rename renames the existing TSM file to a new name and replaces the mmap backing slice using the new - // file name. Index and Reader state are not re-initialized. + // file name. Index and Reader state are not re-initialized. Rename(path string) error // Remove deletes the file from the filesystem. @@ -205,6 +205,14 @@ type FileStat struct { MinKey, MaxKey []byte } +// TombstoneStat holds information about a possible tombstone file on disk. +type TombstoneStat struct { + TombstoneExists bool + Path string + LastModified int64 + Size uint32 +} + // OverlapsTimeRange returns true if the time range of the file intersect min and max. 
func (f FileStat) OverlapsTimeRange(min, max int64) bool { return f.MinTime <= max && f.MaxTime >= min @@ -569,7 +577,7 @@ func (f *FileStore) Open() error { // Accumulate file store size stats atomic.AddInt64(&f.stats.DiskBytes, int64(res.r.Size())) - for _, ts := range res.r.TombstoneFiles() { + if ts := res.r.TombstoneStats(); ts.TombstoneExists { atomic.AddInt64(&f.stats.DiskBytes, int64(ts.Size)) } @@ -802,8 +810,8 @@ func (f *FileStore) replace(oldFiles, newFiles []string, updatedFn func(r []TSMF return err } - for _, t := range file.TombstoneFiles() { - if err := f.obs.FileUnlinking(t.Path); err != nil { + if ts := file.TombstoneStats(); ts.TombstoneExists { + if err := f.obs.FileUnlinking(ts.Path); err != nil { return err } } @@ -821,8 +829,8 @@ func (f *FileStore) replace(oldFiles, newFiles []string, updatedFn func(r []TSMF if file.InUse() { // Copy all the tombstones related to this TSM file var deletes []string - for _, t := range file.TombstoneFiles() { - deletes = append(deletes, t.Path) + if ts := file.TombstoneStats(); ts.TombstoneExists { + deletes = append(deletes, ts.Path) } // Rename the TSM file used by this reader @@ -884,10 +892,9 @@ func (f *FileStore) replace(oldFiles, newFiles []string, updatedFn func(r []TSMF var totalSize int64 for _, file := range f.files { totalSize += int64(file.Size()) - for _, ts := range file.TombstoneFiles() { + if ts := file.TombstoneStats(); ts.TombstoneExists { totalSize += int64(ts.Size) } - } atomic.StoreInt64(&f.stats.DiskBytes, totalSize) @@ -1048,7 +1055,7 @@ func (f *FileStore) MakeSnapshotLinks(destPath string, files []TSMFile) error { if err := os.Link(tsmf.Path(), newpath); err != nil { return fmt.Errorf("error creating tsm hard link: %q", err) } - for _, tf := range tsmf.TombstoneFiles() { + if tf := tsmf.TombstoneStats(); tf.TombstoneExists { newpath := filepath.Join(destPath, filepath.Base(tf.Path)) if err := os.Link(tf.Path, newpath); err != nil { return fmt.Errorf("error creating tombstone hard 
link: %q", err) @@ -1091,7 +1098,7 @@ func (f *FileStore) CreateSnapshot() (string, error) { if err := os.RemoveAll(tmpPath); err != nil { // report if, for some reason, we couldn't remove our temporary // directory. - return "", fmt.Errorf("CreateSnapshot() failed to create links and failed to remove temporary direcotry %v: %w", tmpPath, err) + return "", fmt.Errorf("CreateSnapshot() failed to create links and failed to remove temporary direcotry %v: %v", tmpPath, err) } return "", err } diff --git a/tsdb/engine/tsm1/file_store_key_iterator_test.go b/tsdb/engine/tsm1/file_store_key_iterator_test.go index d9e22db2100..abb8a879161 100644 --- a/tsdb/engine/tsm1/file_store_key_iterator_test.go +++ b/tsdb/engine/tsm1/file_store_key_iterator_test.go @@ -149,34 +149,36 @@ func (t *mockTSMFile) KeyAt(idx int) ([]byte, byte) { return []byte(t.keys[idx]), BlockFloat64 } -func (*mockTSMFile) Path() string { panic("implement me") } -func (*mockTSMFile) Read(key []byte, t int64) ([]Value, error) { panic("implement me") } -func (*mockTSMFile) ReadAt(entry *IndexEntry, values []Value) ([]Value, error) { panic("implement me") } -func (*mockTSMFile) Entries(key []byte) []IndexEntry { panic("implement me") } -func (*mockTSMFile) ReadEntries(key []byte, entries *[]IndexEntry) []IndexEntry { panic("implement me") } -func (*mockTSMFile) ContainsValue(key []byte, t int64) bool { panic("implement me") } -func (*mockTSMFile) Contains(key []byte) bool { panic("implement me") } -func (*mockTSMFile) OverlapsTimeRange(min, max int64) bool { panic("implement me") } -func (*mockTSMFile) OverlapsKeyRange(min, max []byte) bool { panic("implement me") } -func (*mockTSMFile) TimeRange() (int64, int64) { panic("implement me") } -func (*mockTSMFile) TombstoneRange(key []byte) []TimeRange { panic("implement me") } -func (*mockTSMFile) KeyRange() ([]byte, []byte) { panic("implement me") } -func (*mockTSMFile) Type(key []byte) (byte, error) { panic("implement me") } -func (*mockTSMFile) BatchDelete() 
BatchDeleter { panic("implement me") } -func (*mockTSMFile) Delete(keys [][]byte) error { panic("implement me") } -func (*mockTSMFile) DeleteRange(keys [][]byte, min, max int64) error { panic("implement me") } -func (*mockTSMFile) HasTombstones() bool { panic("implement me") } -func (*mockTSMFile) TombstoneFiles() []FileStat { panic("implement me") } -func (*mockTSMFile) Close() error { panic("implement me") } -func (*mockTSMFile) Size() uint32 { panic("implement me") } -func (*mockTSMFile) Rename(path string) error { panic("implement me") } -func (*mockTSMFile) Remove() error { panic("implement me") } -func (*mockTSMFile) InUse() bool { panic("implement me") } -func (*mockTSMFile) Ref() { panic("implement me") } -func (*mockTSMFile) Unref() { panic("implement me") } -func (*mockTSMFile) Stats() FileStat { panic("implement me") } -func (*mockTSMFile) BlockIterator() *BlockIterator { panic("implement me") } -func (*mockTSMFile) Free() error { panic("implement me") } +func (*mockTSMFile) Path() string { panic("implement me") } +func (*mockTSMFile) Read(key []byte, t int64) ([]Value, error) { panic("implement me") } +func (*mockTSMFile) ReadAt(entry *IndexEntry, values []Value) ([]Value, error) { panic("implement me") } +func (*mockTSMFile) Entries(key []byte) []IndexEntry { panic("implement me") } +func (*mockTSMFile) ReadEntries(key []byte, entries *[]IndexEntry) []IndexEntry { + panic("implement me") +} +func (*mockTSMFile) ContainsValue(key []byte, t int64) bool { panic("implement me") } +func (*mockTSMFile) Contains(key []byte) bool { panic("implement me") } +func (*mockTSMFile) OverlapsTimeRange(min, max int64) bool { panic("implement me") } +func (*mockTSMFile) OverlapsKeyRange(min, max []byte) bool { panic("implement me") } +func (*mockTSMFile) TimeRange() (int64, int64) { panic("implement me") } +func (*mockTSMFile) TombstoneRange(key []byte) []TimeRange { panic("implement me") } +func (*mockTSMFile) KeyRange() ([]byte, []byte) { panic("implement me") } +func 
(*mockTSMFile) Type(key []byte) (byte, error) { panic("implement me") } +func (*mockTSMFile) BatchDelete() BatchDeleter { panic("implement me") } +func (*mockTSMFile) Delete(keys [][]byte) error { panic("implement me") } +func (*mockTSMFile) DeleteRange(keys [][]byte, min, max int64) error { panic("implement me") } +func (*mockTSMFile) HasTombstones() bool { panic("implement me") } +func (*mockTSMFile) TombstoneStats() TombstoneStat { panic("implement me") } +func (*mockTSMFile) Close() error { panic("implement me") } +func (*mockTSMFile) Size() uint32 { panic("implement me") } +func (*mockTSMFile) Rename(path string) error { panic("implement me") } +func (*mockTSMFile) Remove() error { panic("implement me") } +func (*mockTSMFile) InUse() bool { panic("implement me") } +func (*mockTSMFile) Ref() { panic("implement me") } +func (*mockTSMFile) Unref() { panic("implement me") } +func (*mockTSMFile) Stats() FileStat { panic("implement me") } +func (*mockTSMFile) BlockIterator() *BlockIterator { panic("implement me") } +func (*mockTSMFile) Free() error { panic("implement me") } func (*mockTSMFile) ReadFloatBlockAt(*IndexEntry, *[]FloatValue) ([]FloatValue, error) { panic("implement me") diff --git a/tsdb/engine/tsm1/file_store_test.go b/tsdb/engine/tsm1/file_store_test.go index f81a2cdcba1..0e5cf54353a 100644 --- a/tsdb/engine/tsm1/file_store_test.go +++ b/tsdb/engine/tsm1/file_store_test.go @@ -2739,8 +2739,8 @@ func TestFileStore_CreateSnapshot(t *testing.T) { if _, err := os.Stat(p); os.IsNotExist(err) { t.Fatalf("unable to find file %q", p) } - for _, tf := range f.TombstoneFiles() { - p := filepath.Join(s, filepath.Base(tf.Path)) + if ts := f.TombstoneStats(); ts.TombstoneExists { + p := filepath.Join(s, filepath.Base(ts.Path)) t.Logf("checking for existence of hard link %q", p) if _, err := os.Stat(p); os.IsNotExist(err) { t.Fatalf("unable to find file %q", p) diff --git a/tsdb/engine/tsm1/reader.go b/tsdb/engine/tsm1/reader.go index 67b28d9b26c..0fc840fa661 
100644 --- a/tsdb/engine/tsm1/reader.go +++ b/tsdb/engine/tsm1/reader.go @@ -524,7 +524,7 @@ func (t *TSMReader) Size() uint32 { func (t *TSMReader) LastModified() int64 { t.mu.RLock() lm := t.lastModified - for _, ts := range t.tombstoner.TombstoneFiles() { + if ts := t.tombstoner.TombstoneStats(); ts.TombstoneExists { if ts.LastModified > lm { lm = ts.LastModified } @@ -542,9 +542,9 @@ func (t *TSMReader) HasTombstones() bool { } // TombstoneFiles returns any tombstone files associated with this TSM file. -func (t *TSMReader) TombstoneFiles() []FileStat { +func (t *TSMReader) TombstoneStats() TombstoneStat { t.mu.RLock() - fs := t.tombstoner.TombstoneFiles() + fs := t.tombstoner.TombstoneStats() t.mu.RUnlock() return fs } diff --git a/tsdb/engine/tsm1/reader_test.go b/tsdb/engine/tsm1/reader_test.go index 8e5a636ed13..e5c0bb42848 100644 --- a/tsdb/engine/tsm1/reader_test.go +++ b/tsdb/engine/tsm1/reader_test.go @@ -8,6 +8,8 @@ import ( "path/filepath" "sort" "testing" + + "github.com/stretchr/testify/require" ) func fatal(t *testing.T, msg string, err error) { @@ -465,9 +467,7 @@ func TestTSMReader_MMAP_TombstoneOutsideTimeRange(t *testing.T) { t.Fatalf("HasTombstones mismatch: got %v, exp %v", got, exp) } - if got, exp := len(r.TombstoneFiles()), 0; got != exp { - t.Fatalf("TombstoneFiles len mismatch: got %v, exp %v", got, exp) - } + require.False(t, r.TombstoneStats().TombstoneExists) } func TestTSMReader_MMAP_TombstoneOutsideKeyRange(t *testing.T) { @@ -529,10 +529,7 @@ func TestTSMReader_MMAP_TombstoneOutsideKeyRange(t *testing.T) { t.Fatalf("HasTombstones mismatch: got %v, exp %v", got, exp) } - if got, exp := len(r.TombstoneFiles()), 0; got != exp { - t.Fatalf("TombstoneFiles len mismatch: got %v, exp %v", got, exp) - - } + require.False(t, r.TombstoneStats().TombstoneExists) } func TestTSMReader_MMAP_TombstoneOverlapKeyRange(t *testing.T) { @@ -598,9 +595,7 @@ func TestTSMReader_MMAP_TombstoneOverlapKeyRange(t *testing.T) { t.Fatalf("HasTombstones 
mismatch: got %v, exp %v", got, exp) } - if got, exp := len(r.TombstoneFiles()), 1; got != exp { - t.Fatalf("TombstoneFiles len mismatch: got %v, exp %v", got, exp) - } + require.True(t, r.TombstoneStats().TombstoneExists) } func TestTSMReader_MMAP_TombstoneFullRange(t *testing.T) { diff --git a/tsdb/engine/tsm1/ring_test.go b/tsdb/engine/tsm1/ring_test.go index 394de7246e1..c8bea3abf13 100644 --- a/tsdb/engine/tsm1/ring_test.go +++ b/tsdb/engine/tsm1/ring_test.go @@ -79,7 +79,9 @@ func benchmarkRingGetPartition(b *testing.B, r *ring, keys int) { } } -func BenchmarkRing_getPartition_100(b *testing.B) { benchmarkRingGetPartition(b, MustNewRing(256), 100) } +func BenchmarkRing_getPartition_100(b *testing.B) { + benchmarkRingGetPartition(b, MustNewRing(256), 100) +} func BenchmarkRing_getPartition_1000(b *testing.B) { benchmarkRingGetPartition(b, MustNewRing(256), 1000) } diff --git a/tsdb/engine/tsm1/tombstone.go b/tsdb/engine/tsm1/tombstone.go index 8e3971cc3cd..7189b1ef80e 100644 --- a/tsdb/engine/tsm1/tombstone.go +++ b/tsdb/engine/tsm1/tombstone.go @@ -38,15 +38,14 @@ type Tombstoner struct { FilterFn func(k []byte) bool + // Tombstones that have been written but not flushed to disk yet. + tombstones []Tombstone // cache of the stats for this tombstone - fileStats []FileStat + tombstoneStats TombstoneStat // indicates that the stats may be out of sync with what is on disk and they // should be refreshed. statsLoaded bool - // Tombstones that have been written but not flushed to disk yet. - tombstones []Tombstone - // These are references used for pending writes that have not been committed. If // these are nil, then no pending writes are in progress. gz *gzip.Writer @@ -183,43 +182,52 @@ func (t *Tombstoner) Delete() error { // HasTombstones return true if there are any tombstone entries recorded. 
func (t *Tombstoner) HasTombstones() bool { - files := t.TombstoneFiles() + stats := t.TombstoneStats() + if !stats.TombstoneExists { + return false + } + if stats.Size > 0 { + return true + } + t.mu.RLock() n := len(t.tombstones) t.mu.RUnlock() - return len(files) > 0 && files[0].Size > 0 || n > 0 + return n > 0 } // TombstoneFiles returns any tombstone files associated with Tombstoner's TSM file. -func (t *Tombstoner) TombstoneFiles() []FileStat { +func (t *Tombstoner) TombstoneStats() TombstoneStat { t.mu.RLock() if t.statsLoaded { - stats := t.fileStats + stats := t.tombstoneStats t.mu.RUnlock() return stats } t.mu.RUnlock() stat, err := os.Stat(t.tombstonePath()) - if os.IsNotExist(err) || err != nil { + if err != nil { t.mu.Lock() // The file doesn't exist so record that we tried to load it so // we don't continue to keep trying. This is the common case. t.statsLoaded = os.IsNotExist(err) - t.fileStats = t.fileStats[:0] + t.tombstoneStats.TombstoneExists = false + stats := t.tombstoneStats t.mu.Unlock() - return nil + return stats } t.mu.Lock() - t.fileStats = append(t.fileStats[:0], FileStat{ - Path: t.tombstonePath(), - LastModified: stat.ModTime().UnixNano(), - Size: uint32(stat.Size()), - }) + t.tombstoneStats = TombstoneStat{ + TombstoneExists: true, + Path: t.tombstonePath(), + LastModified: stat.ModTime().UnixNano(), + Size: uint32(stat.Size()), + } t.statsLoaded = true - stats := t.fileStats + stats := t.tombstoneStats t.mu.Unlock() return stats diff --git a/tsdb/engine/tsm1/tombstone_test.go b/tsdb/engine/tsm1/tombstone_test.go index 26d0ad3eeb3..330451d4dd9 100644 --- a/tsdb/engine/tsm1/tombstone_test.go +++ b/tsdb/engine/tsm1/tombstone_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/influxdata/influxdb/tsdb/engine/tsm1" + "github.com/stretchr/testify/require" ) func TestTombstoner_Add(t *testing.T) { @@ -21,10 +22,8 @@ func TestTombstoner_Add(t *testing.T) { t.Fatalf("length mismatch: got %v, exp %v", got, exp) } - stats := 
ts.TombstoneFiles() - if got, exp := len(stats), 0; got != exp { - t.Fatalf("stat length mismatch: got %v, exp %v", got, exp) - } + stats := ts.TombstoneStats() + require.False(t, stats.TombstoneExists) ts.Add([][]byte{[]byte("foo")}) @@ -33,22 +32,11 @@ func TestTombstoner_Add(t *testing.T) { } entries = mustReadAll(ts) - stats = ts.TombstoneFiles() - if got, exp := len(stats), 1; got != exp { - t.Fatalf("stat length mismatch: got %v, exp %v", got, exp) - } - - if stats[0].Size == 0 { - t.Fatalf("got size %v, exp > 0", stats[0].Size) - } - - if stats[0].LastModified == 0 { - t.Fatalf("got lastModified %v, exp > 0", stats[0].LastModified) - } - - if stats[0].Path == "" { - t.Fatalf("got path %v, exp != ''", stats[0].Path) - } + stats = ts.TombstoneStats() + require.True(t, stats.TombstoneExists) + require.NotZero(t, stats.Size) + require.NotZero(t, stats.LastModified) + require.NotEmpty(t, stats.Path) if got, exp := len(entries), 1; got != exp { t.Fatalf("length mismatch: got %v, exp %v", got, exp) @@ -82,10 +70,8 @@ func TestTombstoner_Add_LargeKey(t *testing.T) { t.Fatalf("length mismatch: got %v, exp %v", got, exp) } - stats := ts.TombstoneFiles() - if got, exp := len(stats), 0; got != exp { - t.Fatalf("stat length mismatch: got %v, exp %v", got, exp) - } + stats := ts.TombstoneStats() + require.False(t, stats.TombstoneExists) key := bytes.Repeat([]byte{'a'}, 4096) ts.Add([][]byte{key}) @@ -95,22 +81,11 @@ func TestTombstoner_Add_LargeKey(t *testing.T) { } entries = mustReadAll(ts) - stats = ts.TombstoneFiles() - if got, exp := len(stats), 1; got != exp { - t.Fatalf("stat length mismatch: got %v, exp %v", got, exp) - } - - if stats[0].Size == 0 { - t.Fatalf("got size %v, exp > 0", stats[0].Size) - } - - if stats[0].LastModified == 0 { - t.Fatalf("got lastModified %v, exp > 0", stats[0].LastModified) - } - - if stats[0].Path == "" { - t.Fatalf("got path %v, exp != ''", stats[0].Path) - } + stats = ts.TombstoneStats() + require.True(t, stats.TombstoneExists) + 
require.NotZero(t, stats.Size) + require.NotZero(t, stats.LastModified) + require.NotEmpty(t, stats.Path) if got, exp := len(entries), 1; got != exp { t.Fatalf("length mismatch: got %v, exp %v", got, exp) @@ -144,10 +119,8 @@ func TestTombstoner_Add_Multiple(t *testing.T) { t.Fatalf("length mismatch: got %v, exp %v", got, exp) } - stats := ts.TombstoneFiles() - if got, exp := len(stats), 0; got != exp { - t.Fatalf("stat length mismatch: got %v, exp %v", got, exp) - } + stats := ts.TombstoneStats() + require.False(t, stats.TombstoneExists) ts.Add([][]byte{[]byte("foo")}) @@ -162,22 +135,11 @@ func TestTombstoner_Add_Multiple(t *testing.T) { } entries = mustReadAll(ts) - stats = ts.TombstoneFiles() - if got, exp := len(stats), 1; got != exp { - t.Fatalf("stat length mismatch: got %v, exp %v", got, exp) - } - - if stats[0].Size == 0 { - t.Fatalf("got size %v, exp > 0", stats[0].Size) - } - - if stats[0].LastModified == 0 { - t.Fatalf("got lastModified %v, exp > 0", stats[0].LastModified) - } - - if stats[0].Path == "" { - t.Fatalf("got path %v, exp != ''", stats[0].Path) - } + stats = ts.TombstoneStats() + require.True(t, stats.TombstoneExists) + require.NotZero(t, stats.Size) + require.NotZero(t, stats.LastModified) + require.NotEmpty(t, stats.Path) if got, exp := len(entries), 2; got != exp { t.Fatalf("length mismatch: got %v, exp %v", got, exp) @@ -233,11 +195,8 @@ func TestTombstoner_Add_Empty(t *testing.T) { t.Fatalf("length mismatch: got %v, exp %v", got, exp) } - stats := ts.TombstoneFiles() - if got, exp := len(stats), 0; got != exp { - t.Fatalf("stat length mismatch: got %v, exp %v", got, exp) - } - + stats := ts.TombstoneStats() + require.False(t, stats.TombstoneExists) } func TestTombstoner_Delete(t *testing.T) { @@ -268,10 +227,8 @@ func TestTombstoner_Delete(t *testing.T) { fatal(t, "delete tombstone", err) } - stats := ts.TombstoneFiles() - if got, exp := len(stats), 0; got != exp { - t.Fatalf("stat length mismatch: got %v, exp %v", got, exp) - } + stats 
:= ts.TombstoneStats() + require.False(t, stats.TombstoneExists) ts = tsm1.NewTombstoner(f.Name(), nil) entries = mustReadAll(ts) diff --git a/tsdb/index.go b/tsdb/index.go index 5354a06d4ca..b1e8565fc6b 100644 --- a/tsdb/index.go +++ b/tsdb/index.go @@ -1315,7 +1315,7 @@ func (is IndexSet) DedupeInmemIndexes() IndexSet { // MeasurementNamesByExpr returns a slice of measurement names matching the // provided condition. If no condition is provided then all names are returned. -func (is IndexSet) MeasurementNamesByExpr(auth query.Authorizer, expr influxql.Expr) ([][]byte, error) { +func (is IndexSet) MeasurementNamesByExpr(auth query.FineAuthorizer, expr influxql.Expr) ([][]byte, error) { release := is.SeriesFile.Retain() defer release() @@ -1355,7 +1355,7 @@ func (is IndexSet) MeasurementNamesByExpr(auth query.Authorizer, expr influxql.E return slices.CopyChunkedByteSlices(names, 1000), nil } -func (is IndexSet) measurementNamesByExpr(auth query.Authorizer, expr influxql.Expr) ([][]byte, error) { +func (is IndexSet) measurementNamesByExpr(auth query.FineAuthorizer, expr influxql.Expr) ([][]byte, error) { if expr == nil { return nil, nil } @@ -1422,7 +1422,7 @@ func (is IndexSet) measurementNamesByExpr(auth query.Authorizer, expr influxql.E } // measurementNamesByNameFilter returns matching measurement names in sorted order. -func (is IndexSet) measurementNamesByNameFilter(auth query.Authorizer, op influxql.Token, val string, regex *regexp.Regexp) ([][]byte, error) { +func (is IndexSet) measurementNamesByNameFilter(auth query.FineAuthorizer, op influxql.Token, val string, regex *regexp.Regexp) ([][]byte, error) { itr, err := is.measurementIterator() if err != nil { return nil, err @@ -1464,7 +1464,7 @@ func (is IndexSet) measurementNamesByNameFilter(auth query.Authorizer, op influx // provided condition. If no condition is provided then all names are returned. 
// This behaves differently from MeasurementNamesByExpr because it will // return measurements using flux predicates. -func (is IndexSet) MeasurementNamesByPredicate(auth query.Authorizer, expr influxql.Expr) ([][]byte, error) { +func (is IndexSet) MeasurementNamesByPredicate(auth query.FineAuthorizer, expr influxql.Expr) ([][]byte, error) { release := is.SeriesFile.Retain() defer release() @@ -1504,7 +1504,7 @@ func (is IndexSet) MeasurementNamesByPredicate(auth query.Authorizer, expr influ return slices.CopyChunkedByteSlices(names, 1000), nil } -func (is IndexSet) measurementNamesByPredicate(auth query.Authorizer, expr influxql.Expr) ([][]byte, error) { +func (is IndexSet) measurementNamesByPredicate(auth query.FineAuthorizer, expr influxql.Expr) ([][]byte, error) { if expr == nil { return nil, nil } @@ -1570,7 +1570,7 @@ func (is IndexSet) measurementNamesByPredicate(auth query.Authorizer, expr influ } } -func (is IndexSet) measurementNamesByTagFilter(auth query.Authorizer, op influxql.Token, key, val string, regex *regexp.Regexp) ([][]byte, error) { +func (is IndexSet) measurementNamesByTagFilter(auth query.FineAuthorizer, op influxql.Token, key, val string, regex *regexp.Regexp) ([][]byte, error) { var names [][]byte mitr, err := is.measurementIterator() @@ -1697,7 +1697,7 @@ func (is IndexSet) measurementNamesByTagFilter(auth query.Authorizer, op influxq return names, nil } -func (is IndexSet) measurementNamesByTagPredicate(auth query.Authorizer, op influxql.Token, key, val string, regex *regexp.Regexp) ([][]byte, error) { +func (is IndexSet) measurementNamesByTagPredicate(auth query.FineAuthorizer, op influxql.Token, key, val string, regex *regexp.Regexp) ([][]byte, error) { var names [][]byte mitr, err := is.measurementIterator() @@ -1708,14 +1708,14 @@ func (is IndexSet) measurementNamesByTagPredicate(auth query.Authorizer, op infl } defer mitr.Close() - var checkMeasurement func(auth query.Authorizer, me []byte) (bool, error) + var checkMeasurement 
func(auth query.FineAuthorizer, me []byte) (bool, error) switch op { case influxql.EQ: - checkMeasurement = func(auth query.Authorizer, me []byte) (bool, error) { + checkMeasurement = func(auth query.FineAuthorizer, me []byte) (bool, error) { return is.measurementHasTagValue(auth, me, []byte(key), []byte(val)) } case influxql.NEQ: - checkMeasurement = func(auth query.Authorizer, me []byte) (bool, error) { + checkMeasurement = func(auth query.FineAuthorizer, me []byte) (bool, error) { // If there is an authorized series in this measurement and that series // does not contain the tag key/value. ok := is.measurementAuthorizedSeries(auth, me, func(tags models.Tags) bool { @@ -1724,11 +1724,11 @@ func (is IndexSet) measurementNamesByTagPredicate(auth query.Authorizer, op infl return ok, nil } case influxql.EQREGEX: - checkMeasurement = func(auth query.Authorizer, me []byte) (bool, error) { + checkMeasurement = func(auth query.FineAuthorizer, me []byte) (bool, error) { return is.measurementHasTagValueRegex(auth, me, []byte(key), regex) } case influxql.NEQREGEX: - checkMeasurement = func(auth query.Authorizer, me []byte) (bool, error) { + checkMeasurement = func(auth query.FineAuthorizer, me []byte) (bool, error) { // If there is an authorized series in this measurement and that series // does not contain the tag key/value. ok := is.measurementAuthorizedSeries(auth, me, func(tags models.Tags) bool { @@ -1762,7 +1762,7 @@ func (is IndexSet) measurementNamesByTagPredicate(auth query.Authorizer, op infl // measurementAuthorizedSeries determines if the measurement contains a series // that is authorized to be read. 
-func (is IndexSet) measurementAuthorizedSeries(auth query.Authorizer, name []byte, exclude func(tags models.Tags) bool) bool { +func (is IndexSet) measurementAuthorizedSeries(auth query.FineAuthorizer, name []byte, exclude func(tags models.Tags) bool) bool { if query.AuthorizerIsOpen(auth) && exclude == nil { return true } @@ -1798,7 +1798,7 @@ func (is IndexSet) measurementAuthorizedSeries(auth query.Authorizer, name []byt } } -func (is IndexSet) measurementHasTagValue(auth query.Authorizer, me, key, value []byte) (bool, error) { +func (is IndexSet) measurementHasTagValue(auth query.FineAuthorizer, me, key, value []byte) (bool, error) { if len(value) == 0 { return is.measurementHasEmptyTagValue(auth, me, key) } @@ -1836,7 +1836,7 @@ func (is IndexSet) measurementHasTagValue(auth query.Authorizer, me, key, value } } -func (is IndexSet) measurementHasEmptyTagValue(auth query.Authorizer, me, key []byte) (bool, error) { +func (is IndexSet) measurementHasEmptyTagValue(auth query.FineAuthorizer, me, key []byte) (bool, error) { // Any series that does not have a tag key // has an empty tag value for that key. // Iterate through all of the series to find one @@ -1871,7 +1871,7 @@ func (is IndexSet) measurementHasEmptyTagValue(auth query.Authorizer, me, key [] } } -func (is IndexSet) measurementHasTagValueRegex(auth query.Authorizer, me, key []byte, value *regexp.Regexp) (bool, error) { +func (is IndexSet) measurementHasTagValueRegex(auth query.FineAuthorizer, me, key []byte, value *regexp.Regexp) (bool, error) { // If the regex matches the empty string, do a special check to see // if we have an empty tag value. if matchEmpty := value.MatchString(""); matchEmpty { @@ -2032,7 +2032,7 @@ func (is IndexSet) tagValueIterator(name, key []byte) (TagValueIterator, error) // TagKeyHasAuthorizedSeries determines if there exists an authorized series for // the provided measurement name and tag key. 
-func (is IndexSet) TagKeyHasAuthorizedSeries(auth query.Authorizer, name, tagKey []byte) (bool, error) { +func (is IndexSet) TagKeyHasAuthorizedSeries(auth query.FineAuthorizer, name, tagKey []byte) (bool, error) { if !is.HasInmemIndex() && query.AuthorizerIsOpen(auth) { return true, nil } @@ -2674,7 +2674,7 @@ func (is IndexSet) matchTagValueNotEqualNotEmptySeriesIDIterator(name, key []byt // // N.B tagValuesByKeyAndExpr relies on keys being sorted in ascending // lexicographic order. -func (is IndexSet) TagValuesByKeyAndExpr(auth query.Authorizer, name []byte, keys []string, expr influxql.Expr, fieldset *MeasurementFieldSet) ([]map[string]struct{}, error) { +func (is IndexSet) TagValuesByKeyAndExpr(auth query.FineAuthorizer, name []byte, keys []string, expr influxql.Expr, fieldset *MeasurementFieldSet) ([]map[string]struct{}, error) { release := is.SeriesFile.Retain() defer release() return is.tagValuesByKeyAndExpr(auth, name, keys, expr) @@ -2685,7 +2685,7 @@ func (is IndexSet) TagValuesByKeyAndExpr(auth query.Authorizer, name []byte, key // // tagValuesByKeyAndExpr guarantees to never take any locks on the underlying // series file. -func (is IndexSet) tagValuesByKeyAndExpr(auth query.Authorizer, name []byte, keys []string, expr influxql.Expr) ([]map[string]struct{}, error) { +func (is IndexSet) tagValuesByKeyAndExpr(auth query.FineAuthorizer, name []byte, keys []string, expr influxql.Expr) ([]map[string]struct{}, error) { database := is.Database() valueExpr := influxql.CloneExpr(expr) @@ -2772,7 +2772,7 @@ func (is IndexSet) tagValuesByKeyAndExpr(auth query.Authorizer, name []byte, key } // MeasurementTagKeyValuesByExpr returns a set of tag values filtered by an expression. 
-func (is IndexSet) MeasurementTagKeyValuesByExpr(auth query.Authorizer, name []byte, keys []string, expr influxql.Expr, keysSorted bool) ([][]string, error) { +func (is IndexSet) MeasurementTagKeyValuesByExpr(auth query.FineAuthorizer, name []byte, keys []string, expr influxql.Expr, keysSorted bool) ([][]string, error) { if len(keys) == 0 { return nil, nil } diff --git a/tsdb/index/inmem/inmem.go b/tsdb/index/inmem/inmem.go index 4f58ec98aba..757676b79de 100644 --- a/tsdb/index/inmem/inmem.go +++ b/tsdb/index/inmem/inmem.go @@ -378,7 +378,7 @@ func (i *Index) MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[s // TagKeyHasAuthorizedSeries determines if there exists an authorized series for // the provided measurement name and tag key. -func (i *Index) TagKeyHasAuthorizedSeries(auth query.Authorizer, name []byte, key string) bool { +func (i *Index) TagKeyHasAuthorizedSeries(auth query.FineAuthorizer, name []byte, key string) bool { i.mu.RLock() mm := i.measurements[string(name)] i.mu.RUnlock() @@ -422,7 +422,7 @@ func (i *Index) TagKeyHasAuthorizedSeries(auth query.Authorizer, name []byte, ke // // See tsm1.Engine.MeasurementTagKeyValuesByExpr for a fuller description of this // method. -func (i *Index) MeasurementTagKeyValuesByExpr(auth query.Authorizer, name []byte, keys []string, expr influxql.Expr, keysSorted bool) ([][]string, error) { +func (i *Index) MeasurementTagKeyValuesByExpr(auth query.FineAuthorizer, name []byte, keys []string, expr influxql.Expr, keysSorted bool) ([][]string, error) { i.mu.RLock() mm := i.measurements[string(name)] i.mu.RUnlock() @@ -540,7 +540,7 @@ func (i *Index) TagsForSeries(key string) (models.Tags, error) { // // TODO(edd): Remove authorisation from these methods. There shouldn't need to // be any auth passed down into the index. 
-func (i *Index) MeasurementNamesByExpr(auth query.Authorizer, expr influxql.Expr) ([][]byte, error) { +func (i *Index) MeasurementNamesByExpr(auth query.FineAuthorizer, expr influxql.Expr) ([][]byte, error) { i.mu.RLock() defer i.mu.RUnlock() @@ -559,7 +559,7 @@ func (i *Index) MeasurementNamesByExpr(auth query.Authorizer, expr influxql.Expr return i.measurementNamesByExpr(auth, expr) } -func (i *Index) measurementNamesByExpr(auth query.Authorizer, expr influxql.Expr) ([][]byte, error) { +func (i *Index) measurementNamesByExpr(auth query.FineAuthorizer, expr influxql.Expr) ([][]byte, error) { if expr == nil { return nil, nil } @@ -625,7 +625,7 @@ func (i *Index) measurementNamesByExpr(auth query.Authorizer, expr influxql.Expr } // measurementNamesByNameFilter returns the sorted measurements matching a name. -func (i *Index) measurementNamesByNameFilter(auth query.Authorizer, op influxql.Token, val string, regex *regexp.Regexp) [][]byte { +func (i *Index) measurementNamesByNameFilter(auth query.FineAuthorizer, op influxql.Token, val string, regex *regexp.Regexp) [][]byte { var names [][]byte for _, m := range i.measurements { var matched bool @@ -649,7 +649,7 @@ func (i *Index) measurementNamesByNameFilter(auth query.Authorizer, op influxql. } // measurementNamesByTagFilters returns the sorted measurements matching the filters on tag values. -func (i *Index) measurementNamesByTagFilters(auth query.Authorizer, filter *TagFilter) [][]byte { +func (i *Index) measurementNamesByTagFilters(auth query.FineAuthorizer, filter *TagFilter) [][]byte { // Build a list of measurements matching the filters. var names [][]byte var tagMatch bool diff --git a/tsdb/index/inmem/meta.go b/tsdb/index/inmem/meta.go index 1b48f3eb8ba..dccd3580539 100644 --- a/tsdb/index/inmem/meta.go +++ b/tsdb/index/inmem/meta.go @@ -90,7 +90,7 @@ func (m *measurement) bytes() int { // Authorized determines if this Measurement is authorized to be read, according // to the provided Authorizer. 
A measurement is authorized to be read if at // least one undeleted series from the measurement is authorized to be read. -func (m *measurement) Authorized(auth query.Authorizer) bool { +func (m *measurement) Authorized(auth query.FineAuthorizer) bool { // Note(edd): the cost of this check scales linearly with the number of series // belonging to a measurement, which means it may become expensive when there // are large numbers of series on a measurement. @@ -1423,7 +1423,7 @@ func (m *measurement) TagKeys() []string { } // TagValues returns all the values for the given tag key, in an arbitrary order. -func (m *measurement) TagValues(auth query.Authorizer, key string) []string { +func (m *measurement) TagValues(auth query.FineAuthorizer, key string) []string { m.mu.RLock() defer m.mu.RUnlock() values := make([]string, 0, m.seriesByTagKeyValue[key].Cardinality()) diff --git a/tsdb/internal/meta.pb.go b/tsdb/internal/meta.pb.go index b09b9d74b33..af601763d46 100644 --- a/tsdb/internal/meta.pb.go +++ b/tsdb/internal/meta.pb.go @@ -3,11 +3,15 @@ package tsdb -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import io "io" + proto "github.com/gogo/protobuf/proto" + + math "math" + + io "io" +) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal diff --git a/tsdb/shard.go b/tsdb/shard.go index cad98fcba21..7d777887f33 100644 --- a/tsdb/shard.go +++ b/tsdb/shard.go @@ -12,6 +12,7 @@ import ( "regexp" "runtime" "sort" + "strconv" "strings" "sync" "sync/atomic" @@ -760,11 +761,7 @@ func (s *Shard) createFieldsAndMeasurements(fieldsToCreate []*FieldCreate) error s.index.SetFieldName(f.Measurement, f.Field.Name) } - if len(fieldsToCreate) > 0 { - return engine.MeasurementFieldSet().Save() - } - - return nil + return engine.MeasurementFieldSet().Save() } // DeleteSeriesRange deletes all values from for seriesKeys between min and max (inclusive) @@ -842,7 +839,7 @@ func (s *Shard) MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[s // MeasurementTagKeyValuesByExpr returns all the tag keys values for the // provided expression. -func (s *Shard) MeasurementTagKeyValuesByExpr(auth query.Authorizer, name []byte, key []string, expr influxql.Expr, keysSorted bool) ([][]string, error) { +func (s *Shard) MeasurementTagKeyValuesByExpr(auth query.FineAuthorizer, name []byte, key []string, expr influxql.Expr, keysSorted bool) ([][]string, error) { index, err := s.Index() if err != nil { return nil, err @@ -1177,12 +1174,12 @@ func (s *Shard) Import(r io.Reader, basePath string) error { // CreateSnapshot will return a path to a temp directory // containing hard links to the underlying shard files. -func (s *Shard) CreateSnapshot() (string, error) { +func (s *Shard) CreateSnapshot(skipCacheOk bool) (string, error) { engine, err := s.Engine() if err != nil { return "", err } - return engine.CreateSnapshot() + return engine.CreateSnapshot(skipCacheOk) } // ForEachMeasurementName iterates over each measurement in the shard. 
@@ -1687,16 +1684,20 @@ func (m *MeasurementFields) ForEachField(fn func(name string, typ influxql.DataT type MeasurementFieldSet struct { mu sync.RWMutex fields map[string]*MeasurementFields - // path is the location to persist field sets path string + // ephemeral counters for updating the file on disk + memoryVersion uint64 + writtenVersion uint64 } // NewMeasurementFieldSet returns a new instance of MeasurementFieldSet. func NewMeasurementFieldSet(path string) (*MeasurementFieldSet, error) { fs := &MeasurementFieldSet{ - fields: make(map[string]*MeasurementFields), - path: path, + fields: make(map[string]*MeasurementFields), + path: path, + memoryVersion: 0, + writtenVersion: 0, } // If there is a load error, return the error and an empty set so @@ -1781,21 +1782,41 @@ func (fs *MeasurementFieldSet) IsEmpty() bool { return len(fs.fields) == 0 } -func (fs *MeasurementFieldSet) Save() error { - fs.mu.Lock() - defer fs.mu.Unlock() - - return fs.saveNoLock() -} +func (fs *MeasurementFieldSet) Save() (err error) { + // current version + var v uint64 + // Is the MeasurementFieldSet empty? + isEmpty := false + // marshaled MeasurementFieldSet + + b, err := func() ([]byte, error) { + fs.mu.Lock() + defer fs.mu.Unlock() + fs.memoryVersion += 1 + v = fs.memoryVersion + // If no fields left, remove the fields index file + if len(fs.fields) == 0 { + isEmpty = true + if err := os.RemoveAll(fs.path); err != nil { + return nil, err + } else { + fs.writtenVersion = fs.memoryVersion + return nil, nil + } + } + return fs.marshalMeasurementFieldSetNoLock() + }() -func (fs *MeasurementFieldSet) saveNoLock() error { - // No fields left, remove the fields index file - if len(fs.fields) == 0 { - return os.RemoveAll(fs.path) + if err != nil { + return err + } else if isEmpty { + return nil } // Write the new index to a temp file and rename when it's sync'd - path := fs.path + ".tmp" + // if it is still the most recent memoryVersion of the MeasurementFields + path := fs.path + "." 
+ strconv.FormatUint(v, 10) + ".tmp" + fd, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR|os.O_EXCL|os.O_SYNC, 0666) if err != nil { return err @@ -1806,28 +1827,6 @@ func (fs *MeasurementFieldSet) saveNoLock() error { return err } - pb := internal.MeasurementFieldSet{ - Measurements: make([]*internal.MeasurementFields, 0, len(fs.fields)), - } - for name, mf := range fs.fields { - fs := &internal.MeasurementFields{ - Name: []byte(name), - Fields: make([]*internal.Field, 0, mf.FieldN()), - } - - mf.ForEachField(func(field string, typ influxql.DataType) bool { - fs.Fields = append(fs.Fields, &internal.Field{Name: []byte(field), Type: int32(typ)}) - return true - }) - - pb.Measurements = append(pb.Measurements, fs) - } - - b, err := proto.Marshal(&pb) - if err != nil { - return err - } - if _, err := fd.Write(b); err != nil { return err } @@ -1841,11 +1840,52 @@ func (fs *MeasurementFieldSet) saveNoLock() error { return err } + fs.mu.Lock() + defer fs.mu.Unlock() + + // Check if a later modification and save of fields has superseded ours + // If so, we are successfully done! 
We were beaten by a later call + // to this function + if fs.writtenVersion > v { + return nil + } + if err := file.RenameFile(path, fs.path); err != nil { return err } - return file.SyncDir(filepath.Dir(fs.path)) + if err = file.SyncDir(filepath.Dir(fs.path)); err != nil { + return err + } + // Update the written version to the current version + fs.writtenVersion = v + return nil +} + +func (fs *MeasurementFieldSet) marshalMeasurementFieldSetNoLock() (marshalled []byte, err error) { + pb := internal.MeasurementFieldSet{ + Measurements: make([]*internal.MeasurementFields, 0, len(fs.fields)), + } + + for name, mf := range fs.fields { + imf := &internal.MeasurementFields{ + Name: []byte(name), + Fields: make([]*internal.Field, 0, mf.FieldN()), + } + + mf.ForEachField(func(field string, typ influxql.DataType) bool { + imf.Fields = append(imf.Fields, &internal.Field{Name: []byte(field), Type: int32(typ)}) + return true + }) + + pb.Measurements = append(pb.Measurements, imf) + } + b, err := proto.Marshal(&pb) + if err != nil { + return nil, err + } else { + return b, nil + } } func (fs *MeasurementFieldSet) load() error { diff --git a/tsdb/shard_test.go b/tsdb/shard_test.go index bb58f401688..3b80aa550f2 100644 --- a/tsdb/shard_test.go +++ b/tsdb/shard_test.go @@ -456,7 +456,7 @@ func TestShard_WritePoints_FieldConflictConcurrent(t *testing.T) { } _ = sh.WritePoints(points[:500]) - if f, err := sh.CreateSnapshot(); err == nil { + if f, err := sh.CreateSnapshot(false); err == nil { os.RemoveAll(f) } @@ -472,7 +472,7 @@ func TestShard_WritePoints_FieldConflictConcurrent(t *testing.T) { } _ = sh.WritePoints(points[500:]) - if f, err := sh.CreateSnapshot(); err == nil { + if f, err := sh.CreateSnapshot(false); err == nil { os.RemoveAll(f) } } @@ -1696,6 +1696,71 @@ func TestMeasurementFieldSet_InvalidFormat(t *testing.T) { } } +func TestMeasurementFieldSet_ConcurrentSave(t *testing.T) { + var iterations int + dir, cleanup := MustTempDir() + defer cleanup() + + if 
testing.Short() { + iterations = 50 + } else { + iterations = 200 + } + + mt := []string{"cpu", "dpu", "epu", "fpu"} + ft := make([][]string, len(mt), len(mt)) + for mi, m := range mt { + ft[mi] = make([]string, iterations, iterations) + for i := 0; i < iterations; i += 1 { + ft[mi][i] = fmt.Sprintf("%s_%s_%d", m, "value", i) + } + } + + path := filepath.Join(dir, "fields.idx") + mfs, err := tsdb.NewMeasurementFieldSet(path) + if err != nil { + t.Fatalf("NewMeasurementFieldSet error: %v", err) + } + var wg sync.WaitGroup + + wg.Add(len(ft)) + for i, fs := range ft { + go testFieldMaker(t, &wg, mfs, mt[i], fs) + } + wg.Wait() + + mfs2, err := tsdb.NewMeasurementFieldSet(path) + if err != nil { + t.Fatalf("NewMeasurementFieldSet error: %v", err) + } + for i, fs := range ft { + mf := mfs.Fields([]byte(mt[i])) + mf2 := mfs2.Fields([]byte(mt[i])) + for _, f := range fs { + if mf2.Field(f) == nil { + t.Fatalf("Created field not found on reloaded MeasurementFieldSet %s", f) + } + if mf.Field(f) == nil { + t.Fatalf("Created field not found in original MeasureMentFieldSet: %s", f) + } + } + } + +} + +func testFieldMaker(t *testing.T, wg *sync.WaitGroup, mf *tsdb.MeasurementFieldSet, measurement string, fieldNames []string) { + defer wg.Done() + fields := mf.CreateFieldsIfNotExists([]byte(measurement)) + for _, fieldName := range fieldNames { + if err := fields.CreateFieldIfNotExists([]byte(fieldName), influxql.Float); err != nil { + t.Fatalf("create field error: %v", err) + } + if err := mf.Save(); err != nil { + t.Fatalf("save error: %v", err) + } + } +} + func BenchmarkWritePoints_NewSeries_1K(b *testing.B) { benchmarkWritePoints(b, 38, 3, 3, 1) } func BenchmarkWritePoints_NewSeries_100K(b *testing.B) { benchmarkWritePoints(b, 32, 5, 5, 1) } func BenchmarkWritePoints_NewSeries_250K(b *testing.B) { benchmarkWritePoints(b, 80, 5, 5, 1) } diff --git a/tsdb/store.go b/tsdb/store.go index 5e30f84e24b..2ada9f31733 100644 --- a/tsdb/store.go +++ b/tsdb/store.go @@ -134,19 
+134,18 @@ func (s *Store) Statistics(tags map[string]string) []models.Statistic { s.mu.RLock() shards := s.shardsSlice() s.mu.RUnlock() - // Add all the series and measurements cardinality estimations. databases := s.Databases() statistics := make([]models.Statistic, 0, len(databases)) for _, database := range databases { log := s.Logger.With(logger.Database(database)) - sc, err := s.SeriesCardinality(database) + sc, err := s.SeriesCardinality(context.Background(), database) if err != nil { log.Info("Cannot retrieve series cardinality", zap.Error(err)) continue } - mc, err := s.MeasurementsCardinality(database) + mc, err := s.MeasurementsCardinality(context.Background(), database) if err != nil { log.Info("Cannot retrieve measurement cardinality", zap.Error(err)) continue @@ -675,13 +674,13 @@ func (s *Store) CreateShard(database, retentionPolicy string, shardID uint64, en // CreateShardSnapShot will create a hard link to the underlying shard and return a path. // The caller is responsible for cleaning up (removing) the file path returned. -func (s *Store) CreateShardSnapshot(id uint64) (string, error) { +func (s *Store) CreateShardSnapshot(id uint64, skipCacheOk bool) (string, error) { sh := s.Shard(id) if sh == nil { return "", ErrShardNotFound } - return sh.CreateSnapshot() + return sh.CreateSnapshot(skipCacheOk) } // SetShardEnabled enables or disables a shard for read and writes. @@ -1131,7 +1130,7 @@ func (s *Store) sketchesForDatabase(dbName string, getSketches func(*Shard) (est // Cardinality is calculated exactly by unioning all shards' bitsets of series // IDs. The result of this method cannot be combined with any other results. 
// -func (s *Store) SeriesCardinality(database string) (int64, error) { +func (s *Store) SeriesCardinality(ctx context.Context, database string) (int64, error) { s.mu.RLock() shards := s.filterShards(byDatabase(database)) s.mu.RUnlock() @@ -1139,23 +1138,35 @@ func (s *Store) SeriesCardinality(database string) (int64, error) { var setMu sync.Mutex others := make([]*SeriesIDSet, 0, len(shards)) - s.walkShards(shards, func(sh *Shard) error { - index, err := sh.Index() - if err != nil { - return err - } + err := s.walkShards(shards, func(sh *Shard) error { + select { + case <-ctx.Done(): + return ctx.Err() + default: + index, err := sh.Index() + if err != nil { + return err + } - seriesIDs := index.SeriesIDSet() - setMu.Lock() - others = append(others, seriesIDs) - setMu.Unlock() + seriesIDs := index.SeriesIDSet() + setMu.Lock() + others = append(others, seriesIDs) + setMu.Unlock() - return nil + return nil + } }) - + if err != nil { + return 0, err + } ss := NewSeriesIDSet() ss.Merge(others...) - return int64(ss.Cardinality()), nil + select { + case <-ctx.Done(): + return 0, ctx.Err() + default: + return int64(ss.Cardinality()), nil + } } // SeriesSketches returns the sketches associated with the series data in all @@ -1163,8 +1174,13 @@ func (s *Store) SeriesCardinality(database string) (int64, error) { // // The returned sketches can be combined with other sketches to provide an // estimation across distributed databases. 
-func (s *Store) SeriesSketches(database string) (estimator.Sketch, estimator.Sketch, error) { +func (s *Store) SeriesSketches(ctx context.Context, database string) (estimator.Sketch, estimator.Sketch, error) { return s.sketchesForDatabase(database, func(sh *Shard) (estimator.Sketch, estimator.Sketch, error) { + select { + case <-ctx.Done(): + return nil, nil, ctx.Err() + default: + } if sh == nil { return nil, nil, errors.New("shard nil, can't get cardinality") } @@ -1177,13 +1193,8 @@ func (s *Store) SeriesSketches(database string) (estimator.Sketch, estimator.Ske // // Cardinality is calculated using a sketch-based estimation. The result of this // method cannot be combined with any other results. -func (s *Store) MeasurementsCardinality(database string) (int64, error) { - ss, ts, err := s.sketchesForDatabase(database, func(sh *Shard) (estimator.Sketch, estimator.Sketch, error) { - if sh == nil { - return nil, nil, errors.New("shard nil, can't get cardinality") - } - return sh.MeasurementsSketches() - }) +func (s *Store) MeasurementsCardinality(ctx context.Context, database string) (int64, error) { + ss, ts, err := s.MeasurementsSketches(ctx, database) if err != nil { return 0, err @@ -1196,12 +1207,18 @@ func (s *Store) MeasurementsCardinality(database string) (int64, error) { // // The returned sketches can be combined with other sketches to provide an // estimation across distributed databases. -func (s *Store) MeasurementsSketches(database string) (estimator.Sketch, estimator.Sketch, error) { +func (s *Store) MeasurementsSketches(ctx context.Context, database string) (estimator.Sketch, estimator.Sketch, error) { return s.sketchesForDatabase(database, func(sh *Shard) (estimator.Sketch, estimator.Sketch, error) { - if sh == nil { - return nil, nil, errors.New("shard nil, can't get cardinality") + // every iteration, check for timeout. 
+ select { + case <-ctx.Done(): + return nil, nil, ctx.Err() + default: + if sh == nil { + return nil, nil, errors.New("shard nil, can't get cardinality") + } + return sh.MeasurementsSketches() } - return sh.MeasurementsSketches() }) } @@ -1438,7 +1455,7 @@ func (s *Store) WriteToShardWithContext(ctx context.Context, shardID uint64, poi // MeasurementNames returns a slice of all measurements. Measurements accepts an // optional condition expression. If cond is nil, then all measurements for the // database will be returned. -func (s *Store) MeasurementNames(auth query.Authorizer, database string, cond influxql.Expr) ([][]byte, error) { +func (s *Store) MeasurementNames(ctx context.Context, auth query.FineAuthorizer, database string, cond influxql.Expr) ([][]byte, error) { s.mu.RLock() shards := s.filterShards(byDatabase(database)) s.mu.RUnlock() @@ -1458,6 +1475,11 @@ func (s *Store) MeasurementNames(auth query.Authorizer, database string, cond in is.Indexes = append(is.Indexes, index) } is = is.DedupeInmemIndexes() + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } return is.MeasurementNamesByExpr(auth, cond) } @@ -1480,7 +1502,7 @@ func (a TagKeysSlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a TagKeysSlice) Less(i, j int) bool { return a[i].Measurement < a[j].Measurement } // TagKeys returns the tag keys in the given database, matching the condition. -func (s *Store) TagKeys(auth query.Authorizer, shardIDs []uint64, cond influxql.Expr) ([]TagKeys, error) { +func (s *Store) TagKeys(ctx context.Context, auth query.FineAuthorizer, shardIDs []uint64, cond influxql.Expr) ([]TagKeys, error) { if len(shardIDs) == 0 { return nil, nil } @@ -1553,6 +1575,12 @@ func (s *Store) TagKeys(auth query.Authorizer, shardIDs []uint64, cond influxql. var results []TagKeys for _, name := range names { + // Check for timeouts + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } // Build keyset over all indexes for measurement. 
tagKeySet, err := is.MeasurementTagKeysByExpr(name, nil) if err != nil { @@ -1566,6 +1594,12 @@ func (s *Store) TagKeys(auth query.Authorizer, shardIDs []uint64, cond influxql. // If they have authorized series associated with them. if filterExpr == nil { for tagKey := range tagKeySet { + // check for timeouts + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } ok, err := is.TagKeyHasAuthorizedSeries(auth, []byte(name), []byte(tagKey)) if err != nil { return nil, err @@ -1646,7 +1680,7 @@ func (a tagValuesSlice) Less(i, j int) bool { return bytes.Compare(a[i].name, a[ // TagValues returns the tag keys and values for the provided shards, where the // tag values satisfy the provided condition. -func (s *Store) TagValues(auth query.Authorizer, shardIDs []uint64, cond influxql.Expr) ([]TagValues, error) { +func (s *Store) TagValues(ctx context.Context, auth query.FineAuthorizer, shardIDs []uint64, cond influxql.Expr) ([]TagValues, error) { if cond == nil { return nil, errors.New("a condition is required") } @@ -1735,6 +1769,13 @@ func (s *Store) TagValues(auth query.Authorizer, shardIDs []uint64, cond influxq // values from matching series. Series may be filtered using a WHERE // filter. for _, name := range names { + // check for timeouts + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + // Determine a list of keys from condition. keySet, err := is.MeasurementTagKeysByExpr(name, cond) if err != nil { @@ -1797,6 +1838,13 @@ func (s *Store) TagValues(auth query.Authorizer, shardIDs []uint64, cond influxq // instances of tagValues for a given measurement. idxBuf := make([][2]int, 0, len(is.Indexes)) for i < len(allResults) { + // check for timeouts + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + // Gather all occurrences of the same measurement for merging. 
for j+1 < len(allResults) && bytes.Equal(allResults[j+1].name, allResults[i].name) { j++ diff --git a/tsdb/store_test.go b/tsdb/store_test.go index 5860805b125..820e064bc66 100644 --- a/tsdb/store_test.go +++ b/tsdb/store_test.go @@ -288,13 +288,12 @@ func TestStore_DropConcurrentWriteMultipleShards(t *testing.T) { t.Fatal(err) } } - err := s.DeleteMeasurement("db0", "cpu") if err != nil { t.Fatal(err) } - measurements, err := s.MeasurementNames(query.OpenAuthorizer, "db0", nil) + measurements, err := s.MeasurementNames(context.Background(), query.OpenAuthorizer, "db0", nil) if err != nil { t.Fatal(err) } @@ -354,7 +353,7 @@ func TestStore_WriteMixedShards(t *testing.T) { wg.Wait() - keys, err := s.TagKeys(nil, []uint64{1, 2}, nil) + keys, err := s.TagKeys(context.Background(), nil, []uint64{1, 2}, nil) if err != nil { t.Fatal(err) } @@ -454,7 +453,7 @@ func TestStore_DeleteShard(t *testing.T) { // cpu,serverb=b should be removed from the series file for db0 because // shard 1 was the only owner of that series. // Verify by getting all tag keys. - keys, err := s.TagKeys(nil, []uint64{2}, nil) + keys, err := s.TagKeys(context.Background(), nil, []uint64{2}, nil) if err != nil { return err } @@ -469,7 +468,7 @@ func TestStore_DeleteShard(t *testing.T) { // Verify that the same series was not removed from other databases' // series files. 
- if keys, err = s.TagKeys(nil, []uint64{3}, nil); err != nil { + if keys, err = s.TagKeys(context.Background(), nil, []uint64{3}, nil); err != nil { return err } @@ -504,7 +503,7 @@ func TestStore_CreateShardSnapShot(t *testing.T) { t.Fatalf("expected shard") } - dir, e := s.CreateShardSnapshot(1) + dir, e := s.CreateShardSnapshot(1, false) if e != nil { t.Fatal(e) } @@ -867,7 +866,7 @@ func TestStore_MeasurementNames_Deduplicate(t *testing.T) { `cpu value=3 20`, ) - meas, err := s.MeasurementNames(query.OpenAuthorizer, "db0", nil) + meas, err := s.MeasurementNames(context.Background(), query.OpenAuthorizer, "db0", nil) if err != nil { t.Fatalf("unexpected error with MeasurementNames: %v", err) } @@ -908,7 +907,7 @@ func testStoreCardinalityTombstoning(t *testing.T, store *Store) { } // Delete all the series for each measurement. - mnames, err := store.MeasurementNames(nil, "db", nil) + mnames, err := store.MeasurementNames(context.Background(), nil, "db", nil) if err != nil { t.Fatal(err) } @@ -920,7 +919,7 @@ func testStoreCardinalityTombstoning(t *testing.T, store *Store) { } // Estimate the series cardinality... - cardinality, err := store.Store.SeriesCardinality("db") + cardinality, err := store.Store.SeriesCardinality(context.Background(), "db") if err != nil { t.Fatal(err) } @@ -932,7 +931,7 @@ func testStoreCardinalityTombstoning(t *testing.T, store *Store) { // Since all the series have been deleted, all the measurements should have // been removed from the index too. - if cardinality, err = store.Store.MeasurementsCardinality("db"); err != nil { + if cardinality, err = store.Store.MeasurementsCardinality(context.Background(), "db"); err != nil { t.Fatal(err) } @@ -986,7 +985,7 @@ func testStoreCardinalityUnique(t *testing.T, store *Store) { } // Estimate the series cardinality... 
- cardinality, err := store.Store.SeriesCardinality("db") + cardinality, err := store.Store.SeriesCardinality(context.Background(), "db") if err != nil { t.Fatal(err) } @@ -997,7 +996,7 @@ func testStoreCardinalityUnique(t *testing.T, store *Store) { } // Estimate the measurement cardinality... - if cardinality, err = store.Store.MeasurementsCardinality("db"); err != nil { + if cardinality, err = store.Store.MeasurementsCardinality(context.Background(), "db"); err != nil { t.Fatal(err) } @@ -1068,7 +1067,7 @@ func testStoreCardinalityDuplicates(t *testing.T, store *Store) { } // Estimate the series cardinality... - cardinality, err := store.Store.SeriesCardinality("db") + cardinality, err := store.Store.SeriesCardinality(context.Background(), "db") if err != nil { t.Fatal(err) } @@ -1079,7 +1078,7 @@ func testStoreCardinalityDuplicates(t *testing.T, store *Store) { } // Estimate the measurement cardinality... - if cardinality, err = store.Store.MeasurementsCardinality("db"); err != nil { + if cardinality, err = store.Store.MeasurementsCardinality(context.Background(), "db"); err != nil { t.Fatal(err) } @@ -1112,6 +1111,150 @@ func TestStore_Cardinality_Duplicates(t *testing.T) { } } +func TestStore_MetaQuery_Timeout(t *testing.T) { + if testing.Short() || os.Getenv("APPVEYOR") != "" { + t.Skip("Skipping test in short and appveyor mode.") + } + + test := func(index string) { + store := NewStore(index) + store.EngineOptions.Config.MaxSeriesPerDatabase = 0 + if err := store.Open(); err != nil { + panic(err) + } + defer store.Close() + testStoreMetaQueryTimeout(t, store, index) + } + + for _, index := range tsdb.RegisteredIndexes() { + test(index) + } +} + +func testStoreMetaQueryTimeout(t *testing.T, store *Store, index string) { + shards := testStoreMetaQuerySetup(t, store) + + testStoreMakeTimedFuncs(func(ctx context.Context) (string, error) { + const funcName = "SeriesCardinality" + _, err := store.Store.SeriesCardinality(ctx, "db") + return funcName, err + }, 
index)(t) + + testStoreMakeTimedFuncs(func(ctx context.Context) (string, error) { + const funcName = "MeasurementsCardinality" + _, err := store.Store.MeasurementsCardinality(ctx, "db") + return funcName, err + }, index)(t) + + keyCondition, allCondition := testStoreMetaQueryCondition() + + testStoreMakeTimedFuncs(func(ctx context.Context) (string, error) { + const funcName = "TagValues" + _, err := store.Store.TagValues(ctx, nil, shards, allCondition) + return funcName, err + }, index)(t) + + testStoreMakeTimedFuncs(func(ctx context.Context) (string, error) { + const funcName = "TagKeys" + _, err := store.Store.TagKeys(ctx, nil, shards, keyCondition) + return funcName, err + }, index)(t) + + testStoreMakeTimedFuncs(func(ctx context.Context) (string, error) { + const funcName = "MeasurementNames" + _, err := store.Store.MeasurementNames(ctx, nil, "db", nil) + return funcName, err + }, index)(t) +} + +func testStoreMetaQueryCondition() (influxql.Expr, influxql.Expr) { + keyCondition := &influxql.ParenExpr{ + Expr: &influxql.BinaryExpr{ + Op: influxql.OR, + LHS: &influxql.BinaryExpr{ + Op: influxql.EQ, + LHS: &influxql.VarRef{Val: "_tagKey"}, + RHS: &influxql.StringLiteral{Val: "tagKey4"}, + }, + RHS: &influxql.BinaryExpr{ + Op: influxql.EQ, + LHS: &influxql.VarRef{Val: "_tagKey"}, + RHS: &influxql.StringLiteral{Val: "tagKey5"}, + }, + }, + } + + whereCondition := &influxql.ParenExpr{ + Expr: &influxql.BinaryExpr{ + Op: influxql.AND, + LHS: &influxql.ParenExpr{ + Expr: &influxql.BinaryExpr{ + Op: influxql.EQ, + LHS: &influxql.VarRef{Val: "tagKey1"}, + RHS: &influxql.StringLiteral{Val: "tagValue2"}, + }, + }, + RHS: keyCondition, + }, + } + + allCondition := &influxql.BinaryExpr{ + Op: influxql.AND, + LHS: &influxql.ParenExpr{ + Expr: &influxql.BinaryExpr{ + Op: influxql.EQREGEX, + LHS: &influxql.VarRef{Val: "tagKey3"}, + RHS: &influxql.RegexLiteral{Val: regexp.MustCompile(`tagValue\d`)}, + }, + }, + RHS: whereCondition, + } + return keyCondition, allCondition +} + 
+func testStoreMetaQuerySetup(t *testing.T, store *Store) []uint64 { + const measurementCnt = 64 + const tagCnt = 5 + const valueCnt = 5 + const pointsPerShard = 20000 + + // Generate point data to write to the shards. + series := genTestSeries(measurementCnt, tagCnt, valueCnt) + + points := make([]models.Point, 0, len(series)) + for _, s := range series { + points = append(points, models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{"value": 1.0}, time.Now())) + } + // Create requested number of shards in the store & write points across + // shards such that we never write the same series to multiple shards. + shards := make([]uint64, len(points)/pointsPerShard) + for shardID := 0; shardID < len(points)/pointsPerShard; shardID++ { + if err := store.CreateShard("db", "rp", uint64(shardID), true); err != nil { + t.Fatalf("create shard: %s", err) + } + if err := store.BatchWrite(shardID, points[shardID*pointsPerShard:(shardID+1)*pointsPerShard]); err != nil { + t.Fatalf("batch write: %s", err) + } + shards[shardID] = uint64(shardID) + } + return shards +} + +func testStoreMakeTimedFuncs(tested func(context.Context) (string, error), index string) func(*testing.T) { + cancelTested := func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(0)) + defer cancel() + + funcName, err := tested(ctx) + if err == nil { + t.Fatalf("%v: failed to time out with index type %v", funcName, index) + } else if !strings.Contains(err.Error(), context.DeadlineExceeded.Error()) { + t.Fatalf("%v: failed with %v instead of %v with index type %v", funcName, err, context.DeadlineExceeded, index) + } + } + return cancelTested +} + // Creates a large number of series in multiple shards, which will force // compactions to occur. func testStoreCardinalityCompactions(store *Store) error { @@ -1137,7 +1280,7 @@ func testStoreCardinalityCompactions(store *Store) error { } // Estimate the series cardinality... 
- cardinality, err := store.Store.SeriesCardinality("db") + cardinality, err := store.Store.SeriesCardinality(context.Background(), "db") if err != nil { return err } @@ -1148,7 +1291,7 @@ func testStoreCardinalityCompactions(store *Store) error { } // Estimate the measurement cardinality... - if cardinality, err = store.Store.MeasurementsCardinality("db"); err != nil { + if cardinality, err = store.Store.MeasurementsCardinality(context.Background(), "db"); err != nil { return err } @@ -1230,7 +1373,7 @@ func TestStore_Cardinality_Limit_On_InMem_Index(t *testing.T) { } // Get updated series cardinality from store after writing data. - cardinality, err := store.Store.SeriesCardinality("db") + cardinality, err := store.Store.SeriesCardinality(context.Background(), "db") if err != nil { t.Fatal(err) } @@ -1249,7 +1392,7 @@ func TestStore_Sketches(t *testing.T) { checkCardinalities := func(store *tsdb.Store, series, tseries, measurements, tmeasurements int) error { // Get sketches and check cardinality... - sketch, tsketch, err := store.SeriesSketches("db") + sketch, tsketch, err := store.SeriesSketches(context.Background(), "db") if err != nil { return err } @@ -1275,7 +1418,7 @@ func TestStore_Sketches(t *testing.T) { } // Check measurement cardinality. 
- if sketch, tsketch, err = store.MeasurementsSketches("db"); err != nil { + if sketch, tsketch, err = store.MeasurementsSketches(context.Background(), "db"); err != nil { return err } @@ -1329,7 +1472,7 @@ func TestStore_Sketches(t *testing.T) { } // Delete half the the measurements data - mnames, err := store.MeasurementNames(nil, "db", nil) + mnames, err := store.MeasurementNames(context.Background(), nil, "db", nil) if err != nil { return err } @@ -1462,9 +1605,8 @@ func TestStore_TagValues(t *testing.T) { }, } - var s *Store - setup := func(index string) []uint64 { // returns shard ids - s = MustOpenStore(index) + setup := func(index string) (*Store, []uint64) { // returns shard ids + s := MustOpenStore(index) fmtStr := `cpu1%[1]d,foo=a,ignoreme=nope,host=tv%[2]d,shard=s%[3]d value=1 %[4]d cpu1%[1]d,host=nofoo value=1 %[4]d @@ -1489,14 +1631,14 @@ func TestStore_TagValues(t *testing.T) { ids = append(ids, uint64(i)) s.MustCreateShardWithData("db0", "rp0", i, genPoints(i)...) } - return ids + return s, ids } for _, example := range examples { for _, index := range tsdb.RegisteredIndexes() { - shardIDs := setup(index) + s, shardIDs := setup(index) t.Run(example.Name+"_"+index, func(t *testing.T) { - got, err := s.TagValues(nil, shardIDs, example.Expr) + got, err := s.TagValues(context.Background(), nil, shardIDs, example.Expr) if err != nil { t.Fatal(err) } @@ -1538,7 +1680,7 @@ func TestStore_Measurements_Auth(t *testing.T) { }, } - names, err := s.MeasurementNames(authorizer, "db0", nil) + names, err := s.MeasurementNames(context.Background(), authorizer, "db0", nil) if err != nil { return err } @@ -1568,7 +1710,7 @@ func TestStore_Measurements_Auth(t *testing.T) { return err } - if names, err = s.MeasurementNames(authorizer, "db0", nil); err != nil { + if names, err = s.MeasurementNames(context.Background(), authorizer, "db0", nil); err != nil { return err } @@ -1625,7 +1767,7 @@ func TestStore_TagKeys_Auth(t *testing.T) { }, } - keys, err := 
s.TagKeys(authorizer, []uint64{0}, nil) + keys, err := s.TagKeys(context.Background(), authorizer, []uint64{0}, nil) if err != nil { return err } @@ -1660,7 +1802,7 @@ func TestStore_TagKeys_Auth(t *testing.T) { return err } - if keys, err = s.TagKeys(authorizer, []uint64{0}, nil); err != nil { + if keys, err = s.TagKeys(context.Background(), authorizer, []uint64{0}, nil); err != nil { return err } @@ -1723,7 +1865,7 @@ func TestStore_TagValues_Auth(t *testing.T) { }, } - values, err := s.TagValues(authorizer, []uint64{0}, &influxql.BinaryExpr{ + values, err := s.TagValues(context.Background(), authorizer, []uint64{0}, &influxql.BinaryExpr{ Op: influxql.EQ, LHS: &influxql.VarRef{Val: "_tagKey"}, RHS: &influxql.StringLiteral{Val: "host"}, @@ -1763,7 +1905,7 @@ func TestStore_TagValues_Auth(t *testing.T) { return err } - values, err = s.TagValues(authorizer, []uint64{0}, &influxql.BinaryExpr{ + values, err = s.TagValues(context.Background(), authorizer, []uint64{0}, &influxql.BinaryExpr{ Op: influxql.EQ, LHS: &influxql.VarRef{Val: "_tagKey"}, RHS: &influxql.StringLiteral{Val: "host"}, @@ -1884,7 +2026,7 @@ func TestStore_MeasurementNames_ConcurrentDropShard(t *testing.T) { errC <- nil return default: - names, err := s.MeasurementNames(nil, "db0", nil) + names, err := s.MeasurementNames(context.Background(), nil, "db0", nil) if err == tsdb.ErrIndexClosing || err == tsdb.ErrEngineClosed { continue // These errors are expected } @@ -1969,7 +2111,7 @@ func TestStore_TagKeys_ConcurrentDropShard(t *testing.T) { errC <- nil return default: - keys, err := s.TagKeys(nil, []uint64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, nil) + keys, err := s.TagKeys(context.Background(), nil, []uint64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, nil) if err == tsdb.ErrIndexClosing || err == tsdb.ErrEngineClosed { continue // These errors are expected } @@ -2070,7 +2212,7 @@ func TestStore_TagValues_ConcurrentDropShard(t *testing.T) { } cond := rewrite.(*influxql.ShowTagValuesStatement).Condition - values, err := 
s.TagValues(nil, []uint64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, cond) + values, err := s.TagValues(context.Background(), nil, []uint64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, cond) if err == tsdb.ErrIndexClosing || err == tsdb.ErrEngineClosed { continue // These errors are expected } @@ -2132,7 +2274,7 @@ func BenchmarkStore_SeriesCardinality_100_Shards(b *testing.B) { b.Run(store.EngineOptions.IndexVersion, func(b *testing.B) { for i := 0; i < b.N; i++ { - _, _ = store.SeriesCardinality("db") + _, _ = store.SeriesCardinality(context.Background(), "db") } }) store.Close() @@ -2214,8 +2356,7 @@ func BenchmarkStore_TagValues(b *testing.B) { {name: "s=10_m=100_v=1000", shards: 10, measurements: 100, tagValues: 1000}, } - var s *Store - setup := func(shards, measurements, tagValues int, index string, useRandom bool) []uint64 { // returns shard ids + setup := func(shards, measurements, tagValues int, index string, useRandom bool) (*Store, []uint64) { // returns shard ids s := NewStore(index) if err := s.Open(); err != nil { panic(err) @@ -2251,13 +2392,7 @@ func BenchmarkStore_TagValues(b *testing.B) { shardIDs = append(shardIDs, uint64(i)) s.MustCreateShardWithData("db0", "rp0", i, genPoints(i, useRandom)...) 
} - return shardIDs - } - - teardown := func() { - if err := s.Close(); err != nil { - b.Fatal(err) - } + return s, shardIDs } // SHOW TAG VALUES WITH KEY IN ("host", "shard") @@ -2296,14 +2431,20 @@ func BenchmarkStore_TagValues(b *testing.B) { for useRand := 0; useRand < 2; useRand++ { for c, condition := range []influxql.Expr{cond1, cond2} { for _, bm := range benchmarks { - shardIDs := setup(bm.shards, bm.measurements, bm.tagValues, index, useRand == 1) + s, shardIDs := setup(bm.shards, bm.measurements, bm.tagValues, index, useRand == 1) + teardown := func() { + if err := s.Close(); err != nil { + b.Fatal(err) + } + } + cnd := "Unfiltered" if c == 0 { cnd = "Filtered" } b.Run("random_values="+fmt.Sprint(useRand == 1)+"_index="+index+"_"+cnd+"_"+bm.name, func(b *testing.B) { for i := 0; i < b.N; i++ { - if tvResult, err = s.TagValues(nil, shardIDs, condition); err != nil { + if tvResult, err = s.TagValues(context.Background(), nil, shardIDs, condition); err != nil { b.Fatal(err) } }