diff --git a/.circleci/config.yml b/.circleci/config.yml index 0584241ba422f..519f8fa4007ce 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -8,7 +8,7 @@ executors: working_directory: '/go/src/github.com/influxdata/telegraf' resource_class: large docker: - - image: 'quay.io/influxdb/telegraf-ci:1.21.3' + - image: 'quay.io/influxdb/telegraf-ci:1.21.4' environment: GOFLAGS: -p=4 mac: @@ -40,7 +40,7 @@ commands: default: "v1" goversion: type: string - default: 1.21.3 + default: 1.21.4 steps: - check-changed-files-or-halt - when: @@ -229,7 +229,7 @@ jobs: parameters: goversion: type: string - default: 1.21.3 + default: 1.21.4 cache_version: type: string default: "v1" @@ -261,7 +261,7 @@ jobs: parameters: goversion: type: string - default: 1.21.3 + default: 1.21.4 cache_version: type: string default: "v1" @@ -298,7 +298,7 @@ jobs: parameters: goversion: type: string - default: 1.21.3 + default: 1.21.4 cache_version: type: string default: "v1" @@ -321,7 +321,7 @@ jobs: parameters: goversion: type: string - default: 1.21.3 + default: 1.21.4 cache_version: type: string default: "v1" @@ -350,7 +350,7 @@ jobs: parameters: goversion: type: string - default: 1.21.3 + default: 1.21.4 cache_version: type: string default: "v1" diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 7eaebf9662705..6d10ab785a115 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,36 +1,18 @@ -# Required for all PRs - - - - -- [ ] Updated associated README.md. -- [ ] Wrote appropriate unit tests. 
-- [ ] Pull request title or commits are in [conventional commit format](https://www.conventionalcommits.org/en/v1.0.0/#summary) +## Checklist + - resolves # - - diff --git a/.github/workflows/govulncheck.yml b/.github/workflows/govulncheck.yml index b58fa4309f742..5be4bfa364821 100644 --- a/.github/workflows/govulncheck.yml +++ b/.github/workflows/govulncheck.yml @@ -15,5 +15,5 @@ jobs: - name: Scan for Vulnerabilities in Code uses: golang/govulncheck-action@v1 with: - go-version-input: 1.21.3 + go-version-input: 1.21.4 go-package: ./... diff --git a/.golangci.yml b/.golangci.yml index 2544296498c92..d8bb620f48ffe 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -31,6 +31,7 @@ linters: - sqlclosecheck - staticcheck - tenv + - testifylint - tparallel - typecheck - unconvert @@ -80,6 +81,7 @@ linters-settings: - badCall - badCond - badLock + - badRegexp - badSorting - builtinShadowDecl - caseOrder @@ -246,6 +248,22 @@ linters-settings: # Otherwise, only methods that take `*testing.T`, `*testing.B`, and `testing.TB` as arguments are checked. # Default: false all: true + testifylint: + # Enable specific checkers. + # https://github.com/Antonboom/testifylint#checkers + # Default: ["bool-compare", "compares", "empty", "error-is-as", "error-nil", "expected-actual", "float-compare", "len", "require-error", "suite-dont-use-pkg", "suite-extra-assert-call"] + enable: + - bool-compare + - compares + - empty + - error-is-as + - error-nil + - expected-actual + - len + - require-error + - suite-dont-use-pkg + - suite-extra-assert-call + - suite-thelper run: # timeout for analysis, e.g. 
30s, 5m, default is 1m diff --git a/.markdownlintignore b/.markdownlintignore index 56ddc43963ba3..513ed4e2d2198 100644 --- a/.markdownlintignore +++ b/.markdownlintignore @@ -1 +1,2 @@ +.github/PULL_REQUEST_TEMPLATE.md docs/includes/* diff --git a/CHANGELOG.md b/CHANGELOG.md index 3439e75ce03f7..976df1bedfbcd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,64 @@ removes those useless fields. In case you reference them, please adapt your queries! +## v1.28.5 [2023-11-15] + +### Bugfixes + +- [#14294](https://github.com/influxdata/telegraf/pull/14294) `inputs.ecs` Correct v4 metadata URLs +- [#14274](https://github.com/influxdata/telegraf/pull/14274) `inputs.intel_rdt` Do not fail on missing PIDs +- [#14283](https://github.com/influxdata/telegraf/pull/14283) `inputs.s7comm` Truncate strings to reported length +- [#14296](https://github.com/influxdata/telegraf/pull/14296) `parsers.json_v2` Log inner errors + +### Dependency Updates + +- [#14287](https://github.com/influxdata/telegraf/pull/14287) `deps` Bump github.com/gosnmp/gosnmp from 1.35.1-0.20230602062452-f30602b8dad6 to 1.36.1 +- [#14286](https://github.com/influxdata/telegraf/pull/14286) `deps` Bump github.com/Masterminds/semver/v3 from 3.2.0 to 3.2.1 +- [#14285](https://github.com/influxdata/telegraf/pull/14285) `deps` Bump golang.org/x/sync from 0.4.0 to 0.5.0 +- [#14289](https://github.com/influxdata/telegraf/pull/14289) `deps` Bump golang.org/x/mod from 0.13.0 to 0.14.0 +- [#14288](https://github.com/influxdata/telegraf/pull/14288) `deps` Bump google.golang.org/api from 0.149.0 to 0.150.0 + +## v1.28.4 [2023-11-13] + +### Bugfixes + +- [#14240](https://github.com/influxdata/telegraf/pull/14240) `config` Fix comment removal in TOML files +- [#14187](https://github.com/influxdata/telegraf/pull/14187) `inputs.cgroup` Escape backslashes in path +- 
[#14267](https://github.com/influxdata/telegraf/pull/14267) `inputs.disk` Add inodes_used_percent field +- [#14197](https://github.com/influxdata/telegraf/pull/14197) `inputs.ecs` Fix cgroupv2 CPU metrics +- [#14194](https://github.com/influxdata/telegraf/pull/14194) `inputs.ecs` Test for v4 metadata endpoint +- [#14262](https://github.com/influxdata/telegraf/pull/14262) `inputs.ipset` Parse lines with timeout +- [#14243](https://github.com/influxdata/telegraf/pull/14243) `inputs.mqtt_consumer` Resolve could not mark message delivered +- [#14195](https://github.com/influxdata/telegraf/pull/14195) `inputs.netflow` Fix sFlow metric timestamp +- [#14191](https://github.com/influxdata/telegraf/pull/14191) `inputs.prometheus` Read bearer token from file every time +- [#14068](https://github.com/influxdata/telegraf/pull/14068) `inputs.s7comm` Fix bit queries +- [#14241](https://github.com/influxdata/telegraf/pull/14241) `inputs.win_perf_counter` Do not rely on returned buffer size +- [#14176](https://github.com/influxdata/telegraf/pull/14176) `inputs.zfs` Parse metrics correctly on FreeBSD 14 +- [#14280](https://github.com/influxdata/telegraf/pull/14280) `inputs.zfs` Support gathering metrics on zfs 2.2.0 and later +- [#14115](https://github.com/influxdata/telegraf/pull/14115) `outputs.elasticsearch` Print error status value +- [#14213](https://github.com/influxdata/telegraf/pull/14213) `outputs.timestream` Clip uint64 values +- [#14149](https://github.com/influxdata/telegraf/pull/14149) `parsers.json_v2` Prevent race condition in parse function + +### Dependency Updates + +- [#14253](https://github.com/influxdata/telegraf/pull/14253) `deps` Bump cloud.google.com/go/storage from 1.30.1 to 1.34.1 +- [#14218](https://github.com/influxdata/telegraf/pull/14218) `deps` Bump github.com/aws/aws-sdk-go-v2/config from 1.18.42 to 1.19.1 +- 
[#14167](https://github.com/influxdata/telegraf/pull/14167) `deps` Bump github.com/aws/aws-sdk-go-v2/credentials from 1.13.40 to 1.13.43 +- [#14249](https://github.com/influxdata/telegraf/pull/14249) `deps` Bump github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs from 1.23.5 to 1.26.0 +- [#14166](https://github.com/influxdata/telegraf/pull/14166) `deps` Bump github.com/antchfx/xmlquery from 1.3.17 to 1.3.18 +- [#14217](https://github.com/influxdata/telegraf/pull/14217) `deps` Bump github.com/antchfx/xpath from 1.2.5-0.20230505064641-588960cceeac to 1.2.5 +- [#14219](https://github.com/influxdata/telegraf/pull/14219) `deps` Bump github.com/benbjohnson/clock from 1.3.3 to 1.3.5 +- [#14216](https://github.com/influxdata/telegraf/pull/14216) `deps` Bump github.com/compose-spec/compose-go from 1.16.0 to 1.20.0 +- [#14211](https://github.com/influxdata/telegraf/pull/14211) `deps` Bump github.com/docker/docker from 24.0.6 to 24.0.7 +- [#14164](https://github.com/influxdata/telegraf/pull/14164) `deps` Bump github.com/hashicorp/consul/api from 1.24.0 to 1.25.1 +- [#14251](https://github.com/influxdata/telegraf/pull/14251) `deps` Bump github.com/hashicorp/consul/api from 1.25.1 to 1.26.1 +- [#14225](https://github.com/influxdata/telegraf/pull/14225) `deps` Bump github.com/nats-io/nkeys from 0.4.5 to 0.4.6 +- [#14168](https://github.com/influxdata/telegraf/pull/14168) `deps` Bump github.com/prometheus/client_golang from 1.16.0 to 1.17.0 +- [#14252](https://github.com/influxdata/telegraf/pull/14252) `deps` Bump github.com/rabbitmq/amqp091-go from 1.8.1 to 1.9.0 +- [#14250](https://github.com/influxdata/telegraf/pull/14250) `deps` Bump github.com/showwin/speedtest-go from 1.6.6 to 1.6.7 +- [#14192](https://github.com/influxdata/telegraf/pull/14192) `deps` Bump 
google.golang.org/grpc from 1.58.2 to 1.58.3 +- [#14165](https://github.com/influxdata/telegraf/pull/14165) `deps` Bump k8s.io/client-go from 0.28.2 to 0.28.3 + ## v1.28.3 [2023-10-23] ### Bugfixes diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 81a1420ecf162..3a4451baa6d11 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -22,6 +22,11 @@ We recommend posting support questions in our [Community Slack](https://influxda ## Contributing code +### AI Generated Code + +We currently cannot accept AI generated code contributions. Code contributed +should be your own per the CLA. + ### Creating a pull request 1. [Sign the CLA][cla]. diff --git a/Makefile b/Makefile index 97804e0f26f7c..91a1d8b2cb4eb 100644 --- a/Makefile +++ b/Makefile @@ -249,8 +249,8 @@ plugins/parsers/influx/machine.go: plugins/parsers/influx/machine.go.rl .PHONY: ci ci: - docker build -t quay.io/influxdb/telegraf-ci:1.21.3 - < scripts/ci.docker - docker push quay.io/influxdb/telegraf-ci:1.21.3 + docker build -t quay.io/influxdb/telegraf-ci:1.21.4 - < scripts/ci.docker + docker push quay.io/influxdb/telegraf-ci:1.21.4 .PHONY: install install: $(buildbin) diff --git a/cmd/telegraf/main_win_test.go b/cmd/telegraf/main_win_test.go index 42c71b99a0f9b..000d1482eb50c 100644 --- a/cmd/telegraf/main_win_test.go +++ b/cmd/telegraf/main_win_test.go @@ -33,6 +33,6 @@ func TestWindowsFlagsAreSet(t *testing.T) { require.Equal(t, expectedString, m.serviceName) require.Equal(t, expectedString, m.serviceDisplayName) require.Equal(t, expectedString, m.serviceRestartDelay) - require.Equal(t, true, m.serviceAutoRestart) - require.Equal(t, true, m.console) + require.True(t, m.serviceAutoRestart) + require.True(t, m.console) } diff --git a/config/config.go b/config/config.go index 06be5ada09f96..97f8afc974497 100644 --- a/config/config.go +++ b/config/config.go @@ -20,8 +20,6 @@ import ( "sync" "time" - "github.com/compose-spec/compose-go/template" - 
"github.com/compose-spec/compose-go/utils" "github.com/coreos/go-semver/semver" "github.com/influxdata/toml" "github.com/influxdata/toml/ast" @@ -790,103 +788,6 @@ func parseConfig(contents []byte) (*ast.Table, error) { return toml.Parse(outputBytes) } -func removeComments(contents []byte) ([]byte, error) { - tomlReader := bytes.NewReader(contents) - - // Initialize variables for tracking state - var inQuote, inComment, escaped bool - var quoteChar byte - - // Initialize buffer for modified TOML data - var output bytes.Buffer - - buf := make([]byte, 1) - // Iterate over each character in the file - for { - _, err := tomlReader.Read(buf) - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return nil, err - } - char := buf[0] - - // Toggle the escaped state at backslash to we have true every odd occurrence. - if char == '\\' { - escaped = !escaped - } - - if inComment { - // If we're currently in a comment, check if this character ends the comment - if char == '\n' { - // End of line, comment is finished - inComment = false - _, _ = output.WriteRune('\n') - } - } else if inQuote { - // If we're currently in a quote, check if this character ends the quote - if char == quoteChar && !escaped { - // End of quote, we're no longer in a quote - inQuote = false - } - output.WriteByte(char) - } else { - // Not in a comment or a quote - if (char == '"' || char == '\'') && !escaped { - // Start of quote - inQuote = true - quoteChar = char - output.WriteByte(char) - } else if char == '#' && !escaped { - // Start of comment - inComment = true - } else { - // Not a comment or a quote, just output the character - output.WriteByte(char) - } - } - - // Reset escaping if any other character occurred - if char != '\\' { - escaped = false - } - } - return output.Bytes(), nil -} - -func substituteEnvironment(contents []byte, oldReplacementBehavior bool) ([]byte, error) { - options := []template.Option{ - template.WithReplacementFunction(func(s 
string, m template.Mapping, cfg *template.Config) (string, error) { - result, applied, err := template.DefaultReplacementAppliedFunc(s, m, cfg) - if err == nil && !applied { - // Keep undeclared environment-variable patterns to reproduce - // pre-v1.27 behavior - return s, nil - } - if err != nil && strings.HasPrefix(err.Error(), "Invalid template:") { - // Keep invalid template patterns to ignore regexp substitutions - // like ${1} - return s, nil - } - return result, err - }), - template.WithoutLogging, - } - if oldReplacementBehavior { - options = append(options, template.WithPattern(oldVarRe)) - } - - envMap := utils.GetAsEqualsMap(os.Environ()) - retVal, err := template.SubstituteWithOptions(string(contents), func(k string) (string, bool) { - if v, ok := envMap[k]; ok { - return v, ok - } - return "", false - }, options...) - return []byte(retVal), err -} - func (c *Config) addAggregator(name string, table *ast.Table) error { creator, ok := aggregators.Aggregators[name] if !ok { diff --git a/config/config_test.go b/config/config_test.go index 3a58a912d9f59..870e7a2b8939e 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -621,7 +621,7 @@ func TestConfig_SerializerInterfaceNewFormat(t *testing.T) { options = append(options, cmpopts.IgnoreFields(stype, settings.mask...)) } - // Do a manual comparision as require.EqualValues will also work on unexported fields + // Do a manual comparison as require.EqualValues will also work on unexported fields // that cannot be cleared or ignored. diff := cmp.Diff(expected[i], actual[i], options...) 
require.Emptyf(t, diff, "Difference in SetSerializer() for %q", format) @@ -820,7 +820,7 @@ func TestConfig_ParserInterface(t *testing.T) { options = append(options, cmpopts.IgnoreFields(stype, settings.mask...)) } - // Do a manual comparision as require.EqualValues will also work on unexported fields + // Do a manual comparison as require.EqualValues will also work on unexported fields // that cannot be cleared or ignored. diff := cmp.Diff(expected[i], actual[i], options...) require.Emptyf(t, diff, "Difference in SetParser() for %q", format) @@ -1039,7 +1039,7 @@ func TestConfig_ProcessorsWithParsers(t *testing.T) { options = append(options, cmpopts.IgnoreFields(stype, settings.mask...)) } - // Do a manual comparision as require.EqualValues will also work on unexported fields + // Do a manual comparison as require.EqualValues will also work on unexported fields // that cannot be cleared or ignored. diff := cmp.Diff(expected[i], actual[i], options...) require.Emptyf(t, diff, "Difference in SetParser() for %q", format) diff --git a/config/deprecation.go b/config/deprecation.go index c0f7ac682b78a..52c315c38dee1 100644 --- a/config/deprecation.go +++ b/config/deprecation.go @@ -297,7 +297,7 @@ func walkPluginStruct(value reflect.Value, fn func(f reflect.StructField, fv ref } // Walk over the struct fields and call the given function. If we encounter more complex embedded - // elements (stucts, slices/arrays, maps) we need to descend into those elements as they might + // elements (structs, slices/arrays, maps) we need to descend into those elements as they might // contain structures nested in the current structure. 
for i := 0; i < t.NumField(); i++ { field := t.Field(i) diff --git a/config/envvar.go b/config/envvar.go new file mode 100644 index 0000000000000..09f85a4ae04a7 --- /dev/null +++ b/config/envvar.go @@ -0,0 +1,253 @@ +package config + +import ( + "bytes" + "errors" + "io" + "os" + "strings" + + "github.com/compose-spec/compose-go/template" + "github.com/compose-spec/compose-go/utils" +) + +type trimmer struct { + input *bytes.Reader + output bytes.Buffer +} + +func removeComments(buf []byte) ([]byte, error) { + t := &trimmer{ + input: bytes.NewReader(buf), + output: bytes.Buffer{}, + } + err := t.process() + return t.output.Bytes(), err +} + +func (t *trimmer) process() error { + for { + // Read the next byte until EOF + c, err := t.input.ReadByte() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + + // Switch states if we need to + switch c { + case '\\': + _ = t.input.UnreadByte() + err = t.escape() + case '\'': + _ = t.input.UnreadByte() + if t.hasNQuotes(c, 3) { + err = t.tripleSingleQuote() + } else { + err = t.singleQuote() + } + case '"': + _ = t.input.UnreadByte() + if t.hasNQuotes(c, 3) { + err = t.tripleDoubleQuote() + } else { + err = t.doubleQuote() + } + case '#': + err = t.comment() + default: + if err := t.output.WriteByte(c); err != nil { + return err + } + continue + } + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + } + return nil +} + +func (t *trimmer) hasNQuotes(ref byte, limit int64) bool { + var count int64 + // Look ahead check if the next characters are what we expect + for count = 0; count < limit; count++ { + c, err := t.input.ReadByte() + if err != nil || c != ref { + break + } + } + // We also need to unread the non-matching character + offset := -count + if count < limit { + offset-- + } + // Unread the matched characters + _, _ = t.input.Seek(offset, io.SeekCurrent) + return count >= limit +} + +func (t *trimmer) readWriteByte() (byte, error) { + c, err := 
t.input.ReadByte() + if err != nil { + return 0, err + } + return c, t.output.WriteByte(c) +} + +func (t *trimmer) escape() error { + // Consumer the known starting backslash and quote + _, _ = t.readWriteByte() + + // Read the next character which is the escaped one and exit + _, err := t.readWriteByte() + return err +} + +func (t *trimmer) singleQuote() error { + // Consumer the known starting quote + _, _ = t.readWriteByte() + + // Read bytes until EOF, line end or another single quote + for { + if c, err := t.readWriteByte(); err != nil || c == '\'' || c == '\n' { + return err + } + } +} + +func (t *trimmer) tripleSingleQuote() error { + for i := 0; i < 3; i++ { + // Consumer the known starting quotes + _, _ = t.readWriteByte() + } + + // Read bytes until EOF or another set of triple single quotes + for { + c, err := t.readWriteByte() + if err != nil { + return err + } + + if c == '\'' && t.hasNQuotes('\'', 2) { + // Consumer the two additional ending quotes + _, _ = t.readWriteByte() + _, _ = t.readWriteByte() + return nil + } + } +} + +func (t *trimmer) doubleQuote() error { + // Consumer the known starting quote + _, _ = t.readWriteByte() + + // Read bytes until EOF, line end or another double quote + for { + c, err := t.input.ReadByte() + if err != nil { + return err + } + switch c { + case '\\': + // Found escaped character + _ = t.input.UnreadByte() + if err := t.escape(); err != nil { + return err + } + continue + case '"', '\n': + // Found terminator + return t.output.WriteByte(c) + } + if err := t.output.WriteByte(c); err != nil { + return err + } + } +} + +func (t *trimmer) tripleDoubleQuote() error { + for i := 0; i < 3; i++ { + // Consumer the known starting quotes + _, _ = t.readWriteByte() + } + + // Read bytes until EOF or another set of triple double quotes + for { + c, err := t.input.ReadByte() + if err != nil { + return err + } + switch c { + case '\\': + // Found escaped character + _ = t.input.UnreadByte() + if err := t.escape(); err != nil 
{ + return err + } + continue + case '"': + _ = t.output.WriteByte(c) + if t.hasNQuotes('"', 2) { + // Consumer the two additional ending quotes + _, _ = t.readWriteByte() + _, _ = t.readWriteByte() + return nil + } + continue + } + if err := t.output.WriteByte(c); err != nil { + return err + } + } +} + +func (t *trimmer) comment() error { + // Read bytes until EOF or a line break + for { + c, err := t.input.ReadByte() + if err != nil { + return err + } + if c == '\n' { + return t.output.WriteByte(c) + } + } +} + +func substituteEnvironment(contents []byte, oldReplacementBehavior bool) ([]byte, error) { + options := []template.Option{ + template.WithReplacementFunction(func(s string, m template.Mapping, cfg *template.Config) (string, error) { + result, applied, err := template.DefaultReplacementAppliedFunc(s, m, cfg) + if err == nil && !applied { + // Keep undeclared environment-variable patterns to reproduce + // pre-v1.27 behavior + return s, nil + } + if err != nil && strings.HasPrefix(err.Error(), "Invalid template:") { + // Keep invalid template patterns to ignore regexp substitutions + // like ${1} + return s, nil + } + return result, err + }), + template.WithoutLogging, + } + if oldReplacementBehavior { + options = append(options, template.WithPattern(oldVarRe)) + } + + envMap := utils.GetAsEqualsMap(os.Environ()) + retVal, err := template.SubstituteWithOptions(string(contents), func(k string) (string, bool) { + if v, ok := envMap[k]; ok { + return v, ok + } + return "", false + }, options...) 
+ return []byte(retVal), err +} diff --git a/config/internal_test.go b/config/internal_test.go index b6cd9e482a550..5692cd19b6f5b 100644 --- a/config/internal_test.go +++ b/config/internal_test.go @@ -1,9 +1,12 @@ package config import ( + "bytes" "fmt" "net/http" "net/http/httptest" + "os" + "path/filepath" "testing" "time" @@ -351,6 +354,26 @@ func TestParseConfig(t *testing.T) { } } +func TestRemoveComments(t *testing.T) { + // Read expectation + expected, err := os.ReadFile(filepath.Join("testdata", "envvar_comments_expected.toml")) + require.NoError(t, err) + + // Read the file and remove the comments + buf, err := os.ReadFile(filepath.Join("testdata", "envvar_comments.toml")) + require.NoError(t, err) + removed, err := removeComments(buf) + require.NoError(t, err) + lines := bytes.Split(removed, []byte{'\n'}) + for i, line := range lines { + lines[i] = bytes.TrimRight(line, " \t") + } + actual := bytes.Join(lines, []byte{'\n'}) + + // Do the comparison + require.Equal(t, string(expected), string(actual)) +} + func TestURLRetries3Fails(t *testing.T) { httpLoadConfigRetryInterval = 0 * time.Second responseCounter := 0 diff --git a/config/testdata/envvar_comments.toml b/config/testdata/envvar_comments.toml new file mode 100644 index 0000000000000..5f35f04327c22 --- /dev/null +++ b/config/testdata/envvar_comments.toml @@ -0,0 +1,99 @@ +# Telegraf Configuration +# +# Telegraf is entirely plugin driven. All metrics are gathered from the +# declared inputs, and sent to the declared outputs. +# +# Plugins must be declared in here to be active. +# To deactivate a plugin, comment out the name and any variables. +# +# Use 'telegraf -config telegraf.conf -test' to see what metrics a config +# file would generate. +# +# Environment variables can be used anywhere in this config file, simply surround +# them with ${}. 
For strings the variable must be within quotes (ie, "${STR_VAR}"), +# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR}) + +[global_tags] + +[agent] +interval = "10s" +round_interval = true +metric_batch_size = 1000 +metric_buffer_limit = 10000 +collection_jitter = "0s" +flush_interval = '10s' +flush_jitter = "0s" +precision = "" +hostname = '' +omit_hostname = false + +[[outputs.influxdb]] + setting1 = '#'#test + setting2 = '''#'''#test + setting3 = "#"#test + setting4 = """#"""#test + wicked1 = "\""#test + wicked2 = """\""""#test + +[[inputs.cpu]] + percpu = true + #totalcpu = true + # collect_cpu_time = false + ## report_active = false + +[[a.plugin]] + mylist = [ + "value 1", # a good value + "value 2", # a better value + "value 3", "value 4", + 'value5', """tagwith#value""", + ] # Should work + +[[some.stuff]] + a = 'not a #comment' + b = '''not a #comment''' + c = "not a #comment" + d = """not a #comment""" + e = '''not a #comment containing "quotes"''' + f = '''not a #comment containing 'quotes'?''' + g = """not a #comment containing "quotes"?""" + +# Issue #14237 +[[inputs.myplugin]] +value = '''This isn't a #comment.''' + +[[processors.starlark]] + script = """ +# Drop fields if they contain a string. +# +# Example Input: +# measurement,host=hostname a=1,b="somestring" 1597255410000000000 +# +# Example Output: +# measurement,host=hostname a=1 1597255410000000000 + +def apply(metric): + for k, v in metric.fields.items(): + if type(v) == "string": + metric.fields.pop(k) + + return metric +""" + +[[processors.starlark]] + script = ''' +# Drop fields if they contain a string. 
+# +# Example Input: +# measurement,host=hostname a=1,b="somestring" 1597255410000000000 +# +# Example Output: +# measurement,host=hostname a=1 1597255410000000000 + +def apply(metric): + for k, v in metric.fields.items(): + if type(v) == "string": + metric.fields.pop(k) + + return metric +''' diff --git a/config/testdata/envvar_comments_expected.toml b/config/testdata/envvar_comments_expected.toml new file mode 100644 index 0000000000000..3e38656fd9217 --- /dev/null +++ b/config/testdata/envvar_comments_expected.toml @@ -0,0 +1,99 @@ + + + + + + + + + + + + + + + +[global_tags] + +[agent] +interval = "10s" +round_interval = true +metric_batch_size = 1000 +metric_buffer_limit = 10000 +collection_jitter = "0s" +flush_interval = '10s' +flush_jitter = "0s" +precision = "" +hostname = '' +omit_hostname = false + +[[outputs.influxdb]] + setting1 = '#' + setting2 = '''#''' + setting3 = "#" + setting4 = """#""" + wicked1 = "\"" + wicked2 = """\"""" + +[[inputs.cpu]] + percpu = true + + + + +[[a.plugin]] + mylist = [ + "value 1", + "value 2", + "value 3", "value 4", + 'value5', """tagwith#value""", + ] + +[[some.stuff]] + a = 'not a #comment' + b = '''not a #comment''' + c = "not a #comment" + d = """not a #comment""" + e = '''not a #comment containing "quotes"''' + f = '''not a #comment containing 'quotes'?''' + g = """not a #comment containing "quotes"?""" + + +[[inputs.myplugin]] +value = '''This isn't a #comment.''' + +[[processors.starlark]] + script = """ +# Drop fields if they contain a string. +# +# Example Input: +# measurement,host=hostname a=1,b="somestring" 1597255410000000000 +# +# Example Output: +# measurement,host=hostname a=1 1597255410000000000 + +def apply(metric): + for k, v in metric.fields.items(): + if type(v) == "string": + metric.fields.pop(k) + + return metric +""" + +[[processors.starlark]] + script = ''' +# Drop fields if they contain a string. 
+# +# Example Input: +# measurement,host=hostname a=1,b="somestring" 1597255410000000000 +# +# Example Output: +# measurement,host=hostname a=1 1597255410000000000 + +def apply(metric): + for k, v in metric.fields.items(): + if type(v) == "string": + metric.fields.pop(k) + + return metric +''' diff --git a/config/types_test.go b/config/types_test.go index 3a0d352fa1ff3..f66dc9b028b8c 100644 --- a/config/types_test.go +++ b/config/types_test.go @@ -29,7 +29,7 @@ func TestConfigDuration(t *testing.T) { p := c.Processors[0].Processor.(*reverse_dns.ReverseDNS) require.EqualValues(t, p.CacheTTL, 3*time.Hour) require.EqualValues(t, p.LookupTimeout, 17*time.Second) - require.Equal(t, p.MaxParallelLookups, 13) + require.Equal(t, 13, p.MaxParallelLookups) require.True(t, p.Ordered) } diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index a9034f5cd3dc3..dc00fc8bfa726 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -231,7 +231,7 @@ combining an integer value and time unit as a string value. Valid time units ar Global tags can be specified in the `[global_tags]` table in key="value" format. All metrics that are gathered will be tagged with the tags specified. -Global tags are overriden by tags set by plugins. +Global tags are overridden by tags set by plugins. ```toml [global_tags] @@ -375,7 +375,7 @@ Parameters that can be used with any input plugin: Overrides the `precision` setting of the [agent][Agent] for the plugin. Collected metrics are rounded to the precision specified as an [interval][]. - When this value is set on a service input, multiple events occuring at the + When this value is set on a service input, multiple events occurring at the same timestamp may be merged by the output database. 
- **collection_jitter**: diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 2f6412802055a..254e0c4926dfc 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -26,6 +26,8 @@ following works: - github.com/Azure/go-ntlmssp [MIT License](https://github.com/Azure/go-ntlmssp/blob/master/LICENSE) - github.com/AzureAD/microsoft-authentication-library-for-go [MIT License](https://github.com/AzureAD/microsoft-authentication-library-for-go/blob/main/LICENSE) - github.com/ClickHouse/clickhouse-go [MIT License](https://github.com/ClickHouse/clickhouse-go/blob/master/LICENSE) +- github.com/IBM/nzgo [MIT License](https://github.com/IBM/nzgo/blob/master/LICENSE.md) +- github.com/IBM/sarama [MIT License](https://github.com/IBM/sarama/blob/master/LICENSE.md) - github.com/JohnCGriffin/overflow [MIT License](https://github.com/JohnCGriffin/overflow/blob/master/README.md) - github.com/Masterminds/goutils [Apache License 2.0](https://github.com/Masterminds/goutils/blob/master/LICENSE.txt) - github.com/Masterminds/semver [MIT License](https://github.com/Masterminds/semver/blob/master/LICENSE.txt) @@ -243,7 +245,7 @@ following works: - github.com/mdlayher/genetlink [MIT License](https://github.com/mdlayher/genetlink/blob/master/LICENSE.md) - github.com/mdlayher/netlink [MIT License](https://github.com/mdlayher/netlink/blob/master/LICENSE.md) - github.com/mdlayher/socket [MIT License](https://github.com/mdlayher/socket/blob/master/LICENSE.md) -- github.com/mdlayher/vsock [MIT License](https://github.com/mdlayher/vsock/blob/master/LICENSE.md) +- github.com/mdlayher/vsock [MIT License](https://github.com/mdlayher/vsock/blob/main/LICENSE.md) - github.com/microsoft/ApplicationInsights-Go [MIT 
License](https://github.com/microsoft/ApplicationInsights-Go/blob/master/LICENSE) - github.com/microsoft/go-mssqldb [BSD 3-Clause "New" or "Revised" License](https://github.com/microsoft/go-mssqldb/blob/master/LICENSE.txt) - github.com/miekg/dns [BSD 3-Clause Clear License](https://github.com/miekg/dns/blob/master/LICENSE) diff --git a/docs/SQL_DRIVERS_INPUT.md b/docs/SQL_DRIVERS_INPUT.md index 7d1606dc9b9d7..d2ce27ca8ade9 100644 --- a/docs/SQL_DRIVERS_INPUT.md +++ b/docs/SQL_DRIVERS_INPUT.md @@ -15,6 +15,7 @@ might change between versions. Please check the driver documentation for availab | TiDB | [tidb](https://github.com/go-sql-driver/mysql) | mysql | see _mysql_ driver | uses MySQL driver | | ClickHouse | [clickhouse](https://github.com/ClickHouse/clickhouse-go) | | `tcp://host:port[?param1=value&...¶mN=value]"` | see [clickhouse-go docs](https://github.com/ClickHouse/clickhouse-go#dsn) for more information | | Oracle | [oracle](https://github.com/sijms/go-ora) | oracle | `oracle://username:password@host:port/service?param1=value¶m2=value` | see [driver docs](https://github.com/sijms/go-ora/blob/master/README.md) for more information | +| IBM Netezza | [nzgo](https://github.com/IBM/nzgo) | |`host=your_nz_host port=5480 user=your_nz_user password=your_nz_password dbname=your_nz_db_name sslmode=disable`| see [driver docs](https://pkg.go.dev/github.com/IBM/nzgo/v12) for more information | ## Comments diff --git a/docs/developers/REVIEWS.md b/docs/developers/REVIEWS.md index 569d44eac56d6..849f4f7ecf03d 100644 --- a/docs/developers/REVIEWS.md +++ b/docs/developers/REVIEWS.md @@ -70,7 +70,7 @@ In case you still want to continue with the PR, feel free to reopen it. ## Linting -Each pull request will have the appriopriate linters checking the files for any common mistakes. 
The github action Super Linter is used: [super-pinter](https://github.com/github/super-linter). If it is failing you can click on the action and read the logs to figure out the issue. You can also run the github action locally by following these instructions: [run-linter-locally.md](https://github.com/github/super-linter/blob/main/docs/run-linter-locally.md). You can find more information on each of the linters in the super linter readme. +Each pull request will have the appropriate linters checking the files for any common mistakes. The github action Super Linter is used: [super-linter](https://github.com/github/super-linter). If it is failing you can click on the action and read the logs to figure out the issue. You can also run the github action locally by following these instructions: [run-linter-locally.md](https://github.com/github/super-linter/blob/main/docs/run-linter-locally.md). You can find more information on each of the linters in the super linter readme. 
## Testing diff --git a/go.mod b/go.mod index 3448e395a140d..62123d11a0bca 100644 --- a/go.mod +++ b/go.mod @@ -3,16 +3,16 @@ module github.com/influxdata/telegraf go 1.21 require ( - cloud.google.com/go/bigquery v1.56.0 - cloud.google.com/go/monitoring v1.16.0 + cloud.google.com/go/bigquery v1.57.1 + cloud.google.com/go/monitoring v1.16.1 cloud.google.com/go/pubsub v1.33.0 - cloud.google.com/go/storage v1.30.1 + cloud.google.com/go/storage v1.34.1 collectd.org v0.5.0 github.com/99designs/keyring v1.2.2 github.com/Azure/azure-event-hubs-go/v3 v3.6.1 github.com/Azure/azure-kusto-go v0.13.1 - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/monitor/armmonitor v0.10.1 - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/monitor/armmonitor v0.11.0 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd github.com/Azure/go-autorest/autorest v0.11.29 github.com/Azure/go-autorest/autorest/adal v0.9.23 @@ -20,6 +20,8 @@ require ( github.com/BurntSushi/toml v1.3.2 github.com/ClickHouse/clickhouse-go v1.5.4 github.com/DATA-DOG/go-sqlmock v1.5.0 + github.com/IBM/nzgo/v12 v12.0.9-0.20231115043259-49c27f2dfe48 + github.com/IBM/sarama v1.41.3 github.com/Masterminds/sprig v2.22.0+incompatible github.com/Masterminds/sprig/v3 v3.2.3 github.com/Mellanox/rdmamap v1.1.0 @@ -38,18 +40,18 @@ require ( github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 github.com/awnumar/memguard v0.22.3 - github.com/aws/aws-sdk-go-v2 v1.21.2 + github.com/aws/aws-sdk-go-v2 v1.23.1 github.com/aws/aws-sdk-go-v2/config v1.19.1 
github.com/aws/aws-sdk-go-v2/credentials v1.13.43 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13 github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.26.2 - github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.23.5 + github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.27.2 github.com/aws/aws-sdk-go-v2/service/dynamodb v1.20.0 github.com/aws/aws-sdk-go-v2/service/ec2 v1.120.0 github.com/aws/aws-sdk-go-v2/service/kinesis v1.18.5 github.com/aws/aws-sdk-go-v2/service/sts v1.23.2 github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.17.2 - github.com/aws/smithy-go v1.15.0 + github.com/aws/smithy-go v1.17.0 github.com/benbjohnson/clock v1.3.5 github.com/blues/jsonata-go v1.5.4 github.com/bmatcuk/doublestar/v3 v3.0.0 @@ -61,6 +63,7 @@ require ( github.com/coocood/freecache v1.2.3 github.com/coreos/go-semver v0.3.1 github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f + github.com/coreos/go-systemd/v22 v22.5.0 github.com/couchbase/go-couchbase v0.1.1 github.com/digitalocean/go-libvirt v0.0.0-20220811165305-15feff002086 github.com/dimchansky/utfbom v1.1.1 @@ -89,16 +92,16 @@ require ( github.com/google/go-github/v32 v32.1.0 github.com/google/gopacket v1.1.19 github.com/google/licensecheck v0.3.1 - github.com/google/uuid v1.3.1 + github.com/google/uuid v1.4.0 github.com/gopcua/opcua v0.4.0 - github.com/gophercloud/gophercloud v1.5.0 + github.com/gophercloud/gophercloud v1.7.0 github.com/gorilla/mux v1.8.0 github.com/gorilla/websocket v1.5.0 - github.com/gosnmp/gosnmp v1.35.1-0.20230602062452-f30602b8dad6 + github.com/gosnmp/gosnmp v1.36.1 github.com/grid-x/modbus v0.0.0-20211113184042-7f2251c342c9 github.com/gwos/tcg/sdk v0.0.0-20220621192633-df0eac0a1a4c 
github.com/harlow/kinesis-consumer v0.3.6-0.20211204214318-c2b9f79d7ab6 - github.com/hashicorp/consul/api v1.25.1 + github.com/hashicorp/consul/api v1.26.1 github.com/hashicorp/go-uuid v1.0.3 github.com/influxdata/go-syslog/v3 v3.0.0 github.com/influxdata/influxdb-observability/common v0.5.6 @@ -120,7 +123,7 @@ require ( github.com/kardianos/service v1.2.2 github.com/karrick/godirwalk v1.16.2 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 - github.com/klauspost/compress v1.17.0 + github.com/klauspost/compress v1.17.1 github.com/klauspost/pgzip v1.2.6 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b github.com/linkedin/goavro/v2 v2.12.0 @@ -128,7 +131,7 @@ require ( github.com/lxc/lxd v0.0.0-20220920163450-e9b4b514106a github.com/matttproud/golang_protobuf_extensions v1.0.4 github.com/mdlayher/apcupsd v0.0.0-20220319200143-473c7b5f3c6a - github.com/mdlayher/vsock v1.1.1 + github.com/mdlayher/vsock v1.2.1 github.com/microsoft/ApplicationInsights-Go v0.4.4 github.com/microsoft/go-mssqldb v1.5.0 github.com/miekg/dns v1.1.56 @@ -152,20 +155,20 @@ require ( github.com/pion/dtls/v2 v2.2.7 github.com/prometheus-community/pro-bing v0.3.0 github.com/prometheus/client_golang v1.17.0 - github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 + github.com/prometheus/client_model v0.5.0 github.com/prometheus/common v0.44.0 github.com/prometheus/procfs v0.11.1 - github.com/prometheus/prometheus v0.46.0 - github.com/rabbitmq/amqp091-go v1.8.1 + github.com/prometheus/prometheus v0.48.0 + github.com/rabbitmq/amqp091-go v1.9.0 github.com/redis/go-redis/v9 v9.2.1 github.com/riemann/riemann-go-client v0.5.1-0.20211206220514-f58f10cdce16 
github.com/robbiet480/go.nut v0.0.0-20220219091450-bd8f121e1fa1 - github.com/robinson/gos7 v0.0.0-20231012111941-bdaa10e92e16 + github.com/robinson/gos7 v0.0.0-20231031082500-fb5a72fd499e github.com/safchain/ethtool v0.3.0 github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 github.com/sensu/sensu-go/api/core/v2 v2.16.0 - github.com/shirou/gopsutil/v3 v3.23.9 - github.com/showwin/speedtest-go v1.6.6 + github.com/shirou/gopsutil/v3 v3.23.10 + github.com/showwin/speedtest-go v1.6.7 github.com/signalfx/golib/v3 v3.3.53 github.com/sirupsen/logrus v1.9.3 github.com/sleepinggenius2/gosmi v0.4.4 @@ -175,43 +178,44 @@ require ( github.com/stretchr/testify v1.8.4 github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 github.com/testcontainers/testcontainers-go v0.26.0 + github.com/testcontainers/testcontainers-go/modules/kafka v0.26.1-0.20231116140448-68d5f8983d09 github.com/thomasklein94/packer-plugin-libvirt v0.5.0 github.com/tidwall/gjson v1.14.4 github.com/tinylib/msgp v1.1.8 github.com/urfave/cli/v2 v2.25.7 github.com/vapourismo/knx-go v0.0.0-20220829185957-fb5458a5389d github.com/vjeantet/grok v1.0.1 - github.com/vmware/govmomi v0.32.0 + github.com/vmware/govmomi v0.33.1 github.com/wavefronthq/wavefront-sdk-go v0.15.0 github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf github.com/x448/float16 v0.8.4 github.com/xdg/scram v1.0.5 github.com/yuin/goldmark v1.5.6 go.mongodb.org/mongo-driver v1.12.1 - go.opentelemetry.io/collector/pdata v1.0.0-rcv0014 - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.39.0 - go.opentelemetry.io/otel/sdk/metric v0.39.0 + go.opentelemetry.io/collector/pdata v1.0.0-rcv0016 + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0 + 
go.opentelemetry.io/otel/sdk/metric v1.21.0 go.starlark.net v0.0.0-20220328144851-d1966c6b9fcd golang.org/x/crypto v0.14.0 - golang.org/x/mod v0.13.0 + golang.org/x/mod v0.14.0 golang.org/x/net v0.17.0 golang.org/x/oauth2 v0.13.0 - golang.org/x/sync v0.4.0 - golang.org/x/sys v0.13.0 + golang.org/x/sync v0.5.0 + golang.org/x/sys v0.14.0 golang.org/x/term v0.13.0 - golang.org/x/text v0.13.0 + golang.org/x/text v0.14.0 golang.zx2c4.com/wireguard/wgctrl v0.0.0-20211230205640-daad0b7ba671 gonum.org/v1/gonum v0.14.0 - google.golang.org/api v0.147.0 - google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 - google.golang.org/grpc v1.58.3 + google.golang.org/api v0.150.0 + google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b + google.golang.org/grpc v1.59.0 google.golang.org/protobuf v1.31.0 gopkg.in/gorethink/gorethink.v3 v3.0.5 gopkg.in/olivere/elastic.v5 v5.0.86 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.28.3 - k8s.io/apimachinery v0.28.3 + k8s.io/api v0.28.4 + k8s.io/apimachinery v0.28.4 k8s.io/client-go v0.28.3 layeh.com/radius v0.0.0-20221205141417-e7fbddd11d68 modernc.org/sqlite v1.24.0 @@ -219,18 +223,18 @@ require ( require ( cloud.google.com/go v0.110.8 // indirect - cloud.google.com/go/compute v1.23.0 // indirect + cloud.google.com/go/compute v1.23.1 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v1.1.2 // indirect + cloud.google.com/go/iam v1.1.3 // indirect code.cloudfoundry.org/clock v1.0.0 // indirect dario.cat/mergo v1.0.0 // indirect github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect github.com/Azure/azure-amqp-common-go/v4 v4.2.0 // indirect github.com/Azure/azure-pipeline-go v0.2.3 // indirect github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0-beta.1 // indirect - 
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 // indirect github.com/Azure/go-amqp v1.0.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect @@ -242,11 +246,11 @@ require ( github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 // indirect github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver v1.5.0 // indirect - github.com/Masterminds/semver/v3 v3.2.0 + github.com/Masterminds/semver/v3 v3.2.1 github.com/Microsoft/go-winio v0.6.1 // indirect github.com/Microsoft/hcsshim v0.11.1 // indirect github.com/alecthomas/participle v0.4.1 // indirect @@ -259,8 +263,8 @@ require ( github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.13 // indirect github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.2.0 // indirect github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.70 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.4 
// indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.4 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45 // indirect github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.26 // indirect github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.4.0 // indirect @@ -297,8 +301,8 @@ require ( github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/dvsekhvalnov/jose2go v1.5.0 // indirect - github.com/eapache/go-resiliency v1.3.0 // indirect - github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6 // indirect + github.com/eapache/go-resiliency v1.4.0 // indirect + github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect github.com/eapache/queue v1.1.0 // indirect github.com/echlebek/timeproxy v1.0.0 // indirect github.com/emicklei/go-restful/v3 v3.10.2 // indirect @@ -307,7 +311,7 @@ require ( github.com/fxamacker/cbor v1.5.1 // indirect github.com/gabriel-vasile/mimetype v1.4.2 // indirect github.com/go-asn1-ber/asn1-ber v1.5.4 // indirect - github.com/go-logr/logr v1.2.4 // indirect + github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-macaroon-bakery/macaroonpb v1.0.0 // indirect github.com/go-openapi/jsonpointer v0.20.0 // indirect @@ -318,6 +322,7 @@ require ( github.com/goburrow/serial v0.1.1-0.20211022031912-bfb69110f8dd // indirect github.com/goccy/go-json v0.10.2 // indirect github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect + github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gofrs/uuid v4.2.0+incompatible // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect @@ -330,7 +335,7 @@ require ( 
github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/s2a-go v0.1.7 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.1 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect github.com/googleapis/gax-go/v2 v2.12.0 // indirect github.com/grid-x/serial v0.0.0-20211107191517-583c7356b3aa // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect @@ -359,7 +364,7 @@ require ( github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/josharian/intern v1.0.0 // indirect - github.com/josharian/native v1.0.0 // indirect + github.com/josharian/native v1.1.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/juju/webbrowser v1.0.0 // indirect @@ -376,8 +381,8 @@ require ( github.com/mattn/go-ieproxy v0.0.1 // indirect github.com/mattn/go-isatty v0.0.19 // indirect github.com/mdlayher/genetlink v1.2.0 // indirect - github.com/mdlayher/netlink v1.6.0 // indirect - github.com/mdlayher/socket v0.2.3 // indirect + github.com/mdlayher/netlink v1.7.2 // indirect + github.com/mdlayher/socket v0.4.1 // indirect github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 // indirect github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 // indirect github.com/minio/highwayhash v1.0.2 // indirect @@ -454,25 +459,23 @@ require ( go.etcd.io/etcd/api/v3 v3.5.4 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/collector/consumer v0.84.0 // indirect - go.opentelemetry.io/collector/semconv v0.84.0 // indirect - go.opentelemetry.io/otel v1.16.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 // 
indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.39.0 // indirect - go.opentelemetry.io/otel/metric v1.16.0 // indirect - go.opentelemetry.io/otel/sdk v1.16.0 // indirect - go.opentelemetry.io/otel/trace v1.16.0 // indirect + go.opentelemetry.io/collector/semconv v0.87.0 // indirect + go.opentelemetry.io/otel v1.21.0 // indirect + go.opentelemetry.io/otel/metric v1.21.0 // indirect + go.opentelemetry.io/otel/sdk v1.21.0 // indirect + go.opentelemetry.io/otel/trace v1.21.0 // indirect go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.24.0 // indirect - golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 + golang.org/x/exp v0.0.0-20231006140011-7918f672742d golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.13.0 // indirect + golang.org/x/tools v0.14.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect golang.zx2c4.com/wireguard v0.0.0-20211209221555-9c9e7e272434 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c // indirect + google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 // indirect gopkg.in/errgo.v1 v1.0.1 // indirect gopkg.in/fatih/pool.v2 v2.0.0 // indirect gopkg.in/fsnotify.v1 v1.4.7 // indirect diff --git a/go.sum b/go.sum index 343b4afa2149a..82cee408ecfea 100644 --- a/go.sum +++ b/go.sum @@ -127,8 +127,8 @@ cloud.google.com/go/bigquery v1.47.0/go.mod h1:sA9XOgy0A8vQK9+MWhEQTY6Tix87M/Zur cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= cloud.google.com/go/bigquery v1.49.0/go.mod h1:Sv8hMmTFFYBlt/ftw2uN6dFdQPzBlREY9yBh7Oy7/4Q= cloud.google.com/go/bigquery v1.50.0/go.mod 
h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= -cloud.google.com/go/bigquery v1.56.0 h1:LHIc9E7Kw+ftFpQFKzZYBB88IAFz7qONawXXx0F3QBo= -cloud.google.com/go/bigquery v1.56.0/go.mod h1:KDcsploXTEY7XT3fDQzMUZlpQLHzE4itubHrnmhUrZA= +cloud.google.com/go/bigquery v1.57.1 h1:FiULdbbzUxWD0Y4ZGPSVCDLvqRSyCIO6zKV7E2nf5uA= +cloud.google.com/go/bigquery v1.57.1/go.mod h1:iYzC0tGVWt1jqSzBHqCr3lrRn0u13E8e+AqowBsDgug= cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= @@ -176,8 +176,8 @@ cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63 cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= -cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= -cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute v1.23.1 h1:V97tBoDaZHb6leicZ1G6DLK2BAaZLJ/7+9BB/En3hR0= +cloud.google.com/go/compute v1.23.1/go.mod h1:CqB3xpmPKKt3OJpW2ndFIXnA9A4xAy/F3Xp1ixncW78= cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= @@ -203,8 +203,8 @@ cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOX cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M= cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0= 
cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= -cloud.google.com/go/datacatalog v1.17.1 h1:qGWrlYvWtK+8jD1jhwq5BsGoSr7S4/LOroV7LwXi00g= -cloud.google.com/go/datacatalog v1.17.1/go.mod h1:nCSYFHgtxh2MiEktWIz71s/X+7ds/UT9kp0PC7waCzE= +cloud.google.com/go/datacatalog v1.18.1 h1:xJp9mZrc2HPaoxIz3sP9pCmf/impifweQ/yGG9VBfio= +cloud.google.com/go/datacatalog v1.18.1/go.mod h1:TzAWaz+ON1tkNr4MOcak8EBHX7wIRX/gZKM+yTVsv+A= cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= @@ -319,8 +319,8 @@ cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGE cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= -cloud.google.com/go/iam v1.1.2 h1:gacbrBdWcoVmGLozRuStX45YKvJtzIjJdAolzUs1sm4= -cloud.google.com/go/iam v1.1.2/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= +cloud.google.com/go/iam v1.1.3 h1:18tKG7DzydKWUnLjonWcJO6wjSCAtzh4GcRKlH/Hrzc= +cloud.google.com/go/iam v1.1.3/go.mod h1:3khUlaBXfPKKe7huYgEpDn6FtgRyMEqbkvBxrQyY5SE= cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= @@ -340,8 +340,8 @@ cloud.google.com/go/kms v1.8.0/go.mod h1:4xFEhYFqvW+4VMELtZyxomGSYtSQKzM178ylFW4 cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= cloud.google.com/go/kms v1.10.0/go.mod h1:ng3KTUtQQU9bPX3+QGLsflZIHlkbn8amFAMY63m8d24= cloud.google.com/go/kms 
v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= -cloud.google.com/go/kms v1.15.2 h1:lh6qra6oC4AyWe5fUUUBe/S27k12OHAleOOOw6KakdE= -cloud.google.com/go/kms v1.15.2/go.mod h1:3hopT4+7ooWRCjc2DxgnpESFxhIraaI2IpAVUEhbT/w= +cloud.google.com/go/kms v1.15.3 h1:RYsbxTRmk91ydKCzekI2YjryO4c5Y2M80Zwcs9/D/cI= +cloud.google.com/go/kms v1.15.3/go.mod h1:AJdXqHxS2GlPyduM99s9iGqi2nwbviBbhV/hdmt4iOQ= cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= @@ -355,8 +355,8 @@ cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeN cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= -cloud.google.com/go/longrunning v0.5.1 h1:Fr7TXftcqTudoyRJa113hyaqlGdiBQkp0Gq7tErFDWI= -cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= +cloud.google.com/go/longrunning v0.5.2 h1:u+oFqfEwwU7F9dIELigxbe0XVnBAo9wqMuQLA50CZ5k= +cloud.google.com/go/longrunning v0.5.2/go.mod h1:nqo6DQbNV2pXhGDbDMoN2bWz68MjZUzqv2YttZiveCs= cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= @@ -380,8 +380,8 @@ cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhI cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= cloud.google.com/go/monitoring v1.12.0/go.mod 
h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= -cloud.google.com/go/monitoring v1.16.0 h1:rlndy4K8yknMY9JuGe2aK4SbCh21FXoCdX7SAGHmRgI= -cloud.google.com/go/monitoring v1.16.0/go.mod h1:Ptp15HgAyM1fNICAojDMoNc/wUmn67mLHQfyqbw+poY= +cloud.google.com/go/monitoring v1.16.1 h1:CTklIuUkS5nCricGojPwdkSgPsCTX2HmYTxFDg+UvpU= +cloud.google.com/go/monitoring v1.16.1/go.mod h1:6HsxddR+3y9j+o/cMJH6q/KJ/CBTvM/38L/1m7bTRJ4= cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= @@ -547,8 +547,8 @@ cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeL cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= -cloud.google.com/go/storage v1.30.1 h1:uOdMxAs8HExqBlnLtnQyP0YkvbiDpdGShGKtx6U/oNM= -cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E= +cloud.google.com/go/storage v1.34.1 h1:H2Af2dU5J0PF7A5B+ECFIce+RqxVnrVilO+cu0TS3MI= +cloud.google.com/go/storage v1.34.1/go.mod h1:VN1ElqqvR9adg1k9xlkUJ55cMOP1/QjnNNuT5xQL6dY= cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= @@ -642,20 +642,22 @@ github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl github.com/Azure/azure-sdk-for-go 
v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0-beta.1 h1:8t6ZZtkOCl+rx7uBn40Nj62ABVGkXK69U/En44wJIlE= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0-beta.1/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 h1:fb8kj/Dh4CSwgsOzHeZY4Xh68cFVbzXx+ONXGMY//4w= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0/go.mod h1:uReU2sSxZExRPBAg3qKzmAucSi51+SP1OhohieR821Q= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs= github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2 h1:mLY+pNLjCUeKhgnAJWAKhEUQM+RJQo2H1fuGSw1Ky1E= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2/go.mod h1:FbdwsQ2EzwvXxOPcMFYO8ogEc9uMMIj3YkmCdXdAFmk= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0 h1:d81/ng9rET2YqdVkVwkb6EXeRrLJIwyGnJcAlAWKwhs= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI= 
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0/go.mod h1:LRr2FzBTQlONPPa5HREE5+RjSCTXl7BwOvYOaWTqCaI= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/managementgroups/armmanagementgroups v1.0.0 h1:pPvTJ1dY0sA35JOeFq6TsY2xj6Z85Yo23Pj4wCCvu4o= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/managementgroups/armmanagementgroups v1.0.0/go.mod h1:mLfWfj8v3jfWKsL9G4eoBoXVcsqcIUTapmdKy7uGOp0= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/monitor/armmonitor v0.10.1 h1:UpPmkj6X+Nha33xdAXX2wcTNTh9tU6vhCeM/QZIK4q8= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/monitor/armmonitor v0.10.1/go.mod h1:X7+7NZAH9UZInvhObje8DjGbHOQZPzhDUkAvKiX6t00= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1 h1:7CBQ+Ei8SP2c6ydQTGCCrS35bDxgTMfoP2miAwK++OU= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1/go.mod h1:c/wcGeGx5FUPbM/JltUYHZcKmigwyVLJlDq+4HdtXaw= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/monitor/armmonitor v0.11.0 h1:Ds0KRF8ggpEGg4Vo42oX1cIt/IfOhHWJBikksZbVxeg= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/monitor/armmonitor v0.11.0/go.mod h1:jj6P8ybImR+5topJ+eH6fgcemSFBmU6/6bFF8KkwuDI= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 h1:Dd+RhdJn0OTtVGaeDLZpcumkIVCtA/3/Fo42+eoYvVM= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0/go.mod h1:5kakwfW5CjC9KK+Q4wjXAg+ShuIm2mBMua0ZFj2C8PE= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 h1:u/LLAOFgsMv7HmNL4Qufg58y+qElGOt5qv0z1mURkRY= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0/go.mod h1:2e8rMJtl2+2j+HXbTBwnyGpm5Nou7KhvSfxOq8JpTag= 
github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd h1:b3wyxBl3vvr15tUAziPBPK354y+LSdfPCpex5oBttHo= @@ -692,8 +694,9 @@ github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUM github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8= github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= -github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY= github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= +github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 h1:WpB/QDNLpMw72xHJc34BNNykqSOeEJDAWkhf0u12/Jk= +github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= @@ -708,6 +711,10 @@ github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3 github.com/DataDog/sketches-go v0.0.0-20190923095040-43f19ad77ff7/go.mod h1:Q5DbzQ+3AkgGwymQO7aZFNP7ns2lZKGtvRBzRXfdi60= github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= +github.com/IBM/nzgo/v12 v12.0.9-0.20231115043259-49c27f2dfe48 h1:TBb4IxmBH0ssmWTUg0C6c9ZnfDmZospTF8f+YbHnbbA= +github.com/IBM/nzgo/v12 
v12.0.9-0.20231115043259-49c27f2dfe48/go.mod h1:4pvfEkfsrAdqlljsp8HNwv/uzNKy2fzoXBB1aRIssJg= +github.com/IBM/sarama v1.41.3 h1:MWBEJ12vHC8coMjdEXFq/6ftO6DUZnQlFYcxtOJFa7c= +github.com/IBM/sarama v1.41.3/go.mod h1:Xxho9HkHd4K/MDUo/T/sOqwtX/17D33++E9Wib6hUdQ= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c h1:RGWPOewvKIROun94nF7v2cua9qP+thov/7M50KEoeSU= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= @@ -715,8 +722,9 @@ github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy86 github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g= github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= +github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= @@ -821,8 +829,9 @@ github.com/aws/aws-sdk-go-v2 v1.11.2/go.mod h1:SQfA+m2ltnu1cA0soUkj4dRSsmITiVQUJ github.com/aws/aws-sdk-go-v2 v1.18.0/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= github.com/aws/aws-sdk-go-v2 
v1.18.1/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= github.com/aws/aws-sdk-go-v2 v1.21.0/go.mod h1:/RfNgGmRxI+iFOB1OeJUyxiU+9s88k3pfHvDagGEp0M= -github.com/aws/aws-sdk-go-v2 v1.21.2 h1:+LXZ0sgo8quN9UOKXXzAWRT3FWd4NxeXWOZom9pE7GA= github.com/aws/aws-sdk-go-v2 v1.21.2/go.mod h1:ErQhvNuEMhJjweavOYhxVkn2RUx7kQXVATHrjKtxIpM= +github.com/aws/aws-sdk-go-v2 v1.23.1 h1:qXaFsOOMA+HsZtX8WoCa+gJnbyW7qyFFBlPqvTSzbaI= +github.com/aws/aws-sdk-go-v2 v1.23.1/go.mod h1:i1XDttT4rnf6vxc9AuskLc6s7XBee8rlLilKlc03uAA= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10/go.mod h1:VeTZetY5KRJLuD/7fkQXMU6Mw7H5m/KP2J5Iy9osMno= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.13 h1:OPLEkmhXf6xFPiz0bLeDArZIDx1NNS4oJyG4nv3Gct0= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.13/go.mod h1:gpAbvyDGQFozTEmlTFO8XcQKHzubdq0LzRyJpG6MiXM= @@ -849,13 +858,15 @@ github.com/aws/aws-sdk-go-v2/internal/configsources v1.0.4/go.mod h1:W5gGbtNXFpF github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.33/go.mod h1:7i0PF1ME/2eUPFcjkVIwq+DOygHEoK92t5cDqNgYbIw= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.34/go.mod h1:wZpTEecJe0Btj3IYnDx/VlUzor9wm3fJHyvLpQF0VwY= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41/go.mod h1:CrObHAuPneJBlfEJ5T3szXOUkLEThaGfvnhTf33buas= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43 h1:nFBQlGtkbPzp/NjZLuFxRqmT91rLJkgvsEQs68h962Y= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43/go.mod h1:auo+PiyLl0n1l8A0e8RIeR8tOzYPfZZH/JNlrJ8igTQ= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.4 h1:LAm3Ycm9HJfbSCd5I+wqC2S9Ej7FPrgr5CQoOljJZcE= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.4/go.mod h1:xEhvbJcyUf/31yfGSQBe01fukXwXJ0gxDp7rLfymWE0= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.27/go.mod 
h1:UrHnn3QV/d0pBZ6QBAEQcqFLf8FAzLmoUfPVIueOvoM= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.28/go.mod h1:7VRpKQQedkfIEXb4k52I7swUnZP0wohVajJMRn3vsUw= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35/go.mod h1:SJC1nEVVva1g3pHAIdCp7QsRIkMmLAgoDquQ9Rr8kYw= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37 h1:JRVhO25+r3ar2mKGP7E0LDl8K9/G36gjlqca5iQbaqc= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37/go.mod h1:Qe+2KtKml+FEsQF/DHmDV+xjtche/hwoF75EG4UlHW8= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.4 h1:4GV0kKZzUxiWxSVpn/9gwR0g21NF1Jsyduzo9rHgC/Q= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.4/go.mod h1:dYvTNAggxDZy6y1AF7YDwXsPuHFy/VNEpEI/2dWK9IU= github.com/aws/aws-sdk-go-v2/internal/ini v1.2.1/go.mod h1:Pv3WenDjI0v2Jl7UaMFIIbPOBbhn33RmmAmGgkXDoqY= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.34/go.mod h1:Etz2dj6UHYuw+Xw830KfzCfWGMzqvUTCjUj5b76GVDc= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.35/go.mod h1:0Eg1YjxE0Bhn56lx+SHJwCzhW+2JGtizsrx+lCqrfm0= @@ -865,8 +876,8 @@ github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.26 h1:wscW+pnn3J1OYnanMnza5ZVYXLX github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.26/go.mod h1:MtYiox5gvyB+OyP0Mr0Sm/yzbEAIPL9eijj/ouHAPw0= github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.26.2 h1:PWGu2JhCb/XJlJ7SSFJq76pxk4xWsN76nZxh7TzMHx0= github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.26.2/go.mod h1:2KOZkkzMDZCo/aLzPhys06mHNkiU74u85aMJA3PLRvg= -github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.23.5 h1:/rXnxd9VGnTc5fLuSFKkWCy+kDP6CxXAIMvfJQEfx8U= -github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.23.5/go.mod h1:5v2ZNXCSwG73rx0k3sCuB1Ju8sbEbG0iUlxCA7D8sV8= +github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.27.2 h1:zl57IYAWKaRzQiy2WzOeBt/ckXlGlvD9S2cjJh43uAo= +github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.27.2/go.mod 
h1:NRP65i31tm0UhGwc9j6TGwk7dMs1ZDprZPIHfr+gHCU= github.com/aws/aws-sdk-go-v2/service/dynamodb v1.5.0/go.mod h1:XY5YhCS9SLul3JSQ08XG/nfxXxrkh6RR21XPq/J//NY= github.com/aws/aws-sdk-go-v2/service/dynamodb v1.20.0 h1:ov790XKhwAziEXcl6WrjsbyWkGpboK7Cmikpe5gAzMw= github.com/aws/aws-sdk-go-v2/service/dynamodb v1.20.0/go.mod h1:W1oiFegjVosgjIwb2Vv45jiCQT1ee8x85u8EyZRYLes= @@ -916,8 +927,9 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/aws/smithy-go v1.9.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= github.com/aws/smithy-go v1.14.2/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= -github.com/aws/smithy-go v1.15.0 h1:PS/durmlzvAFpQHDs4wi4sNNP9ExsqZh6IlfdHXgKK8= github.com/aws/smithy-go v1.15.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/aws/smithy-go v1.17.0 h1:wWJD7LX6PBV6etBUwO0zElG0nWN9rUhp0WdYeHSHAaI= +github.com/aws/smithy-go v1.17.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f h1:Pf0BjJDga7C98f0vhw+Ip5EaiE07S3lTKpIYPNS0nMo= github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f/go.mod h1:SghidfnxvX7ribW6nHI7T+IBbc9puZ9kk5Tx/88h8P4= github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I= @@ -1018,6 +1030,8 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7 github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.3.2/go.mod 
h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/couchbase/go-couchbase v0.1.1 h1:ClFXELcKj/ojyoTYbsY34QUrrYCBi/1G749sXSCkdhk= github.com/couchbase/go-couchbase v0.1.1/go.mod h1:+/bddYDxXsf9qt0xpDUtRR47A2GjaXmGGAqQ/k3GJ8A= @@ -1081,10 +1095,10 @@ github.com/dvsekhvalnov/jose2go v1.5.0 h1:3j8ya4Z4kMCwT5nXIKFSV84YS+HdqSSO0VsTQx github.com/dvsekhvalnov/jose2go v1.5.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= github.com/dynatrace-oss/dynatrace-metric-utils-go v0.5.0 h1:wHGPJSXvwKQVf/XfhjUPyrhpcPKWNy8F3ikH+eiwoBg= github.com/dynatrace-oss/dynatrace-metric-utils-go v0.5.0/go.mod h1:PseHFo8Leko7J4A/TfZ6kkHdkzKBLUta6hRZR/OEbbc= -github.com/eapache/go-resiliency v1.3.0 h1:RRL0nge+cWGlxXbUzJ7yMcq6w2XBEr19dCN6HECGaT0= -github.com/eapache/go-resiliency v1.3.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= -github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6 h1:8yY/I9ndfrgrXUbOGObLHKBR4Fl3nZXwM2c7OYTT8hM= -github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= +github.com/eapache/go-resiliency v1.4.0 h1:3OK9bWpPk5q6pbFAaYSEwD9CLUSHG8bnZuqX2yMt3B0= +github.com/eapache/go-resiliency v1.4.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= +github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws= +github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= github.com/eapache/queue v1.1.0 
h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/echlebek/crock v1.0.1 h1:KbzamClMIfVIkkjq/GTXf+N16KylYBpiaTitO3f1ujg= @@ -1175,8 +1189,8 @@ github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-macaroon-bakery/macaroonpb v1.0.0 h1:It9exBaRMZ9iix1iJ6gwzfwsDE6ExNuwtAJ9e09v6XE= @@ -1223,6 +1237,8 @@ github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+ github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= +github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= 
github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0= @@ -1250,8 +1266,9 @@ github.com/golang/geo v0.0.0-20190916061304-5b978397cfec h1:lJwO/92dFXWeXOZdoGXg github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= +github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= +github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -1357,8 +1374,8 @@ github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 h1:n6vlPhxsA+BW/XsS5+uqi7GyzaLa5MH7qlSLBZtRdiA= 
-github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA= +github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 h1:pUa4ghanp6q4IJHwE9RwLgmVFfReJN+KbQ8ExNEUUoQ= +github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/protobuf v3.11.4+incompatible/go.mod h1:lUQ9D1ePzbH2PrIS7ob/bjm9HXyH5WHB0Akwh7URreM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= @@ -1367,15 +1384,15 @@ github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= -github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= +github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod 
h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= -github.com/googleapis/enterprise-certificate-proxy v0.3.1 h1:SBWmZhjUDRorQxrN0nwzf+AHBxnbFjViHQS4P0yVpmQ= -github.com/googleapis/enterprise-certificate-proxy v0.3.1/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= @@ -1393,8 +1410,8 @@ github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+ github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gopcua/opcua v0.4.0 h1:Pr0PMFViNOzvkcvmzP3yTKqtLFVL1OUgav3tDj+hpqQ= github.com/gopcua/opcua v0.4.0/go.mod h1:6BsaYGu33RhVRxnK+EqHWwSG+hYCSAMjyIjx3RGV1PQ= -github.com/gophercloud/gophercloud v1.5.0 h1:cDN6XFCLKiiqvYpjQLq9AiM7RDRbIC9450WpPH+yvXo= -github.com/gophercloud/gophercloud v1.5.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= +github.com/gophercloud/gophercloud v1.7.0 h1:fyJGKh0LBvIZKLvBWvQdIgkaV5yTM3Jh9EYUh+UNCAs= +github.com/gophercloud/gophercloud v1.7.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00 h1:l5lAOZEym3oK3SQ2HBHWsJUfbNBiTXJDeW2QDxw9AQ0= github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod 
h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -1405,8 +1422,8 @@ github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/z github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gosnmp/gosnmp v1.35.1-0.20230602062452-f30602b8dad6 h1:pzIZ9ij5bf6vL8aSAoFoksiT7pZXyzBOhDdRlZUT89Q= -github.com/gosnmp/gosnmp v1.35.1-0.20230602062452-f30602b8dad6/go.mod h1:V8wQurBU216WENrmFCZVFV4bVcMWIb9ZmPVI5PoH80A= +github.com/gosnmp/gosnmp v1.36.1 h1:LaTyGWIM8Z91NmCUELJi45d+BtOafI8U82nVUGI1P+w= +github.com/gosnmp/gosnmp v1.36.1/go.mod h1:iLcZxN2MxKhH0jPQDVMZaSNypw1ykqVi27O79koQj6w= github.com/grid-x/modbus v0.0.0-20211113184042-7f2251c342c9 h1:Q7e9kXS3sRbTjsNDKazbcbDSGAKjFdk096M3qYbwNpE= github.com/grid-x/modbus v0.0.0-20211113184042-7f2251c342c9/go.mod h1:qVX2WhsI5xyAoM6I/MV1bXSKBPdLAjp7pCvieO/S0AY= github.com/grid-x/serial v0.0.0-20191104121038-e24bc9bf6f08/go.mod h1:kdOd86/VGFWRrtkNwf1MPk0u1gIjc4Y7R2j7nhwc7Rk= @@ -1429,11 +1446,11 @@ github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMW github.com/harlow/kinesis-consumer v0.3.6-0.20211204214318-c2b9f79d7ab6 h1:38nI+nE+oUmLmlNjuByhvnmuBrcQVLNkOJhSSM4eJv0= github.com/harlow/kinesis-consumer v0.3.6-0.20211204214318-c2b9f79d7ab6/go.mod h1:hNEr2hL0WPpm4BSILcClbOE/+Tew0JJnqCbTlc6jCUc= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/api v1.25.1 h1:CqrdhYzc8XZuPnhIYZWH45toM0LB9ZeYr/gvpLVI3PE= -github.com/hashicorp/consul/api v1.25.1/go.mod h1:iiLVwR/htV7mas/sy0O+XSuEnrdBUUydemjxcUrAt4g= +github.com/hashicorp/consul/api v1.26.1 
h1:5oSXOO5fboPZeW5SN+TdGFP/BILDgBm19OrPZ/pICIM= +github.com/hashicorp/consul/api v1.26.1/go.mod h1:B4sQTeaSO16NtynqrAdwOlahJ7IUDZM9cj2420xYL8A= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.14.1 h1:ZiwE2bKb+zro68sWzZ1SgHF3kRMBZ94TwOCFRF4ylPs= -github.com/hashicorp/consul/sdk v0.14.1/go.mod h1:vFt03juSzocLRFo59NkeQHHmQa6+g7oU0pfzdI1mUhg= +github.com/hashicorp/consul/sdk v0.15.0 h1:2qK9nDrr4tiJKRoxPGhm6B7xJjLVIQqkjiab2M4aKjU= +github.com/hashicorp/consul/sdk v0.15.0/go.mod h1:r/OmRRPbHOe0yxNahLw7G9x5WG17E1BIECMtCjcPSNo= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -1599,8 +1616,9 @@ github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22 github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/josharian/native v0.0.0-20200817173448-b6b71def0850/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= -github.com/josharian/native v1.0.0 h1:Ts/E8zCSEsG17dUqv7joXJFybuMLjQfWE04tsBODTxk= github.com/josharian/native v1.0.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= +github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA= +github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= 
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= @@ -1652,8 +1670,8 @@ github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0 github.com/klauspost/compress v1.10.10/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= -github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM= -github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.1 h1:NE3C767s2ak2bweCZo3+rdP4U/HoyVXLv/X9f2gPS5g= +github.com/klauspost/compress v1.17.1/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= @@ -1763,16 +1781,17 @@ github.com/mdlayher/netlink v1.3.0/go.mod h1:xK/BssKuwcRXHrtN04UBkwQ6dY9VviGGuri github.com/mdlayher/netlink v1.4.0/go.mod h1:dRJi5IABcZpBD2A3D0Mv/AiX8I9uDEu5oGkAVrekmf8= github.com/mdlayher/netlink v1.4.1/go.mod h1:e4/KuJ+s8UhfUpO9z00/fDZZmhSrs+oxyqAS9cNgn6Q= github.com/mdlayher/netlink v1.4.2/go.mod h1:13VaingaArGUTUxFLf/iEovKxXji32JAtF858jZYEug= -github.com/mdlayher/netlink v1.6.0 h1:rOHX5yl7qnlpiVkFWoqccueppMtXzeziFjWAjLg6sz0= github.com/mdlayher/netlink v1.6.0/go.mod h1:0o3PlBmGst1xve7wQ7j/hwpNaFaH4qCRyWCdcZk8/vA= +github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= +github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw= 
github.com/mdlayher/socket v0.0.0-20210307095302-262dc9984e00/go.mod h1:GAFlyu4/XV68LkQKYzKhIo/WW7j3Zi0YRAz/BOoanUc= github.com/mdlayher/socket v0.0.0-20211007213009-516dcbdf0267/go.mod h1:nFZ1EtZYK8Gi/k6QNu7z7CgO20i/4ExeQswwWuPmG/g= github.com/mdlayher/socket v0.0.0-20211102153432-57e3fa563ecb/go.mod h1:nFZ1EtZYK8Gi/k6QNu7z7CgO20i/4ExeQswwWuPmG/g= github.com/mdlayher/socket v0.1.1/go.mod h1:mYV5YIZAfHh4dzDVzI8x8tWLWCliuX8Mon5Awbj+qDs= -github.com/mdlayher/socket v0.2.3 h1:XZA2X2TjdOwNoNPVPclRCURoX/hokBY8nkTmRZFEheM= -github.com/mdlayher/socket v0.2.3/go.mod h1:bz12/FozYNH/VbvC3q7TRIK/Y6dH1kCKsXaUeXi/FmY= -github.com/mdlayher/vsock v1.1.1 h1:8lFuiXQnmICBrCIIA9PMgVSke6Fg6V4+r0v7r55k88I= -github.com/mdlayher/vsock v1.1.1/go.mod h1:Y43jzcy7KM3QB+/FK15pfqGxDMCMzUXWegEfIbSM18U= +github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U= +github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA= +github.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ= +github.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnENvE+SE= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/mholt/archiver/v3 v3.5.0/go.mod h1:qqTTPUK/HZPFgFQ/TJ3BzvTpF/dPtFVJXdQbCmeMxwc= github.com/microsoft/ApplicationInsights-Go v0.4.4 h1:G4+H9WNs6ygSCe6sUyxRc2U81TI5Es90b2t/MwX5KqY= @@ -1998,8 +2017,8 @@ github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1: github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.3.0/go.mod 
h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= -github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 h1:v7DLqVdK4VrYkVD5diGdl4sxJurKJEMnODWRJlxV9oM= -github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ -2012,11 +2031,11 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= -github.com/prometheus/prometheus v0.46.0 h1:9JSdXnsuT6YsbODEhSQMwxNkGwPExfmzqG73vCMk/Kw= -github.com/prometheus/prometheus v0.46.0/go.mod h1:10L5IJE5CEsjee1FnOcVswYXlPIscDWWt3IJ2UDYrz4= +github.com/prometheus/prometheus v0.48.0 h1:yrBloImGQ7je4h8M10ujGh4R6oxYQJQKlMuETwNskGk= +github.com/prometheus/prometheus v0.48.0/go.mod h1:SRw624aMAxTfryAcP8rOjg4S/sHHaetx2lyJJ2nM83g= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/rabbitmq/amqp091-go v1.8.1 h1:RejT1SBUim5doqcL6s7iN6SBmsQqyTgXb1xMlH0h1hA= -github.com/rabbitmq/amqp091-go v1.8.1/go.mod h1:+jPrT9iY2eLjRaMSRHUhc3z14E/l85kv/f+6luSD3pc= +github.com/rabbitmq/amqp091-go v1.9.0 
h1:qrQtyzB4H8BQgEuJwhmVQqVHB9O4+MNDJCCAcpc3Aoo= +github.com/rabbitmq/amqp091-go v1.9.0/go.mod h1:+jPrT9iY2eLjRaMSRHUhc3z14E/l85kv/f+6luSD3pc= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/redis/go-redis/v9 v9.2.1 h1:WlYJg71ODF0dVspZZCpYmoF1+U1Jjk9Rwd7pq6QmlCg= @@ -2032,8 +2051,8 @@ github.com/robertkrimen/otto v0.0.0-20191219234010-c382bd3c16ff h1:+6NUiITWwE5q1 github.com/robertkrimen/otto v0.0.0-20191219234010-c382bd3c16ff/go.mod h1:xvqspoSXJTIpemEonrMDFq6XzwHYYgToXWj5eRX1OtY= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= -github.com/robinson/gos7 v0.0.0-20231012111941-bdaa10e92e16 h1:bhgGFmhTZpESybQyk+uxZ+dAd3yNSJSano2fN7HGmlA= -github.com/robinson/gos7 v0.0.0-20231012111941-bdaa10e92e16/go.mod h1:AMHIeh1KJ7Xa2RVOMHdv9jXKrpw0D4EWGGQMHLb2doc= +github.com/robinson/gos7 v0.0.0-20231031082500-fb5a72fd499e h1:Ofp6C2iX58K698sGpIXZFp3furntNlhIjeyLkcrAiek= +github.com/robinson/gos7 v0.0.0-20231031082500-fb5a72fd499e/go.mod h1:AMHIeh1KJ7Xa2RVOMHdv9jXKrpw0D4EWGGQMHLb2doc= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s= @@ -2041,8 +2060,8 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod 
h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= @@ -2067,8 +2086,8 @@ github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod github.com/sensu/sensu-go/api/core/v2 v2.16.0 h1:HOq4rFkQ1S5ZjxmMTLc5J5mAbECrnKWvtXXbMqr3j9s= github.com/sensu/sensu-go/api/core/v2 v2.16.0/go.mod h1:MjM7+MCGEyTAgaZ589SiGHwYiaYF7N/58dU0J070u/0= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/shirou/gopsutil/v3 v3.23.9 h1:ZI5bWVeu2ep4/DIxB4U9okeYJ7zp/QLTO4auRb/ty/E= -github.com/shirou/gopsutil/v3 v3.23.9/go.mod h1:x/NWSb71eMcjFIO0vhyGW5nZ7oSIgVjrCnADckb85GA= +github.com/shirou/gopsutil/v3 v3.23.10 h1:/N42opWlYzegYaVkWejXWJpbzKv2JDy3mrgGzKsh9hM= +github.com/shirou/gopsutil/v3 v3.23.10/go.mod h1:JIE26kpucQi+innVlAUnIEOSBhBUkirr5b44yr55+WE= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= @@ -2077,8 +2096,8 @@ github.com/shopspring/decimal 
v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9Nz github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/showwin/speedtest-go v1.6.6 h1:WPq8vhD1aHzt23hJA+wHe3MLOXsVrMMOjU2UO98pOSc= -github.com/showwin/speedtest-go v1.6.6/go.mod h1:uLgdWCNarXxlYsL2E5TOZpCIwpgSWnEANZp7gfHXHu0= +github.com/showwin/speedtest-go v1.6.7 h1:U0uMD9v4QS2JEqBoCHPWYQ1S9f87AnSSl5WsT+sBX6c= +github.com/showwin/speedtest-go v1.6.7/go.mod h1:uLgdWCNarXxlYsL2E5TOZpCIwpgSWnEANZp7gfHXHu0= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/signalfx/com_signalfx_metrics_protobuf v0.0.3 h1:32k2QLgsKhcEs55q4REPKyIadvid5FPy2+VMgvbmKJ0= github.com/signalfx/com_signalfx_metrics_protobuf v0.0.3/go.mod h1:gJrXWi7wSGXfiC7+VheQaz+ypdCt5SmZNL+BRxUe7y4= @@ -2162,6 +2181,8 @@ github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62/go.mod h1:qUzPVl github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= github.com/testcontainers/testcontainers-go v0.26.0 h1:uqcYdoOHBy1ca7gKODfBd9uTHVK3a7UL848z09MVZ0c= github.com/testcontainers/testcontainers-go v0.26.0/go.mod h1:ICriE9bLX5CLxL9OFQ2N+2N+f+803LNJ1utJb1+Inx0= +github.com/testcontainers/testcontainers-go/modules/kafka v0.26.1-0.20231116140448-68d5f8983d09 h1:jqohCgCKphLrxHl6crzKJbmlmo8GYUNpTiw/Ib+AFLo= +github.com/testcontainers/testcontainers-go/modules/kafka v0.26.1-0.20231116140448-68d5f8983d09/go.mod h1:MBqGe6sHltLHRmjk1K1axtIboCjjATh3+oZObcWYFMg= github.com/thomasklein94/packer-plugin-libvirt v0.5.0 h1:aj2HLHZZM/ClGLIwVp9rrgh+2TOU/w4EiaZHAwCpOgs= 
github.com/thomasklein94/packer-plugin-libvirt v0.5.0/go.mod h1:GwN82FQ6KxCNKtS8LNUgLbwTZs90GGhBzCmTNkrTCrY= github.com/tidwall/gjson v1.14.4 h1:uo0p8EbA09J7RQaflQ1aBRffTR7xedD2bcIVSYxLnkM= @@ -2210,8 +2231,8 @@ github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1Y github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/vjeantet/grok v1.0.1 h1:2rhIR7J4gThTgcZ1m2JY4TrJZNgjn985U28kT2wQrJ4= github.com/vjeantet/grok v1.0.1/go.mod h1:ax1aAchzC6/QMXMcyzHQGZWaW1l195+uMYIkCWPCNIo= -github.com/vmware/govmomi v0.32.0 h1:Rsdi/HAX5Ebf9Byp/FvBir4sfM7yP5DBUeRlbC6vLBo= -github.com/vmware/govmomi v0.32.0/go.mod h1:JA63Pg0SgQcSjk+LuPzjh3rJdcWBo/ZNCIwbb1qf2/0= +github.com/vmware/govmomi v0.33.1 h1:qS2VpEBd/WLbzLO5McI6h5o5zaKsrezUxRY5r9jkW8A= +github.com/vmware/govmomi v0.33.1/go.mod h1:QuzWGiEMA/FYlu5JXKjytiORQoxv2hTHdS2lWnIqKMM= github.com/wavefronthq/wavefront-sdk-go v0.15.0 h1:po9E3vh/0y7kOx8D9EtFp7kbSLLLKbmu/w/s1xGJAQU= github.com/wavefronthq/wavefront-sdk-go v0.15.0/go.mod h1:V72c8e+bXuLK8HpA6ioW0ll5mK9IPD+4IHNNDY75ksA= github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf h1:TOV5PC6fIWwFOFra9xJfRXZcL2pLhMI8oNuDugNxg9Q= @@ -2275,27 +2296,23 @@ go.opentelemetry.io/collector v0.84.0 h1:zzsegdPlDR0iJufPsHTJhXkv9q2kbpTTTI6nTyy go.opentelemetry.io/collector v0.84.0/go.mod h1:+cv/zxludfiiDuK3z+5eXkxAJhkCCcy8Chtvv0nOlr0= go.opentelemetry.io/collector/consumer v0.84.0 h1:sz8mXIdPACJArlRyFNXA1SScVoo954IU1qp9V78VUxc= go.opentelemetry.io/collector/consumer v0.84.0/go.mod h1:Mu+KeuorwHHWd6iGxU7DMAhgsHZmmzmQgf3sSWkugmM= -go.opentelemetry.io/collector/pdata v1.0.0-rcv0014 h1:iT5qH0NLmkGeIdDtnBogYDx7L58t6CaWGL378DEo2QY= -go.opentelemetry.io/collector/pdata v1.0.0-rcv0014/go.mod h1:BRvDrx43kiSoUx3mr7SoA7h9B8+OY99mUK+CZSQFWW4= -go.opentelemetry.io/collector/semconv v0.84.0 
h1:sI1B8ebHhfJPd87iyba66TDnluVFvYu8CEpSjKHqIDc= -go.opentelemetry.io/collector/semconv v0.84.0/go.mod h1:j/8THcqVxFna1FpvA2zYIsUperEtOaRaqoLYIN4doWw= +go.opentelemetry.io/collector/pdata v1.0.0-rcv0016 h1:qCPXSQCoD3qeWFb1RuIks8fw9Atxpk78bmtVdi15KhE= +go.opentelemetry.io/collector/pdata v1.0.0-rcv0016/go.mod h1:OdN0alYOlYhHXu6BDlGehrZWgtBuiDsz/rlNeJeXiNg= +go.opentelemetry.io/collector/semconv v0.87.0 h1:BsG1jdLLRCBRlvUujk4QA86af7r/ZXnizczQpEs/gg8= +go.opentelemetry.io/collector/semconv v0.87.0/go.mod h1:j/8THcqVxFna1FpvA2zYIsUperEtOaRaqoLYIN4doWw= go.opentelemetry.io/otel v0.7.0/go.mod h1:aZMyHG5TqDOXEgH2tyLiXSUKly1jT3yqE9PmrzIeCdo= -go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= -go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 h1:t4ZwRPU+emrcvM2e9DHd0Fsf0JTPVcbfa/BhTDF03d0= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0/go.mod h1:vLarbg68dH2Wa77g71zmKQqlQ8+8Rq3GRG31uc0WcWI= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.39.0 h1:f6BwB2OACc3FCbYVznctQ9V6KK7Vq6CjmYXJ7DeSs4E= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.39.0/go.mod h1:UqL5mZ3qs6XYhDnZaW1Ps4upD+PX6LipH40AoeuIlwU= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.39.0 h1:rm+Fizi7lTM2UefJ1TO347fSRcwmIsUAaZmYmIGBRAo= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.39.0/go.mod h1:sWFbI3jJ+6JdjOVepA5blpv/TJ20Hw+26561iMbWcwU= -go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= -go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4= -go.opentelemetry.io/otel/sdk v1.16.0 h1:Z1Ok1YsijYL0CSJpHt4cS3wDDh7p572grzNrBMiMWgE= -go.opentelemetry.io/otel/sdk v1.16.0/go.mod h1:tMsIuKXuuIWPBAOrH+eHtvhTL+SntFtXF9QD68aP6p4= -go.opentelemetry.io/otel/sdk/metric v0.39.0 h1:Kun8i1eYf48kHH83RucG93ffz0zGV1sh46FAScOTuDI= 
-go.opentelemetry.io/otel/sdk/metric v0.39.0/go.mod h1:piDIRgjcK7u0HCL5pCA4e74qpK/jk3NiUoAHATVAmiI= -go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= -go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= +go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= +go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0 h1:jd0+5t/YynESZqsSyPz+7PAFdEop0dlN0+PkyHYo8oI= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0/go.mod h1:U707O40ee1FpQGyhvqnzmCJm1Wh6OX6GGBVn0E6Uyyk= +go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= +go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= +go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= +go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= +go.opentelemetry.io/otel/sdk/metric v1.21.0 h1:smhI5oD714d6jHE6Tie36fPx4WDFIg+Y6RfAY4ICcR0= +go.opentelemetry.io/otel/sdk/metric v1.21.0/go.mod h1:FJ8RAsoPGv/wYMgBdUJXOm+6pzFY3YdljnXtv1SBE8Q= +go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= +go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= @@ -2373,8 +2390,8 @@ golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EH golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod 
h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= -golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 h1:MGwJjxBy0HJshjDNfLsYO8xppfqWlA5ZT9OhtUUhTNw= -golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -2418,8 +2435,8 @@ golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= -golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20150829230318-ea47fc708ee3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2557,8 +2574,8 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync 
v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= -golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2693,9 +2710,9 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -2725,8 +2742,9 @@ 
golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2813,8 +2831,8 @@ golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= -golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= -golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= +golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2900,8 +2918,8 @@ google.golang.org/api 
v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/ google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= -google.golang.org/api v0.147.0 h1:Can3FaQo9LlVqxJCodNmeZW/ib3/qKAY3rFeXiHo5gc= -google.golang.org/api v0.147.0/go.mod h1:pQ/9j83DcmPd/5C9e2nFOdjjNkDZ1G+zkbK2uvdkJMs= +google.golang.org/api v0.150.0 h1:Z9k22qD289SZ8gCJrk4DrWXkNjtfvKAUo/l1ma8eBYE= +google.golang.org/api v0.150.0/go.mod h1:ccy+MJ6nrYFgE3WgRx/AMXOxOmU8Q4hSa+jjibzhxcg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -3045,12 +3063,12 @@ google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOl google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= -google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 h1:SeZZZx0cP0fqUyA+oRzP9k7cSwJlvDFiROO72uwD6i0= -google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97/go.mod h1:t1VqOqqvce95G3hIDCT5FeO3YUc6Q4Oe24L/+rNMxRk= -google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 h1:W18sezcAYs+3tDZX4F80yctqa12jcP1PUS2gQu1zTPU= -google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97/go.mod h1:iargEX0SFPm3xcfMI0d1domjg0ZF4Aa0p2awqyxhvF0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c h1:jHkCUWkseRf+W+edG5hMzr/Uh1xkDREY4caybAq4dpY= 
-google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c/go.mod h1:4cYg8o5yUbm77w8ZX00LhMVNl/YVBFJRYWDc0uYWMs0= +google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b h1:+YaDE2r2OG8t/z5qmsh7Y+XXwCbvadxxZ0YY6mTdrVA= +google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:CgAqfJo+Xmu0GwA0411Ht3OU3OntXwsGmrmjI8ioGXI= +google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b h1:CIC2YMXmIhYw6evmhPxBKJ4fmLbOFtXQN/GV3XOZR8k= +google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:IBQ646DjkDkvUIsVq/cc03FUFQ9wbZu7yE396YcL870= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 h1:AB/lmRny7e2pLhFEYIbl5qkDAUt2h0ZRO4wGPhZf+ik= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405/go.mod h1:67X1fPuzjcrkymZzZV1vvkFeTn2Rvc6lYF9MYFGCcwE= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -3093,8 +3111,8 @@ google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5v google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= -google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= -google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= +google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= +google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf 
v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -3194,10 +3212,10 @@ honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= honnef.co/go/tools v0.2.1/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= honnef.co/go/tools v0.2.2 h1:MNh1AVMyVX23VUHE2O27jm6lNj3vjO5DexS4A1xvnzk= honnef.co/go/tools v0.2.2/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= -k8s.io/api v0.28.3 h1:Gj1HtbSdB4P08C8rs9AR94MfSGpRhJgsS+GF9V26xMM= -k8s.io/api v0.28.3/go.mod h1:MRCV/jr1dW87/qJnZ57U5Pak65LGmQVkKTzf3AtKFHc= -k8s.io/apimachinery v0.28.3 h1:B1wYx8txOaCQG0HmYF6nbpU8dg6HvA06x5tEffvOe7A= -k8s.io/apimachinery v0.28.3/go.mod h1:uQTKmIqs+rAYaq+DFaoD2X7pcjLOqbQX2AOiO0nIpb8= +k8s.io/api v0.28.4 h1:8ZBrLjwosLl/NYgv1P7EQLqoO8MGQApnbgH8tu3BMzY= +k8s.io/api v0.28.4/go.mod h1:axWTGrY88s/5YE+JSt4uUi6NMM+gur1en2REMR7IRj0= +k8s.io/apimachinery v0.28.4 h1:zOSJe1mc+GxuMnFzD4Z/U1wst50X28ZNsn5bhgIIao8= +k8s.io/apimachinery v0.28.4/go.mod h1:wI37ncBvfAoswfq626yPTe6Bz1c22L7uaJ8dho83mgg= k8s.io/client-go v0.28.3 h1:2OqNb72ZuTZPKCl+4gTKvqao0AMOl9f3o2ijbAj3LI4= k8s.io/client-go v0.28.3/go.mod h1:LTykbBp9gsA7SwqirlCXBWtK0guzfhpoW4qSm7i9dxo= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= diff --git a/migrations/all/inputs.nats_consumer.go b/migrations/all/inputs.nats_consumer.go new file mode 100644 index 0000000000000..e0ed769fb3014 --- /dev/null +++ b/migrations/all/inputs.nats_consumer.go @@ -0,0 +1,5 @@ +//go:build !custom || (migrations && (inputs || inputs.nats_consumer)) + +package all + +import _ "github.com/influxdata/telegraf/migrations/inputs_nats_consumer" // register migration diff --git a/migrations/all/inputs_mqtt_consumer.go b/migrations/all/inputs_mqtt_consumer.go new file mode 100644 index 0000000000000..b5ed6db5146d3 --- /dev/null +++ 
b/migrations/all/inputs_mqtt_consumer.go @@ -0,0 +1,5 @@ +//go:build !custom || (migrations && (inputs || inputs.mqtt_consumer)) + +package all + +import _ "github.com/influxdata/telegraf/migrations/inputs_mqtt_consumer" // register migration diff --git a/migrations/all/inputs_procstat.go b/migrations/all/inputs_procstat.go new file mode 100644 index 0000000000000..29dbdc5735e9a --- /dev/null +++ b/migrations/all/inputs_procstat.go @@ -0,0 +1,5 @@ +//go:build !custom || (migrations && (inputs || inputs.procstat)) + +package all + +import _ "github.com/influxdata/telegraf/migrations/inputs_procstat" // register migration diff --git a/migrations/all/outputs_influxdb.go b/migrations/all/outputs_influxdb.go new file mode 100644 index 0000000000000..1d9fde9767f56 --- /dev/null +++ b/migrations/all/outputs_influxdb.go @@ -0,0 +1,5 @@ +//go:build !custom || (migrations && (outputs || outputs.influxdb)) + +package all + +import _ "github.com/influxdata/telegraf/migrations/outputs_influxdb" // register migration diff --git a/migrations/inputs_mqtt_consumer/migration.go b/migrations/inputs_mqtt_consumer/migration.go new file mode 100644 index 0000000000000..9137af5906d62 --- /dev/null +++ b/migrations/inputs_mqtt_consumer/migration.go @@ -0,0 +1,43 @@ +package inputs_mqtt_consumer + +import ( + "github.com/influxdata/toml" + "github.com/influxdata/toml/ast" + + "github.com/influxdata/telegraf/migrations" +) + +// Migration function +func migrate(tbl *ast.Table) ([]byte, string, error) { + // Decode the old data structure + var plugin map[string]interface{} + if err := toml.UnmarshalTable(tbl, &plugin); err != nil { + return nil, "", err + } + + // Check for deprecated option(s) and migrate them + var applied bool + if _, found := plugin["metric_buffer"]; found { + applied = true + + // Remove the ignored setting + delete(plugin, "metric_buffer") + } + + // No options migrated so we can exit early + if !applied { + return nil, 
"", migrations.ErrNotApplicable + } + + // Create the corresponding plugin configurations + cfg := migrations.CreateTOMLStruct("inputs", "mqtt_consumer") + cfg.Add("inputs", "mqtt_consumer", plugin) + + output, err := toml.Marshal(cfg) + return output, "", err +} + +// Register the migration function for the plugin type +func init() { + migrations.AddPluginOptionMigration("inputs.mqtt_consumer", migrate) +} diff --git a/migrations/inputs_mqtt_consumer/migration_test.go b/migrations/inputs_mqtt_consumer/migration_test.go new file mode 100644 index 0000000000000..a1494e3cf2d21 --- /dev/null +++ b/migrations/inputs_mqtt_consumer/migration_test.go @@ -0,0 +1,161 @@ +package inputs_mqtt_consumer_test + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/config" + _ "github.com/influxdata/telegraf/migrations/inputs_mqtt_consumer" // register migration + _ "github.com/influxdata/telegraf/plugins/inputs/mqtt_consumer" // register plugin + _ "github.com/influxdata/telegraf/plugins/parsers/all" // register parsers +) + +func TestNoMigration(t *testing.T) { + defaultCfg := []byte(` + # Read metrics from MQTT topic(s) + [[inputs.mqtt_consumer]] + ## Broker URLs for the MQTT server or cluster. To connect to multiple + ## clusters or standalone servers, use a separate plugin instance. + ## example: servers = ["tcp://localhost:1883"] + ## servers = ["ssl://localhost:1883"] + ## servers = ["ws://localhost:1883"] + servers = ["tcp://127.0.0.1:1883"] + + ## Topics that will be subscribed to. + topics = [ + "telegraf/host01/cpu", + "telegraf/+/mem", + "sensors/#", + ] + + ## The message topic will be stored in a tag specified by this value. If set + ## to the empty string no topic tag will be created. 
+ # topic_tag = "topic" + + ## QoS policy for messages + ## 0 = at most once + ## 1 = at least once + ## 2 = exactly once + ## + ## When using a QoS of 1 or 2, you should enable persistent_session to allow + ## resuming unacknowledged messages. + # qos = 0 + + ## Connection timeout for initial connection in seconds + # connection_timeout = "30s" + + ## Max undelivered messages + ## This plugin uses tracking metrics, which ensure messages are read to + ## outputs before acknowledging them to the original broker to ensure data + ## is not lost. This option sets the maximum messages to read from the + ## broker that have not been written by an output. + ## + ## This value needs to be picked with awareness of the agent's + ## metric_batch_size value as well. Setting max undelivered messages too high + ## can result in a constant stream of data batches to the output. While + ## setting it too low may never flush the broker's messages. + # max_undelivered_messages = 1000 + + ## Persistent session disables clearing of the client session on connection. + ## In order for this option to work you must also set client_id to identify + ## the client. To receive messages that arrived while the client is offline, + ## also set the qos option to 1 or 2 and don't forget to also set the QoS when + ## publishing. + # persistent_session = false + + ## If unset, a random client ID will be generated. + # client_id = "" + + ## Username and password to connect MQTT server. + # username = "telegraf" + # password = "metricsmetricsmetricsmetrics" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## Client trace messages + ## When set to true, and debug mode enabled in the agent settings, the MQTT + ## client's messages are included in telegraf logs. 
These messages are very + ## noisey, but essential for debugging issues. + # client_trace = false + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" + + ## Enable extracting tag values from MQTT topics + ## _ denotes an ignored entry in the topic path + # [[inputs.mqtt_consumer.topic_parsing]] + # topic = "" + # measurement = "" + # tags = "" + # fields = "" + ## Value supported is int, float, unit + # [[inputs.mqtt_consumer.topic.types]] + # key = type +`) + + // Migrate and check that nothing changed + output, n, err := config.ApplyMigrations(defaultCfg) + require.NoError(t, err) + require.NotEmpty(t, output) + require.Zero(t, n) + require.Equal(t, string(defaultCfg), string(output)) +} + +func TestCases(t *testing.T) { + // Get all directories in testdata + folders, err := os.ReadDir("testcases") + require.NoError(t, err) + + for _, f := range folders { + // Only handle folders + if !f.IsDir() { + continue + } + + t.Run(f.Name(), func(t *testing.T) { + testcasePath := filepath.Join("testcases", f.Name()) + inputFile := filepath.Join(testcasePath, "telegraf.conf") + expectedFile := filepath.Join(testcasePath, "expected.conf") + + // Read the expected output + expected := config.NewConfig() + require.NoError(t, expected.LoadConfig(expectedFile)) + require.NotEmpty(t, expected.Inputs) + + // Read the input data + input, remote, err := config.LoadConfigFile(inputFile) + require.NoError(t, err) + require.False(t, remote) + require.NotEmpty(t, input) + + // Migrate + output, n, err := config.ApplyMigrations(input) + require.NoError(t, err) + require.NotEmpty(t, output) + require.GreaterOrEqual(t, n, uint64(1)) + actual := config.NewConfig() + require.NoError(t, actual.LoadConfigData(output)) + + // Test the output + require.Len(t, actual.Inputs, len(expected.Inputs)) + 
actualIDs := make([]string, 0, len(expected.Inputs)) + expectedIDs := make([]string, 0, len(expected.Inputs)) + for i := range actual.Inputs { + actualIDs = append(actualIDs, actual.Inputs[i].ID()) + expectedIDs = append(expectedIDs, expected.Inputs[i].ID()) + } + require.ElementsMatch(t, expectedIDs, actualIDs, string(output)) + }) + } +} diff --git a/migrations/inputs_mqtt_consumer/testcases/deprecated_metric_buffer/expected.conf b/migrations/inputs_mqtt_consumer/testcases/deprecated_metric_buffer/expected.conf new file mode 100644 index 0000000000000..558bffa51abf6 --- /dev/null +++ b/migrations/inputs_mqtt_consumer/testcases/deprecated_metric_buffer/expected.conf @@ -0,0 +1,4 @@ +[[inputs.mqtt_consumer]] +data_format = "influx" +servers = ["tcp://127.0.0.1:1883"] +topics = ["telegraf/host01/cpu", "telegraf/+/mem", "sensors/#"] diff --git a/migrations/inputs_mqtt_consumer/testcases/deprecated_metric_buffer/telegraf.conf b/migrations/inputs_mqtt_consumer/testcases/deprecated_metric_buffer/telegraf.conf new file mode 100644 index 0000000000000..8bf21f8b74f6c --- /dev/null +++ b/migrations/inputs_mqtt_consumer/testcases/deprecated_metric_buffer/telegraf.conf @@ -0,0 +1,10 @@ +# Read metrics from MQTT topic(s) +[[inputs.mqtt_consumer]] + servers = ["tcp://127.0.0.1:1883"] + topics = [ + "telegraf/host01/cpu", + "telegraf/+/mem", + "sensors/#", + ] + metric_buffer = 1024 + data_format = "influx" diff --git a/migrations/inputs_mqtt_consumer/testcases/deprecated_metric_buffer_parser/expected.conf b/migrations/inputs_mqtt_consumer/testcases/deprecated_metric_buffer_parser/expected.conf new file mode 100644 index 0000000000000..bee249b3f5176 --- /dev/null +++ b/migrations/inputs_mqtt_consumer/testcases/deprecated_metric_buffer_parser/expected.conf @@ -0,0 +1,12 @@ +[[inputs.mqtt_consumer]] +data_format = "xpath_json" +servers = ["tcp://127.0.0.1:1883"] +topics = ["telegraf/host01/cpu", "telegraf/+/mem", "sensors/#"] +xpath_native_types = true + 
+[[inputs.mqtt_consumer.xpath]] +field_selection = "/fields/*" +metric_name = "/name" +tag_selection = "/tags/*" +timestamp = "/timestamp" +timestamp_format = "unix_ms" \ No newline at end of file diff --git a/migrations/inputs_mqtt_consumer/testcases/deprecated_metric_buffer_parser/telegraf.conf b/migrations/inputs_mqtt_consumer/testcases/deprecated_metric_buffer_parser/telegraf.conf new file mode 100644 index 0000000000000..1066e0097ff64 --- /dev/null +++ b/migrations/inputs_mqtt_consumer/testcases/deprecated_metric_buffer_parser/telegraf.conf @@ -0,0 +1,20 @@ +# Read metrics from MQTT topic(s) +[[inputs.mqtt_consumer]] + servers = ["tcp://127.0.0.1:1883"] + topics = [ + "telegraf/host01/cpu", + "telegraf/+/mem", + "sensors/#", + ] + metric_buffer = 1024 + + data_format = "xpath_json" + xpath_native_types = true + + # Configuration matching the first (ENERGY) message + [[inputs.mqtt_consumer.xpath]] + metric_name = "/name" + timestamp = "/timestamp" + timestamp_format = "unix_ms" + field_selection = "/fields/*" + tag_selection = "/tags/*" \ No newline at end of file diff --git a/migrations/inputs_nats_consumer/migration.go b/migrations/inputs_nats_consumer/migration.go new file mode 100644 index 0000000000000..77b1f4d3ed4ac --- /dev/null +++ b/migrations/inputs_nats_consumer/migration.go @@ -0,0 +1,43 @@ +package inputs_nats_consumer + +import ( + "github.com/influxdata/toml" + "github.com/influxdata/toml/ast" + + "github.com/influxdata/telegraf/migrations" +) + +// Migration function +func migrate(tbl *ast.Table) ([]byte, string, error) { + // Decode the old data structure + var plugin map[string]interface{} + if err := toml.UnmarshalTable(tbl, &plugin); err != nil { + return nil, "", err + } + + // Check for deprecated option(s) and migrate them + var applied bool + if _, found := plugin["metric_buffer"]; found { + applied = true + + // Remove the ignored setting + delete(plugin, "metric_buffer") + } + + // No options migrated so we can 
exit early + if !applied { + return nil, "", migrations.ErrNotApplicable + } + + // Create the corresponding plugin configurations + cfg := migrations.CreateTOMLStruct("inputs", "nats_consumer") + cfg.Add("inputs", "nats_consumer", plugin) + + output, err := toml.Marshal(cfg) + return output, "", err +} + +// Register the migration function for the plugin type +func init() { + migrations.AddPluginOptionMigration("inputs.nats_consumer", migrate) +} diff --git a/migrations/inputs_nats_consumer/migration_test.go b/migrations/inputs_nats_consumer/migration_test.go new file mode 100644 index 0000000000000..66ab428fd8b6b --- /dev/null +++ b/migrations/inputs_nats_consumer/migration_test.go @@ -0,0 +1,135 @@ +package inputs_nats_consumer_test + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/config" + _ "github.com/influxdata/telegraf/migrations/inputs_nats_consumer" // register migration + _ "github.com/influxdata/telegraf/plugins/inputs/nats_consumer" // register plugin + _ "github.com/influxdata/telegraf/plugins/parsers/all" // register parsers +) + +func TestNoMigration(t *testing.T) { + defaultCfg := []byte(` +# Read metrics from NATS subject(s) + [[inputs.nats_consumer]] + ## urls of NATS servers + servers = ["nats://localhost:4222"] + + ## subject(s) to consume + ## If you use jetstream you need to set the subjects + ## in jetstream_subjects + subjects = ["telegraf"] + + ## jetstream subjects + ## jetstream is a streaming technology inside of nats. + ## With jetstream the nats-server persists messages and + ## a consumer can consume historical messages. This is + ## useful when telegraf needs to restart it don't miss a + ## message. You need to configure the nats-server. + ## https://docs.nats.io/nats-concepts/jetstream. 
+ jetstream_subjects = ["js_telegraf"] + + ## name a queue group + queue_group = "telegraf_consumers" + + ## Optional credentials + # username = "" + # password = "" + + ## Optional NATS 2.0 and NATS NGS compatible user credentials + # credentials = "/etc/telegraf/nats.creds" + + ## Use Transport Layer Security + # secure = false + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## Sets the limits for pending msgs and bytes for each subscription + ## These shouldn't need to be adjusted except in very high throughput scenarios + # pending_message_limit = 65536 + # pending_bytes_limit = 67108864 + + ## Max undelivered messages + ## This plugin uses tracking metrics, which ensure messages are read to + ## outputs before acknowledging them to the original broker to ensure data + ## is not lost. This option sets the maximum messages to read from the + ## broker that have not been written by an output. + ## + ## This value needs to be picked with awareness of the agent's + ## metric_batch_size value as well. Setting max undelivered messages too high + ## can result in a constant stream of data batches to the output. While + ## setting it too low may never flush the broker's messages. + # max_undelivered_messages = 1000 + + ## Data format to consume. 
+ ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" +`) + + // Migrate and check that nothing changed + output, n, err := config.ApplyMigrations(defaultCfg) + require.NoError(t, err) + require.NotEmpty(t, output) + require.Zero(t, n) + require.Equal(t, string(defaultCfg), string(output)) +} + +func TestCases(t *testing.T) { + // Get all directories in testdata + folders, err := os.ReadDir("testcases") + require.NoError(t, err) + + for _, f := range folders { + // Only handle folders + if !f.IsDir() { + continue + } + + t.Run(f.Name(), func(t *testing.T) { + testcasePath := filepath.Join("testcases", f.Name()) + inputFile := filepath.Join(testcasePath, "telegraf.conf") + expectedFile := filepath.Join(testcasePath, "expected.conf") + + // Read the expected output + expected := config.NewConfig() + require.NoError(t, expected.LoadConfig(expectedFile)) + require.NotEmpty(t, expected.Inputs) + + // Read the input data + input, remote, err := config.LoadConfigFile(inputFile) + require.NoError(t, err) + require.False(t, remote) + require.NotEmpty(t, input) + + // Migrate + output, n, err := config.ApplyMigrations(input) + require.NoError(t, err) + require.NotEmpty(t, output) + require.GreaterOrEqual(t, n, uint64(1)) + actual := config.NewConfig() + require.NoError(t, actual.LoadConfigData(output)) + + // Test the output + require.Len(t, actual.Inputs, len(expected.Inputs)) + actualIDs := make([]string, 0, len(expected.Inputs)) + expectedIDs := make([]string, 0, len(expected.Inputs)) + for i := range actual.Inputs { + actualIDs = append(actualIDs, actual.Inputs[i].ID()) + expectedIDs = append(expectedIDs, expected.Inputs[i].ID()) + } + require.ElementsMatch(t, expectedIDs, actualIDs, string(output)) + }) + } +} diff --git a/migrations/inputs_nats_consumer/testcases/deprecated_metric_buffer/expected.conf 
b/migrations/inputs_nats_consumer/testcases/deprecated_metric_buffer/expected.conf new file mode 100644 index 0000000000000..13465564cb7fd --- /dev/null +++ b/migrations/inputs_nats_consumer/testcases/deprecated_metric_buffer/expected.conf @@ -0,0 +1,6 @@ +[[inputs.nats_consumer]] +servers = ["nats://localhost:4222"] +subjects = ["telegraf"] +jetstream_subjects = ["js_telegraf"] +queue_group = "telegraf_consumers" +data_format = "influx" diff --git a/migrations/inputs_nats_consumer/testcases/deprecated_metric_buffer/telegraf.conf b/migrations/inputs_nats_consumer/testcases/deprecated_metric_buffer/telegraf.conf new file mode 100644 index 0000000000000..edccdc6dc496f --- /dev/null +++ b/migrations/inputs_nats_consumer/testcases/deprecated_metric_buffer/telegraf.conf @@ -0,0 +1,19 @@ +# Read metrics from NATS subject(s) +[[inputs.nats_consumer]] + ## urls of NATS servers + servers = ["nats://localhost:4222"] + + ## subject(s) to consume + subjects = ["telegraf"] + + ## jetstream subjects + jetstream_subjects = ["js_telegraf"] + + ## name a queue group + queue_group = "telegraf_consumers" + + ## Input data format + data_format = "influx" + + ## Number of metrics to buffer + metric_buffer = 1024 diff --git a/migrations/inputs_nats_consumer/testcases/deprecated_metric_buffer_parser/expected.conf b/migrations/inputs_nats_consumer/testcases/deprecated_metric_buffer_parser/expected.conf new file mode 100644 index 0000000000000..1e001ae76271e --- /dev/null +++ b/migrations/inputs_nats_consumer/testcases/deprecated_metric_buffer_parser/expected.conf @@ -0,0 +1,14 @@ +[[inputs.nats_consumer]] + servers = ["nats://localhost:4222"] + subjects = ["telegraf"] + jetstream_subjects = ["js_telegraf"] + queue_group = "telegraf_consumers" + data_format = "xpath_json" + xpath_native_types = true + + [[inputs.nats_consumer.xpath]] + metric_name = "/name" + timestamp = "/timestamp" + timestamp_format = "unix_ms" + field_selection = "/fields/*" + tag_selection = "/tags/*" \ No newline at 
end of file diff --git a/migrations/inputs_nats_consumer/testcases/deprecated_metric_buffer_parser/telegraf.conf b/migrations/inputs_nats_consumer/testcases/deprecated_metric_buffer_parser/telegraf.conf new file mode 100644 index 0000000000000..6655049bdc251 --- /dev/null +++ b/migrations/inputs_nats_consumer/testcases/deprecated_metric_buffer_parser/telegraf.conf @@ -0,0 +1,27 @@ +# Read metrics from NATS subject(s) +[[inputs.nats_consumer]] + ## urls of NATS servers + servers = ["nats://localhost:4222"] + + ## subject(s) to consume + subjects = ["telegraf"] + + ## jetstream subjects + jetstream_subjects = ["js_telegraf"] + + ## name a queue group + queue_group = "telegraf_consumers" + + ## Number of metrics to buffer + metric_buffer = 1024 + + data_format = "xpath_json" + xpath_native_types = true + + # Configuration matching the first (ENERGY) message + [[inputs.nats_consumer.xpath]] + metric_name = "/name" + timestamp = "/timestamp" + timestamp_format = "unix_ms" + field_selection = "/fields/*" + tag_selection = "/tags/*" \ No newline at end of file diff --git a/migrations/inputs_procstat/migration.go b/migrations/inputs_procstat/migration.go new file mode 100644 index 0000000000000..0826b8803b3f3 --- /dev/null +++ b/migrations/inputs_procstat/migration.go @@ -0,0 +1,76 @@ +package inputs_procstat + +import ( + "fmt" + + "github.com/influxdata/toml" + "github.com/influxdata/toml/ast" + + "github.com/influxdata/telegraf/internal/choice" + "github.com/influxdata/telegraf/migrations" +) + +// Migration function +func migrate(tbl *ast.Table) ([]byte, string, error) { + // Decode the old data structure + var plugin map[string]interface{} + if err := toml.UnmarshalTable(tbl, &plugin); err != nil { + return nil, "", err + } + + // Check for deprecated option(s) and migrate them + var applied bool + if oldUnits, found := plugin["supervisor_unit"]; found { + applied = true + + // Check if the new option already exists and merge the two + 
var units []string + if newUnits, found := plugin["supervisor_units"]; found { + nu, ok := newUnits.([]interface{}) + if !ok { + return nil, "", fmt.Errorf("setting 'supervisor_units' has wrong type %T", newUnits) + } + for _, raw := range nu { + u, ok := raw.(string) + if !ok { + return nil, "", fmt.Errorf("setting 'supervisor_units' contains wrong type %T", raw) + } + units = append(units, u) + } + } + ou, ok := oldUnits.([]interface{}) + if !ok { + return nil, "", fmt.Errorf("setting 'supervisor_unit' has wrong type %T", oldUnits) + } + for _, raw := range ou { + u, ok := raw.(string) + if !ok { + return nil, "", fmt.Errorf("setting 'supervisor_unit' contains wrong type %T", raw) + } + if !choice.Contains(u, units) { + units = append(units, u) + } + } + plugin["supervisor_units"] = units + + // Remove deprecated setting + delete(plugin, "supervisor_unit") + } + + // No options migrated so we can exit early + if !applied { + return nil, "", migrations.ErrNotApplicable + } + + // Create the corresponding plugin configurations + cfg := migrations.CreateTOMLStruct("inputs", "procstat") + cfg.Add("inputs", "procstat", plugin) + + output, err := toml.Marshal(cfg) + return output, "", err +} + +// Register the migration function for the plugin type +func init() { + migrations.AddPluginOptionMigration("inputs.procstat", migrate) +} diff --git a/migrations/inputs_procstat/migration_test.go b/migrations/inputs_procstat/migration_test.go new file mode 100644 index 0000000000000..0dc5cca883c80 --- /dev/null +++ b/migrations/inputs_procstat/migration_test.go @@ -0,0 +1,160 @@ +package inputs_procstat_test + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/config" + _ "github.com/influxdata/telegraf/migrations/inputs_procstat" // register migration + _ "github.com/influxdata/telegraf/plugins/inputs/procstat" // register plugin +) + +func TestNoMigration(t *testing.T) { 
+ defaultCfg := []byte(` + # Read metrics from MQTT topic(s) + [[inputs.mqtt_consumer]] + ## Broker URLs for the MQTT server or cluster. To connect to multiple + ## clusters or standalone servers, use a separate plugin instance. + ## example: servers = ["tcp://localhost:1883"] + ## servers = ["ssl://localhost:1883"] + ## servers = ["ws://localhost:1883"] + servers = ["tcp://127.0.0.1:1883"] + + ## Topics that will be subscribed to. + topics = [ + "telegraf/host01/cpu", + "telegraf/+/mem", + "sensors/#", + ] + + ## The message topic will be stored in a tag specified by this value. If set + ## to the empty string no topic tag will be created. + # topic_tag = "topic" + + ## QoS policy for messages + ## 0 = at most once + ## 1 = at least once + ## 2 = exactly once + ## + ## When using a QoS of 1 or 2, you should enable persistent_session to allow + ## resuming unacknowledged messages. + # qos = 0 + + ## Connection timeout for initial connection in seconds + # connection_timeout = "30s" + + ## Max undelivered messages + ## This plugin uses tracking metrics, which ensure messages are read to + ## outputs before acknowledging them to the original broker to ensure data + ## is not lost. This option sets the maximum messages to read from the + ## broker that have not been written by an output. + ## + ## This value needs to be picked with awareness of the agent's + ## metric_batch_size value as well. Setting max undelivered messages too high + ## can result in a constant stream of data batches to the output. While + ## setting it too low may never flush the broker's messages. + # max_undelivered_messages = 1000 + + ## Persistent session disables clearing of the client session on connection. + ## In order for this option to work you must also set client_id to identify + ## the client. To receive messages that arrived while the client is offline, + ## also set the qos option to 1 or 2 and don't forget to also set the QoS when + ## publishing. 
+ # persistent_session = false + + ## If unset, a random client ID will be generated. + # client_id = "" + + ## Username and password to connect MQTT server. + # username = "telegraf" + # password = "metricsmetricsmetricsmetrics" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## Client trace messages + ## When set to true, and debug mode enabled in the agent settings, the MQTT + ## client's messages are included in telegraf logs. These messages are very + ## noisey, but essential for debugging issues. + # client_trace = false + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" + + ## Enable extracting tag values from MQTT topics + ## _ denotes an ignored entry in the topic path + # [[inputs.mqtt_consumer.topic_parsing]] + # topic = "" + # measurement = "" + # tags = "" + # fields = "" + ## Value supported is int, float, unit + # [[inputs.mqtt_consumer.topic.types]] + # key = type +`) + + // Migrate and check that nothing changed + output, n, err := config.ApplyMigrations(defaultCfg) + require.NoError(t, err) + require.NotEmpty(t, output) + require.Zero(t, n) + require.Equal(t, string(defaultCfg), string(output)) +} + +func TestCases(t *testing.T) { + // Get all directories in testdata + folders, err := os.ReadDir("testcases") + require.NoError(t, err) + + for _, f := range folders { + // Only handle folders + if !f.IsDir() { + continue + } + + t.Run(f.Name(), func(t *testing.T) { + testcasePath := filepath.Join("testcases", f.Name()) + inputFile := filepath.Join(testcasePath, "telegraf.conf") + expectedFile := filepath.Join(testcasePath, "expected.conf") + + // Read the expected output + 
expected := config.NewConfig() + require.NoError(t, expected.LoadConfig(expectedFile)) + require.NotEmpty(t, expected.Inputs) + + // Read the input data + input, remote, err := config.LoadConfigFile(inputFile) + require.NoError(t, err) + require.False(t, remote) + require.NotEmpty(t, input) + + // Migrate + output, n, err := config.ApplyMigrations(input) + require.NoError(t, err) + require.NotEmpty(t, output) + require.GreaterOrEqual(t, n, uint64(1)) + actual := config.NewConfig() + require.NoError(t, actual.LoadConfigData(output)) + + // Test the output + require.Len(t, actual.Inputs, len(expected.Inputs)) + actualIDs := make([]string, 0, len(expected.Inputs)) + expectedIDs := make([]string, 0, len(expected.Inputs)) + for i := range actual.Inputs { + actualIDs = append(actualIDs, actual.Inputs[i].ID()) + expectedIDs = append(expectedIDs, expected.Inputs[i].ID()) + } + require.ElementsMatch(t, expectedIDs, actualIDs, string(output)) + }) + } +} diff --git a/migrations/inputs_procstat/testcases/deprecated_supervisor_unit merge/expected.conf b/migrations/inputs_procstat/testcases/deprecated_supervisor_unit merge/expected.conf new file mode 100644 index 0000000000000..65bedc7159cfc --- /dev/null +++ b/migrations/inputs_procstat/testcases/deprecated_supervisor_unit merge/expected.conf @@ -0,0 +1,2 @@ +[[inputs.procstat]] +supervisor_units = ["upsd", "webserver", "mail", "proxy"] diff --git a/migrations/inputs_procstat/testcases/deprecated_supervisor_unit merge/telegraf.conf b/migrations/inputs_procstat/testcases/deprecated_supervisor_unit merge/telegraf.conf new file mode 100644 index 0000000000000..20c4efbffc853 --- /dev/null +++ b/migrations/inputs_procstat/testcases/deprecated_supervisor_unit merge/telegraf.conf @@ -0,0 +1,3 @@ +[[inputs.procstat]] + supervisor_unit = ["webserver", "proxy"] + supervisor_units = ["upsd", "webserver", "mail"] diff --git a/migrations/inputs_procstat/testcases/deprecated_supervisor_unit/expected.conf 
b/migrations/inputs_procstat/testcases/deprecated_supervisor_unit/expected.conf new file mode 100644 index 0000000000000..a31601c665c73 --- /dev/null +++ b/migrations/inputs_procstat/testcases/deprecated_supervisor_unit/expected.conf @@ -0,0 +1,3 @@ +[[inputs.procstat]] +pid_file = "/var/run/nginx.pid" +supervisor_units = ["webserver", "proxy"] diff --git a/migrations/inputs_procstat/testcases/deprecated_supervisor_unit/telegraf.conf b/migrations/inputs_procstat/testcases/deprecated_supervisor_unit/telegraf.conf new file mode 100644 index 0000000000000..9e809996548e3 --- /dev/null +++ b/migrations/inputs_procstat/testcases/deprecated_supervisor_unit/telegraf.conf @@ -0,0 +1,47 @@ +# Monitor process cpu and memory usage +[[inputs.procstat]] + ## PID file to monitor process + pid_file = "/var/run/nginx.pid" + ## executable name (ie, pgrep ) + # exe = "nginx" + ## pattern as argument for pgrep (ie, pgrep -f ) + # pattern = "nginx" + ## user as argument for pgrep (ie, pgrep -u ) + # user = "nginx" + ## Systemd unit name, supports globs when include_systemd_children is set to true + # systemd_unit = "nginx.service" + # include_systemd_children = false + ## CGroup name or path, supports globs + # cgroup = "systemd/system.slice/nginx.service" + ## Supervisor service names of hypervisorctl management + supervisor_unit = ["webserver", "proxy"] + + ## Windows service name + # win_service = "" + + ## override for process_name + ## This is optional; default is sourced from /proc//status + # process_name = "bar" + + ## Field name prefix + # prefix = "" + + ## When true add the full cmdline as a tag. + # cmdline_tag = false + + ## Mode to use when calculating CPU usage. Can be one of 'solaris' or 'irix'. + # mode = "irix" + + ## Add the PID as a tag instead of as a field. When collecting multiple + ## processes with otherwise matching tags this setting should be enabled to + ## ensure each process has a unique identity. 
+ ## + ## Enabling this option may result in a large number of series, especially + ## when processes have a short lifetime. + # pid_tag = false + + ## Method to use when finding process IDs. Can be one of 'pgrep', or + ## 'native'. The pgrep finder calls the pgrep executable in the PATH while + ## the native finder performs the search directly in a manor dependent on the + ## platform. Default is 'pgrep' + # pid_finder = "pgrep" diff --git a/migrations/outputs_influxdb/migration.go b/migrations/outputs_influxdb/migration.go new file mode 100644 index 0000000000000..ac7d01079cd72 --- /dev/null +++ b/migrations/outputs_influxdb/migration.go @@ -0,0 +1,72 @@ +package outputs_influxdb + +import ( + "errors" + "fmt" + + "github.com/influxdata/toml" + "github.com/influxdata/toml/ast" + + "github.com/influxdata/telegraf/internal/choice" + "github.com/influxdata/telegraf/migrations" +) + +// Migration function +func migrate(tbl *ast.Table) ([]byte, string, error) { + // Decode the old data structure + var plugin map[string]interface{} + if err := toml.UnmarshalTable(tbl, &plugin); err != nil { + return nil, "", err + } + + // Check for deprecated option(s) and migrate them + var applied bool + if oldURL, found := plugin["url"]; found { + applied = true + + var urls []string + // Merge the old URL and the new URLs with deduplication + if newURLs, found := plugin["urls"]; found { + list, ok := newURLs.([]interface{}) + if !ok { + return nil, "", errors.New("'urls' setting is not a list") + } + for _, raw := range list { + nu, ok := raw.(string) + if !ok { + return nil, "", fmt.Errorf("unexpected 'urls' entry %v (%T)", raw, raw) + } + urls = append(urls, nu) + } + } + ou, ok := oldURL.(string) + if !ok { + return nil, "", fmt.Errorf("unexpected 'url' entry %v (%T)", ou, ou) + } + + if !choice.Contains(ou, urls) { + urls = append(urls, ou) + } + + // Update replacement and remove the deprecated setting + plugin["urls"] = urls + delete(plugin, 
"url") + } + + // No options migrated so we can exit early + if !applied { + return nil, "", migrations.ErrNotApplicable + } + + // Create the corresponding plugin configurations + cfg := migrations.CreateTOMLStruct("outputs", "influxdb") + cfg.Add("outputs", "influxdb", plugin) + + output, err := toml.Marshal(cfg) + return output, "", err +} + +// Register the migration function for the plugin type +func init() { + migrations.AddPluginOptionMigration("outputs.influxdb", migrate) +} diff --git a/migrations/outputs_influxdb/migration_test.go b/migrations/outputs_influxdb/migration_test.go new file mode 100644 index 0000000000000..9731e861109c8 --- /dev/null +++ b/migrations/outputs_influxdb/migration_test.go @@ -0,0 +1,73 @@ +package outputs_influxdb_test + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/config" + _ "github.com/influxdata/telegraf/migrations/outputs_influxdb" // register migration + "github.com/influxdata/telegraf/plugins/outputs/influxdb" // register plugin +) + +func TestNoMigration(t *testing.T) { + plugin := &influxdb.InfluxDB{} + defaultCfg := []byte(plugin.SampleConfig()) + + // Migrate and check that nothing changed + output, n, err := config.ApplyMigrations(defaultCfg) + require.NoError(t, err) + require.NotEmpty(t, output) + require.Zero(t, n) + require.Equal(t, string(defaultCfg), string(output)) +} + +func TestCases(t *testing.T) { + // Get all directories in testdata + folders, err := os.ReadDir("testcases") + require.NoError(t, err) + + for _, f := range folders { + // Only handle folders + if !f.IsDir() { + continue + } + + t.Run(f.Name(), func(t *testing.T) { + testcasePath := filepath.Join("testcases", f.Name()) + inputFile := filepath.Join(testcasePath, "telegraf.conf") + expectedFile := filepath.Join(testcasePath, "expected.conf") + + // Read the expected output + expected := config.NewConfig() + require.NoError(t, 
expected.LoadConfig(expectedFile)) + require.NotEmpty(t, expected.Outputs) + + // Read the input data + input, remote, err := config.LoadConfigFile(inputFile) + require.NoError(t, err) + require.False(t, remote) + require.NotEmpty(t, input) + + // Migrate + output, n, err := config.ApplyMigrations(input) + require.NoError(t, err) + require.NotEmpty(t, output) + require.GreaterOrEqual(t, n, uint64(1)) + actual := config.NewConfig() + require.NoError(t, actual.LoadConfigData(output)) + + // Test the output + require.Len(t, actual.Outputs, len(expected.Outputs)) + actualIDs := make([]string, 0, len(expected.Outputs)) + expectedIDs := make([]string, 0, len(expected.Outputs)) + for i := range actual.Outputs { + actualIDs = append(actualIDs, actual.Outputs[i].ID()) + expectedIDs = append(expectedIDs, expected.Outputs[i].ID()) + } + require.ElementsMatch(t, expectedIDs, actualIDs, string(output)) + }) + } +} diff --git a/migrations/outputs_influxdb/testcases/convert_url/expected.conf b/migrations/outputs_influxdb/testcases/convert_url/expected.conf new file mode 100644 index 0000000000000..b29b97cd23b37 --- /dev/null +++ b/migrations/outputs_influxdb/testcases/convert_url/expected.conf @@ -0,0 +1,2 @@ +[[outputs.influxdb]] +urls = ["http://127.0.0.1:8086"] diff --git a/migrations/outputs_influxdb/testcases/convert_url/telegraf.conf b/migrations/outputs_influxdb/testcases/convert_url/telegraf.conf new file mode 100644 index 0000000000000..68411ec538aab --- /dev/null +++ b/migrations/outputs_influxdb/testcases/convert_url/telegraf.conf @@ -0,0 +1,78 @@ +# Configuration for sending metrics to InfluxDB +[[outputs.influxdb]] + url = "http://127.0.0.1:8086" + ## The full HTTP or UDP URL for your InfluxDB instance. + ## + ## Multiple URLs can be specified for a single cluster, only ONE of the + ## urls will be written to each interval.
+ # urls = ["unix:///var/run/influxdb.sock"] + # urls = ["udp://127.0.0.1:8089"] + # urls = ["http://127.0.0.1:8086"] + + ## The target database for metrics; will be created as needed. + ## For UDP url endpoint database needs to be configured on server side. + # database = "telegraf" + + ## The value of this tag will be used to determine the database. If this + ## tag is not set the 'database' option is used as the default. + # database_tag = "" + + ## If true, the 'database_tag' will not be included in the written metric. + # exclude_database_tag = false + + ## If true, no CREATE DATABASE queries will be sent. Set to true when using + ## Telegraf with a user without permissions to create databases or when the + ## database already exists. + # skip_database_creation = false + + ## Name of existing retention policy to write to. Empty string writes to + ## the default retention policy. Only takes effect when using HTTP. + # retention_policy = "" + + ## The value of this tag will be used to determine the retention policy. If this + ## tag is not set the 'retention_policy' option is used as the default. + # retention_policy_tag = "" + + ## If true, the 'retention_policy_tag' will not be included in the written metric. + # exclude_retention_policy_tag = false + + ## Write consistency (clusters only), can be: "any", "one", "quorum", "all". + ## Only takes effect when using HTTP. + # write_consistency = "any" + + ## Timeout for HTTP messages. + # timeout = "5s" + + ## HTTP Basic Auth + # username = "telegraf" + # password = "metricsmetricsmetricsmetrics" + + ## HTTP User-Agent + # user_agent = "telegraf" + + ## UDP payload size is the maximum packet size to send. + # udp_payload = "512B" + + ## Optional TLS Config for use on HTTP connections. 
+ # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## HTTP Proxy override, if unset values the standard proxy environment + ## variables are consulted to determine which proxy, if any, should be used. + # http_proxy = "http://corporate.proxy:3128" + + ## Additional HTTP headers + # http_headers = {"X-Special-Header" = "Special-Value"} + + ## HTTP Content-Encoding for write request body, can be set to "gzip" to + ## compress body or "identity" to apply no encoding. + # content_encoding = "gzip" + + ## When true, Telegraf will output unsigned integers as unsigned values, + ## i.e.: "42u". You will need a version of InfluxDB supporting unsigned + ## integer values. Enabling this option will result in field type errors if + ## existing data has been written. + # influx_uint_support = false diff --git a/migrations/outputs_influxdb/testcases/merge_url/expected.conf b/migrations/outputs_influxdb/testcases/merge_url/expected.conf new file mode 100644 index 0000000000000..78d169d1b1fa7 --- /dev/null +++ b/migrations/outputs_influxdb/testcases/merge_url/expected.conf @@ -0,0 +1,5 @@ +[[outputs.influxdb]] +namepass = ["metrics"] +urls = ["udp://127.0.0.1:8089", "http://127.0.0.1:8086"] +database_tag = "table" +skip_database_creation = true diff --git a/migrations/outputs_influxdb/testcases/merge_url/telegraf.conf b/migrations/outputs_influxdb/testcases/merge_url/telegraf.conf new file mode 100644 index 0000000000000..22fb32dfcffc5 --- /dev/null +++ b/migrations/outputs_influxdb/testcases/merge_url/telegraf.conf @@ -0,0 +1,7 @@ +# Configuration for sending metrics to InfluxDB +[[outputs.influxdb]] + namepass = ["metrics"] + url = "http://127.0.0.1:8086" + urls = ["udp://127.0.0.1:8089"] + database_tag = "table" + skip_database_creation = true diff --git a/migrations/outputs_influxdb/testcases/merge_url_overlap/expected.conf 
b/migrations/outputs_influxdb/testcases/merge_url_overlap/expected.conf new file mode 100644 index 0000000000000..4d6f29d5b3998 --- /dev/null +++ b/migrations/outputs_influxdb/testcases/merge_url_overlap/expected.conf @@ -0,0 +1,5 @@ +[[outputs.influxdb]] +namepass = ["metrics"] +urls = ["http://127.0.0.1:8086", "udp://127.0.0.1:8089"] +database_tag = "table" +skip_database_creation = true diff --git a/migrations/outputs_influxdb/testcases/merge_url_overlap/telegraf.conf b/migrations/outputs_influxdb/testcases/merge_url_overlap/telegraf.conf new file mode 100644 index 0000000000000..897ea2f8a3dc9 --- /dev/null +++ b/migrations/outputs_influxdb/testcases/merge_url_overlap/telegraf.conf @@ -0,0 +1,7 @@ +# Configuration for sending metrics to InfluxDB +[[outputs.influxdb]] + namepass = ["metrics"] + url = "http://127.0.0.1:8086" + urls = ["http://127.0.0.1:8086", "udp://127.0.0.1:8089"] + database_tag = "table" + skip_database_creation = true diff --git a/plugins/common/kafka/config.go b/plugins/common/kafka/config.go index f007d24ec1b7f..cd3fd432aedef 100644 --- a/plugins/common/kafka/config.go +++ b/plugins/common/kafka/config.go @@ -6,7 +6,7 @@ import ( "strings" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/influxdata/telegraf" tgConf "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" diff --git a/plugins/common/kafka/logger.go b/plugins/common/kafka/logger.go index ad264c31ee4e4..dd0a65a18fb43 100644 --- a/plugins/common/kafka/logger.go +++ b/plugins/common/kafka/logger.go @@ -1,7 +1,7 @@ package kafka import ( - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/models" diff --git a/plugins/common/kafka/sasl.go b/plugins/common/kafka/sasl.go index 029a18c1fc31e..4a7356aa88749 100644 --- a/plugins/common/kafka/sasl.go +++ b/plugins/common/kafka/sasl.go @@ 
-4,7 +4,7 @@ import ( "errors" "fmt" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/influxdata/telegraf/config" ) diff --git a/plugins/common/mqtt/mqtt.go b/plugins/common/mqtt/mqtt.go index 892eec39c0d51..3534ad093b72b 100644 --- a/plugins/common/mqtt/mqtt.go +++ b/plugins/common/mqtt/mqtt.go @@ -72,7 +72,7 @@ func NewClient(cfg *MqttConfig) (Client, error) { case "5": return NewMQTTv5Client(cfg) } - return nil, fmt.Errorf("unsuported protocol %q: must be \"3.1.1\" or \"5\"", cfg.Protocol) + return nil, fmt.Errorf("unsupported protocol %q: must be \"3.1.1\" or \"5\"", cfg.Protocol) } func parseServers(servers []string) ([]*url.URL, error) { diff --git a/plugins/common/oauth/config.go b/plugins/common/oauth/config.go index 6175e209363f3..9ae45674a2a2a 100644 --- a/plugins/common/oauth/config.go +++ b/plugins/common/oauth/config.go @@ -3,6 +3,7 @@ package oauth import ( "context" "net/http" + "net/url" "golang.org/x/oauth2" "golang.org/x/oauth2/clientcredentials" @@ -23,10 +24,11 @@ func (o *OAuth2Config) CreateOauth2Client(ctx context.Context, client *http.Clie } oauthConfig := clientcredentials.Config{ - ClientID: o.ClientID, - ClientSecret: o.ClientSecret, - TokenURL: o.TokenURL, - Scopes: o.Scopes, + ClientID: o.ClientID, + ClientSecret: o.ClientSecret, + TokenURL: o.TokenURL, + Scopes: o.Scopes, + EndpointParams: make(url.Values), } if o.Audience != "" { diff --git a/plugins/common/opcua/input/input_client_test.go b/plugins/common/opcua/input/input_client_test.go index dac2ab5e7ee88..a55735f616270 100644 --- a/plugins/common/opcua/input/input_client_test.go +++ b/plugins/common/opcua/input/input_client_test.go @@ -7,20 +7,21 @@ import ( "time" "github.com/gopcua/opcua/ua" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" 
"github.com/influxdata/telegraf/plugins/common/opcua" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) func TestTagsSliceToMap(t *testing.T) { m, err := tagsSliceToMap([][]string{{"foo", "bar"}, {"baz", "bat"}}) require.NoError(t, err) require.Len(t, m, 2) - require.Equal(t, m["foo"], "bar") - require.Equal(t, m["baz"], "bat") + require.Equal(t, "bar", m["foo"]) + require.Equal(t, "bat", m["baz"]) } func TestTagsSliceToMap_twoStrings(t *testing.T) { @@ -316,7 +317,7 @@ func TestNewNodeMetricMappingIdStrInstantiated(t *testing.T) { TagsSlice: [][]string{}, }, map[string]string{}) require.NoError(t, err) - require.Equal(t, nmm.idStr, "ns=2;s=h") + require.Equal(t, "ns=2;s=h", nmm.idStr) } func TestValidateNodeToAdd(t *testing.T) { diff --git a/plugins/common/starlark/metric.go b/plugins/common/starlark/metric.go index f1632312c84d0..02e4e0e706901 100644 --- a/plugins/common/starlark/metric.go +++ b/plugins/common/starlark/metric.go @@ -12,6 +12,8 @@ import ( ) type Metric struct { + ID uint64 + metric telegraf.Metric tagIterCount int fieldIterCount int @@ -20,6 +22,7 @@ type Metric struct { // Wrap updates the starlark.Metric to wrap a new telegraf.Metric. func (m *Metric) Wrap(metric telegraf.Metric) { + m.ID = metric.HashID() m.metric = metric m.tagIterCount = 0 m.fieldIterCount = 0 diff --git a/plugins/inputs/azure_monitor/README.md b/plugins/inputs/azure_monitor/README.md index 135bf87b848bd..b03d1a461b2c3 100644 --- a/plugins/inputs/azure_monitor/README.md +++ b/plugins/inputs/azure_monitor/README.md @@ -74,7 +74,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. 
# resource target #1 to collect metrics from [[inputs.azure_monitor.resource_target]] - # can be found undet Overview->Essentials->JSON View in the Azure portal for your application/service + # can be found under Overview->Essentials->JSON View in the Azure portal for your application/service # must start with 'resourceGroups/...' ('/subscriptions/xxxxxxxx-xxxx-xxxx-xxx-xxxxxxxxxxxx' # must be removed from the beginning of Resource ID property value) resource_id = "<>" diff --git a/plugins/inputs/azure_monitor/sample.conf b/plugins/inputs/azure_monitor/sample.conf index bedb0bc99ab7a..9f7bf30848e22 100644 --- a/plugins/inputs/azure_monitor/sample.conf +++ b/plugins/inputs/azure_monitor/sample.conf @@ -11,7 +11,7 @@ # resource target #1 to collect metrics from [[inputs.azure_monitor.resource_target]] - # can be found undet Overview->Essentials->JSON View in the Azure portal for your application/service + # can be found under Overview->Essentials->JSON View in the Azure portal for your application/service # must start with 'resourceGroups/...' 
('/subscriptions/xxxxxxxx-xxxx-xxxx-xxx-xxxxxxxxxxxx' # must be removed from the beginning of Resource ID property value) resource_id = "<>" diff --git a/plugins/inputs/beat/beat_test.go b/plugins/inputs/beat/beat_test.go index 2ca2ffba0a149..fae8818f49f3d 100644 --- a/plugins/inputs/beat/beat_test.go +++ b/plugins/inputs/beat/beat_test.go @@ -9,8 +9,9 @@ import ( "os" "testing" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func Test_BeatStats(t *testing.T) { @@ -177,10 +178,10 @@ func Test_BeatRequest(t *testing.T) { data, err := os.ReadFile(jsonFilePath) require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath) - require.Equal(t, request.Host, "beat.test.local") - require.Equal(t, request.Method, "POST") - require.Equal(t, request.Header.Get("Authorization"), "Basic YWRtaW46UFdE") - require.Equal(t, request.Header.Get("X-Test"), "test-value") + require.Equal(t, "beat.test.local", request.Host) + require.Equal(t, "POST", request.Method) + require.Equal(t, "Basic YWRtaW46UFdE", request.Header.Get("Authorization")) + require.Equal(t, "test-value", request.Header.Get("X-Test")) _, err = w.Write(data) require.NoError(t, err, "could not write data") diff --git a/plugins/inputs/ceph/ceph_test.go b/plugins/inputs/ceph/ceph_test.go index 340f5e58d38df..a782319ea0944 100644 --- a/plugins/inputs/ceph/ceph_test.go +++ b/plugins/inputs/ceph/ceph_test.go @@ -25,7 +25,7 @@ type expectedResult struct { func TestParseSockId(t *testing.T) { s := parseSockID(sockFile(osdPrefix, 1), osdPrefix, sockSuffix) - require.Equal(t, s, "1") + require.Equal(t, "1", s) } func TestParseMonDump(t *testing.T) { diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go index b0ff020d9100d..3d6ab0b634b90 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go +++ 
b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go @@ -1005,7 +1005,7 @@ func TestTCPDialoutMultiple(t *testing.T) { require.NoError(t, conn.Close()) // We use the invalid dialout flags to let the server close the connection - require.Equal(t, acc.Errors, []error{errors.New("invalid dialout flags: 257"), errors.New("invalid dialout flags: 257")}) + require.Equal(t, []error{errors.New("invalid dialout flags: 257"), errors.New("invalid dialout flags: 257")}, acc.Errors) tags := map[string]string{ "path": "type:model/some/path", @@ -1060,7 +1060,7 @@ func TestGRPCDialoutError(t *testing.T) { require.True(t, err == nil || errors.Is(err, io.EOF)) c.Stop() - require.Equal(t, acc.Errors, []error{errors.New("GRPC dialout error: foobar")}) + require.Equal(t, []error{errors.New("GRPC dialout error: foobar")}, acc.Errors) } func TestGRPCDialoutMultiple(t *testing.T) { @@ -1119,7 +1119,7 @@ func TestGRPCDialoutMultiple(t *testing.T) { c.Stop() require.NoError(t, conn.Close()) - require.Equal(t, acc.Errors, []error{errors.New("GRPC dialout error: testclose"), errors.New("GRPC dialout error: testclose")}) + require.Equal(t, []error{errors.New("GRPC dialout error: testclose"), errors.New("GRPC dialout error: testclose")}, acc.Errors) tags := map[string]string{ "path": "type:model/some/path", diff --git a/plugins/inputs/cloud_pubsub/cloud_pubsub_test.go b/plugins/inputs/cloud_pubsub/cloud_pubsub_test.go index 6da36a2c5f2f1..fb15b6ec47d5a 100644 --- a/plugins/inputs/cloud_pubsub/cloud_pubsub_test.go +++ b/plugins/inputs/cloud_pubsub/cloud_pubsub_test.go @@ -56,7 +56,7 @@ func TestRunParse(t *testing.T) { sub.messages <- msg acc.Wait(1) - require.Equal(t, acc.NFields(), 1) + require.Equal(t, 1, acc.NFields()) metric := acc.Metrics[0] validateTestInfluxMetric(t, metric) } @@ -102,7 +102,7 @@ func TestRunBase64(t *testing.T) { sub.messages <- msg acc.Wait(1) - require.Equal(t, acc.NFields(), 1) + require.Equal(t, 1, acc.NFields()) metric := acc.Metrics[0] 
validateTestInfluxMetric(t, metric) } @@ -151,7 +151,7 @@ func TestRunGzipDecode(t *testing.T) { } sub.messages <- msg acc.Wait(1) - require.Equal(t, acc.NFields(), 1) + require.Equal(t, 1, acc.NFields()) metric := acc.Metrics[0] validateTestInfluxMetric(t, metric) } @@ -200,7 +200,7 @@ func TestRunInvalidMessages(t *testing.T) { // Make sure we acknowledged message so we don't receive it again. testTracker.WaitForAck(1) - require.Equal(t, acc.NFields(), 0) + require.Equal(t, 0, acc.NFields()) } func TestRunOverlongMessages(t *testing.T) { @@ -249,7 +249,7 @@ func TestRunOverlongMessages(t *testing.T) { // Make sure we acknowledged message so we don't receive it again. testTracker.WaitForAck(1) - require.Equal(t, acc.NFields(), 0) + require.Equal(t, 0, acc.NFields()) } func TestRunErrorInSubscriber(t *testing.T) { diff --git a/plugins/inputs/ctrlx_datalayer/README.md b/plugins/inputs/ctrlx_datalayer/README.md index fcd6c38668966..2f5ea35c15604 100644 --- a/plugins/inputs/ctrlx_datalayer/README.md +++ b/plugins/inputs/ctrlx_datalayer/README.md @@ -1,7 +1,7 @@ # ctrlX Data Layer Input Plugin The `ctrlx_datalayer` plugin gathers data from the ctrlX Data Layer, -a communication middleware runnning on +a communication middleware running on [ctrlX CORE devices](https://ctrlx-core.com) from [Bosch Rexroth](https://boschrexroth.com). The platform is used for professional automation applications like industrial automation, building diff --git a/plugins/inputs/ctrlx_datalayer/ctrlx_datalayer.go b/plugins/inputs/ctrlx_datalayer/ctrlx_datalayer.go index 6f9bde66e7059..525a5c4bbd05b 100644 --- a/plugins/inputs/ctrlx_datalayer/ctrlx_datalayer.go +++ b/plugins/inputs/ctrlx_datalayer/ctrlx_datalayer.go @@ -60,7 +60,7 @@ type CtrlXDataLayer struct { // convertTimestamp2UnixTime converts the given Data Layer timestamp of the payload to UnixTime. func convertTimestamp2UnixTime(t int64) time.Time { - // 1 sec=1000 milisec=1000000 microsec=1000000000 nanosec. 
+ // 1 sec=1000 millisec=1000000 microsec=1000000000 nanosec. // Convert from FILETIME (100-nanosecond intervals since January 1, 1601 UTC) to // seconds and nanoseconds since January 1, 1970 UTC. // Between Jan 1, 1601 and Jan 1, 1970 there are 11644473600 seconds. diff --git a/plugins/inputs/disk/README.md b/plugins/inputs/disk/README.md index f8b27cc6f1e47..8a2bb0bec3785 100644 --- a/plugins/inputs/disk/README.md +++ b/plugins/inputs/disk/README.md @@ -63,6 +63,7 @@ docker run -v /:/hostfs:ro -e HOST_MOUNT_PREFIX=/hostfs -e HOST_PROC=/hostfs/pro - inodes_free (integer, files) - inodes_total (integer, files) - inodes_used (integer, files) + - inodes_used_percent (float, percent) ## Troubleshooting @@ -85,11 +86,11 @@ sudo setfacl -R -m u:telegraf:X /var/lib/docker/volumes/ ## Example Output ```text -disk,fstype=hfs,mode=ro,path=/ free=398407520256i,inodes_free=97267461i,inodes_total=121847806i,inodes_used=24580345i,total=499088621568i,used=100418957312i,used_percent=20.131039916242397 1453832006274071563 -disk,fstype=devfs,mode=rw,path=/dev free=0i,inodes_free=0i,inodes_total=628i,inodes_used=628i,total=185856i,used=185856i,used_percent=100 1453832006274137913 -disk,fstype=autofs,mode=rw,path=/net free=0i,inodes_free=0i,inodes_total=0i,inodes_used=0i,total=0i,used=0i,used_percent=0 1453832006274157077 -disk,fstype=autofs,mode=rw,path=/home free=0i,inodes_free=0i,inodes_total=0i,inodes_used=0i,total=0i,used=0i,used_percent=0 1453832006274169688 -disk,device=dm-1,fstype=xfs,label=lvg-lv,mode=rw,path=/mnt inodes_free=8388605i,inodes_used=3i,total=17112760320i,free=16959598592i,used=153161728i,used_percent=0.8950147441789215,inodes_total=8388608i 1677001387000000000 +disk,fstype=hfs,mode=ro,path=/ free=398407520256i,inodes_free=97267461i,inodes_total=121847806i,inodes_used=24580345i,total=499088621568i,used=100418957312i,used_percent=20.131039916242397,inodes_used_percent=20.1729894 1453832006274071563 +disk,fstype=devfs,mode=rw,path=/dev 
free=0i,inodes_free=0i,inodes_total=628i,inodes_used=628i,total=185856i,used=185856i,used_percent=100,inodes_used_percent=100 1453832006274137913 +disk,fstype=autofs,mode=rw,path=/net free=0i,inodes_free=0i,inodes_total=0i,inodes_used=0i,total=0i,used=0i,used_percent=0,inodes_used_percent=0 1453832006274157077 +disk,fstype=autofs,mode=rw,path=/home free=0i,inodes_free=0i,inodes_total=0i,inodes_used=0i,total=0i,used=0i,used_percent=0,inodes_used_percent=0 1453832006274169688 +disk,device=dm-1,fstype=xfs,label=lvg-lv,mode=rw,path=/mnt inodes_free=8388605i,inodes_used=3i,total=17112760320i,free=16959598592i,used=153161728i,used_percent=0.8950147441789215,inodes_total=8388608i,inodes_used_percent=0.0017530778 1677001387000000000 ``` [statfs]: http://man7.org/linux/man-pages/man2/statfs.2.html diff --git a/plugins/inputs/disk/disk.go b/plugins/inputs/disk/disk.go index 71405840d3e75..616febc2f465d 100644 --- a/plugins/inputs/disk/disk.go +++ b/plugins/inputs/disk/disk.go @@ -75,14 +75,21 @@ func (ds *DiskStats) Gather(acc telegraf.Accumulator) error { (float64(du.Used) + float64(du.Free)) * 100 } + var inodesUsedPercent float64 + if du.InodesUsed+du.InodesFree > 0 { + inodesUsedPercent = float64(du.InodesUsed) / + (float64(du.InodesUsed) + float64(du.InodesFree)) * 100 + } + fields := map[string]interface{}{ - "total": du.Total, - "free": du.Free, - "used": du.Used, - "used_percent": usedPercent, - "inodes_total": du.InodesTotal, - "inodes_free": du.InodesFree, - "inodes_used": du.InodesUsed, + "total": du.Total, + "free": du.Free, + "used": du.Used, + "used_percent": usedPercent, + "inodes_total": du.InodesTotal, + "inodes_free": du.InodesFree, + "inodes_used": du.InodesUsed, + "inodes_used_percent": inodesUsedPercent, } acc.AddGauge("disk", fields, tags) } diff --git a/plugins/inputs/disk/disk_test.go b/plugins/inputs/disk/disk_test.go index e47bebb326921..975fd58c685e6 100644 --- a/plugins/inputs/disk/disk_test.go +++ b/plugins/inputs/disk/disk_test.go @@ -93,7 +93,7 
@@ func TestDiskUsage(t *testing.T) { require.NoError(t, err) numDiskMetrics := acc.NFields() - expectedAllDiskMetrics := 21 + expectedAllDiskMetrics := 24 require.Equal(t, expectedAllDiskMetrics, numDiskMetrics) tags1 := map[string]string{ @@ -116,31 +116,34 @@ func TestDiskUsage(t *testing.T) { } fields1 := map[string]interface{}{ - "total": uint64(128), - "used": uint64(100), - "free": uint64(23), - "inodes_total": uint64(1234), - "inodes_free": uint64(234), - "inodes_used": uint64(1000), - "used_percent": float64(81.30081300813008), + "total": uint64(128), + "used": uint64(100), + "free": uint64(23), + "inodes_total": uint64(1234), + "inodes_free": uint64(234), + "inodes_used": uint64(1000), + "used_percent": float64(81.30081300813008), + "inodes_used_percent": float64(81.03727714748784), } fields2 := map[string]interface{}{ - "total": uint64(256), - "used": uint64(200), - "free": uint64(46), - "inodes_total": uint64(2468), - "inodes_free": uint64(468), - "inodes_used": uint64(2000), - "used_percent": float64(81.30081300813008), + "total": uint64(256), + "used": uint64(200), + "free": uint64(46), + "inodes_total": uint64(2468), + "inodes_free": uint64(468), + "inodes_used": uint64(2000), + "used_percent": float64(81.30081300813008), + "inodes_used_percent": float64(81.03727714748784), } fields3 := map[string]interface{}{ - "total": uint64(128), - "used": uint64(100), - "free": uint64(23), - "inodes_total": uint64(1234), - "inodes_free": uint64(234), - "inodes_used": uint64(1000), - "used_percent": float64(81.30081300813008), + "total": uint64(128), + "used": uint64(100), + "free": uint64(23), + "inodes_total": uint64(1234), + "inodes_free": uint64(234), + "inodes_used": uint64(1000), + "used_percent": float64(81.30081300813008), + "inodes_used_percent": float64(81.03727714748784), } acc.AssertContainsTaggedFields(t, "disk", fields1, tags1) acc.AssertContainsTaggedFields(t, "disk", fields2, tags2) @@ -150,18 +153,18 @@ func TestDiskUsage(t *testing.T) { // and 
/home not matching the /dev in MountPoints err = (&DiskStats{ps: &mps, MountPoints: []string{"/", "/dev"}}).Gather(&acc) require.NoError(t, err) - require.Equal(t, expectedAllDiskMetrics+7, acc.NFields()) + require.Equal(t, expectedAllDiskMetrics+8, acc.NFields()) // We should see all the diskpoints as MountPoints includes both // /, /home, and /var/rootbind err = (&DiskStats{ps: &mps, MountPoints: []string{"/", "/home", "/var/rootbind"}}).Gather(&acc) require.NoError(t, err) - require.Equal(t, expectedAllDiskMetrics+7*4, acc.NFields()) + require.Equal(t, expectedAllDiskMetrics+8*4, acc.NFields()) // We should see all the mounts as MountPoints except the bind mound err = (&DiskStats{ps: &mps, IgnoreMountOpts: []string{"bind"}}).Gather(&acc) require.NoError(t, err) - require.Equal(t, expectedAllDiskMetrics+7*6, acc.NFields()) + require.Equal(t, expectedAllDiskMetrics+8*6, acc.NFields()) } func TestDiskUsageHostMountPrefix(t *testing.T) { @@ -196,13 +199,14 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { "mode": "ro", }, expectedFields: map[string]interface{}{ - "total": uint64(42), - "used": uint64(0), - "free": uint64(0), - "inodes_total": uint64(0), - "inodes_free": uint64(0), - "inodes_used": uint64(0), - "used_percent": float64(0), + "total": uint64(42), + "used": uint64(0), + "free": uint64(0), + "inodes_total": uint64(0), + "inodes_free": uint64(0), + "inodes_used": uint64(0), + "used_percent": float64(0), + "inodes_used_percent": float64(0), }, }, { @@ -229,13 +233,14 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { "mode": "ro", }, expectedFields: map[string]interface{}{ - "total": uint64(42), - "used": uint64(0), - "free": uint64(0), - "inodes_total": uint64(0), - "inodes_free": uint64(0), - "inodes_used": uint64(0), - "used_percent": float64(0), + "total": uint64(42), + "used": uint64(0), + "free": uint64(0), + "inodes_total": uint64(0), + "inodes_free": uint64(0), + "inodes_used": uint64(0), + "used_percent": float64(0), + "inodes_used_percent": 
float64(0), }, }, { @@ -262,13 +267,14 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { "mode": "ro", }, expectedFields: map[string]interface{}{ - "total": uint64(42), - "used": uint64(0), - "free": uint64(0), - "inodes_total": uint64(0), - "inodes_free": uint64(0), - "inodes_used": uint64(0), - "used_percent": float64(0), + "total": uint64(42), + "used": uint64(0), + "free": uint64(0), + "inodes_total": uint64(0), + "inodes_free": uint64(0), + "inodes_used": uint64(0), + "used_percent": float64(0), + "inodes_used_percent": float64(0), }, }, } @@ -424,7 +430,7 @@ func TestDiskStats(t *testing.T) { require.NoError(t, err) numDiskMetrics := acc.NFields() - expectedAllDiskMetrics := 21 + expectedAllDiskMetrics := 24 require.Equal(t, expectedAllDiskMetrics, numDiskMetrics) tags1 := map[string]string{ @@ -441,22 +447,24 @@ func TestDiskStats(t *testing.T) { } fields1 := map[string]interface{}{ - "total": uint64(128), - "used": uint64(100), - "free": uint64(23), - "inodes_total": uint64(1234), - "inodes_free": uint64(234), - "inodes_used": uint64(1000), - "used_percent": float64(81.30081300813008), + "total": uint64(128), + "used": uint64(100), + "free": uint64(23), + "inodes_total": uint64(1234), + "inodes_free": uint64(234), + "inodes_used": uint64(1000), + "used_percent": float64(81.30081300813008), + "inodes_used_percent": float64(81.03727714748784), } fields2 := map[string]interface{}{ - "total": uint64(256), - "used": uint64(200), - "free": uint64(46), - "inodes_total": uint64(2468), - "inodes_free": uint64(468), - "inodes_used": uint64(2000), - "used_percent": float64(81.30081300813008), + "total": uint64(256), + "used": uint64(200), + "free": uint64(46), + "inodes_total": uint64(2468), + "inodes_free": uint64(468), + "inodes_used": uint64(2000), + "used_percent": float64(81.30081300813008), + "inodes_used_percent": float64(81.03727714748784), } acc.AssertContainsTaggedFields(t, "disk", fields1, tags1) acc.AssertContainsTaggedFields(t, "disk", fields2, tags2) 
@@ -465,18 +473,18 @@ func TestDiskStats(t *testing.T) { // and /home and /var/rootbind not matching the /dev in MountPoints err = (&DiskStats{ps: &mps, MountPoints: []string{"/", "/dev"}}).Gather(&acc) require.NoError(t, err) - require.Equal(t, expectedAllDiskMetrics+7, acc.NFields()) + require.Equal(t, expectedAllDiskMetrics+8, acc.NFields()) // We should see all the diskpoints as MountPoints includes both // /, /home, and /var/rootbind err = (&DiskStats{ps: &mps, MountPoints: []string{"/", "/home", "/var/rootbind"}}).Gather(&acc) require.NoError(t, err) - require.Equal(t, expectedAllDiskMetrics+7*4, acc.NFields()) + require.Equal(t, expectedAllDiskMetrics+8*4, acc.NFields()) // We should see all the mounts as MountPoints except the bind mound err = (&DiskStats{ps: &mps, IgnoreMountOpts: []string{"bind"}}).Gather(&acc) require.NoError(t, err) - require.Equal(t, expectedAllDiskMetrics+7*6, acc.NFields()) + require.Equal(t, expectedAllDiskMetrics+8*6, acc.NFields()) } func TestDiskUsageIssues(t *testing.T) { @@ -511,13 +519,14 @@ func TestDiskUsageIssues(t *testing.T) { "path": "/tmp", }, map[string]interface{}{ - "total": uint64(256), - "used": uint64(200), - "free": uint64(46), - "inodes_total": uint64(2468), - "inodes_free": uint64(468), - "inodes_used": uint64(2000), - "used_percent": float64(81.30081300813008), + "total": uint64(256), + "used": uint64(200), + "free": uint64(46), + "inodes_total": uint64(2468), + "inodes_free": uint64(468), + "inodes_used": uint64(2000), + "used_percent": float64(81.30081300813008), + "inodes_used_percent": float64(81.03727714748784), }, time.Unix(0, 0), telegraf.Gauge, @@ -531,13 +540,14 @@ func TestDiskUsageIssues(t *testing.T) { "path": "/", }, map[string]interface{}{ - "total": uint64(256), - "used": uint64(200), - "free": uint64(46), - "inodes_total": uint64(2468), - "inodes_free": uint64(468), - "inodes_used": uint64(2000), - "used_percent": float64(81.30081300813008), + "total": uint64(256), + "used": uint64(200), + 
"free": uint64(46), + "inodes_total": uint64(2468), + "inodes_free": uint64(468), + "inodes_used": uint64(2000), + "used_percent": float64(81.30081300813008), + "inodes_used_percent": float64(81.03727714748784), }, time.Unix(0, 0), telegraf.Gauge, @@ -565,13 +575,14 @@ func TestDiskUsageIssues(t *testing.T) { "path": "/", }, map[string]interface{}{ - "total": uint64(256), - "used": uint64(200), - "free": uint64(46), - "inodes_total": uint64(2468), - "inodes_free": uint64(468), - "inodes_used": uint64(2000), - "used_percent": float64(81.30081300813008), + "total": uint64(256), + "used": uint64(200), + "free": uint64(46), + "inodes_total": uint64(2468), + "inodes_free": uint64(468), + "inodes_used": uint64(2000), + "used_percent": float64(81.30081300813008), + "inodes_used_percent": float64(81.03727714748784), }, time.Unix(0, 0), telegraf.Gauge, @@ -585,13 +596,14 @@ func TestDiskUsageIssues(t *testing.T) { "path": "/mnt/storage", }, map[string]interface{}{ - "total": uint64(256), - "used": uint64(200), - "free": uint64(46), - "inodes_total": uint64(2468), - "inodes_free": uint64(468), - "inodes_used": uint64(2000), - "used_percent": float64(81.30081300813008), + "total": uint64(256), + "used": uint64(200), + "free": uint64(46), + "inodes_total": uint64(2468), + "inodes_free": uint64(468), + "inodes_used": uint64(2000), + "used_percent": float64(81.30081300813008), + "inodes_used_percent": float64(81.03727714748784), }, time.Unix(0, 0), telegraf.Gauge, diff --git a/plugins/inputs/dns_query/README.md b/plugins/inputs/dns_query/README.md index fec9c4023d838..1841306f932a3 100644 --- a/plugins/inputs/dns_query/README.md +++ b/plugins/inputs/dns_query/README.md @@ -1,6 +1,6 @@ # DNS Query Input Plugin -The DNS plugin gathers dns query times in miliseconds - like +The DNS plugin gathers dns query times in milliseconds - like [Dig](https://en.wikipedia.org/wiki/Dig_\(command\)) ## Global configuration options diff --git a/plugins/inputs/ecs/client.go 
b/plugins/inputs/ecs/client.go index b5521c5ea3f3a..067fc3b7568ef 100644 --- a/plugins/inputs/ecs/client.go +++ b/plugins/inputs/ecs/client.go @@ -12,12 +12,13 @@ import ( var ( // https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint-v2.html - ecsMetadataPath = "/v2/metadata" - ecsMetaStatsPath = "/v2/stats" + ecsMetadataPathV2 = "/v2/metadata" + ecsMetaStatsPathV2 = "/v2/stats" // https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint-v3.html - ecsMetadataPathV3 = "/task" - ecsMetaStatsPathV3 = "/task/stats" + // https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint-v4.html + ecsMetadataPath = "/task" + ecsMetaStatsPath = "/task/stats" ) // Client is the ECS client contract @@ -32,8 +33,8 @@ type httpClient interface { // NewClient constructs an ECS client with the passed configuration params func NewClient(timeout time.Duration, endpoint string, version int) (*EcsClient, error) { - if version != 2 && version != 3 { - const msg = "expected metadata version 2 or 3, got %d" + if version < 2 || version > 4 { + const msg = "expected metadata version 2, 3 or 4, got %d" return nil, fmt.Errorf(msg, version) } @@ -59,11 +60,12 @@ func resolveTaskURL(base *url.URL, version int) string { var path string switch version { case 2: - path = ecsMetadataPath + path = ecsMetadataPathV2 case 3: - path = ecsMetadataPathV3 + path = ecsMetadataPath + case 4: + path = ecsMetadataPath default: - // Should never happen. const msg = "resolveTaskURL: unexpected version %d" panic(fmt.Errorf(msg, version)) } @@ -74,9 +76,11 @@ func resolveStatsURL(base *url.URL, version int) string { var path string switch version { case 2: - path = ecsMetaStatsPath + path = ecsMetaStatsPathV2 case 3: - path = ecsMetaStatsPathV3 + path = ecsMetaStatsPath + case 4: + path = ecsMetaStatsPath default: // Should never happen. 
const msg = "resolveStatsURL: unexpected version %d" diff --git a/plugins/inputs/elasticsearch/README.md b/plugins/inputs/elasticsearch/README.md index ef6a475c16877..fc09c1a0351af 100644 --- a/plugins/inputs/elasticsearch/README.md +++ b/plugins/inputs/elasticsearch/README.md @@ -47,6 +47,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. servers = ["http://localhost:9200"] ## Timeout for HTTP requests to the elastic search server(s) + ## deprecated in 1.29.0; use 'timeout' instead http_timeout = "5s" ## When local is true (the default), the node will read only its own stats. @@ -94,6 +95,13 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. # tls_key = "/etc/telegraf/key.pem" ## Use TLS but skip chain & host verification # insecure_skip_verify = false + + ## If 'use_system_proxy' is set to true, Telegraf will check env vars such as + ## HTTP_PROXY, HTTPS_PROXY, and NO_PROXY (or their lowercase counterparts). + ## If 'use_system_proxy' is set to false (default) and 'http_proxy_url' is + ## provided, Telegraf will use the specified URL as HTTP proxy. + # use_system_proxy = false + # http_proxy_url = "http://localhost:8888" ## Sets the number of most recent indices to return for indices that are ## configured with a date-stamped suffix. 
Each 'indices_include' entry diff --git a/plugins/inputs/elasticsearch/elasticsearch.go b/plugins/inputs/elasticsearch/elasticsearch.go index 2d23e4470cb0b..b3409291b4cae 100644 --- a/plugins/inputs/elasticsearch/elasticsearch.go +++ b/plugins/inputs/elasticsearch/elasticsearch.go @@ -2,6 +2,7 @@ package elasticsearch import ( + "context" _ "embed" "encoding/json" "errors" @@ -17,7 +18,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" - "github.com/influxdata/telegraf/plugins/common/tls" + httpconfig "github.com/influxdata/telegraf/plugins/common/http" "github.com/influxdata/telegraf/plugins/inputs" jsonparser "github.com/influxdata/telegraf/plugins/parsers/json" ) @@ -97,7 +98,7 @@ type indexStat struct { type Elasticsearch struct { Local bool `toml:"local"` Servers []string `toml:"servers"` - HTTPTimeout config.Duration `toml:"http_timeout"` + HTTPTimeout config.Duration `toml:"http_timeout" deprecated:"1.29.0;use 'timeout' instead"` ClusterHealth bool `toml:"cluster_health"` ClusterHealthLevel string `toml:"cluster_health_level"` ClusterStats bool `toml:"cluster_stats"` @@ -109,9 +110,11 @@ type Elasticsearch struct { Password string `toml:"password"` NumMostRecentIndices int `toml:"num_most_recent_indices"` - tls.ClientConfig + Log telegraf.Logger `toml:"-"` + + client *http.Client + httpconfig.HTTPClientConfig - client *http.Client serverInfo map[string]serverInfo serverInfoMutex sync.Mutex indexMatchers map[string]filter.Filter @@ -128,9 +131,12 @@ func (i serverInfo) isMaster() bool { // NewElasticsearch return a new instance of Elasticsearch func NewElasticsearch() *Elasticsearch { return &Elasticsearch{ - HTTPTimeout: config.Duration(time.Second * 5), ClusterStatsOnlyFromMaster: true, ClusterHealthLevel: "indices", + HTTPClientConfig: httpconfig.HTTPClientConfig{ + ResponseHeaderTimeout: config.Duration(5 * time.Second), + 
Timeout: config.Duration(5 * time.Second), + }, } } @@ -277,20 +283,12 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error { } func (e *Elasticsearch) createHTTPClient() (*http.Client, error) { - tlsCfg, err := e.ClientConfig.TLSConfig() - if err != nil { - return nil, err - } - tr := &http.Transport{ - ResponseHeaderTimeout: time.Duration(e.HTTPTimeout), - TLSClientConfig: tlsCfg, + ctx := context.Background() + if e.HTTPTimeout != 0 { + e.HTTPClientConfig.Timeout = e.HTTPTimeout + e.HTTPClientConfig.ResponseHeaderTimeout = e.HTTPTimeout } - client := &http.Client{ - Transport: tr, - Timeout: time.Duration(e.HTTPTimeout), - } - - return client, nil + return e.HTTPClientConfig.CreateClient(ctx, e.Log) } func (e *Elasticsearch) nodeStatsURL(baseURL string) string { diff --git a/plugins/inputs/elasticsearch/sample.conf b/plugins/inputs/elasticsearch/sample.conf index 1c4bff938c173..361c99e80820a 100644 --- a/plugins/inputs/elasticsearch/sample.conf +++ b/plugins/inputs/elasticsearch/sample.conf @@ -6,6 +6,7 @@ servers = ["http://localhost:9200"] ## Timeout for HTTP requests to the elastic search server(s) + ## deprecated in 1.29.0; use 'timeout' instead http_timeout = "5s" ## When local is true (the default), the node will read only its own stats. @@ -53,6 +54,13 @@ # tls_key = "/etc/telegraf/key.pem" ## Use TLS but skip chain & host verification # insecure_skip_verify = false + + ## If 'use_system_proxy' is set to true, Telegraf will check env vars such as + ## HTTP_PROXY, HTTPS_PROXY, and NO_PROXY (or their lowercase counterparts). + ## If 'use_system_proxy' is set to false (default) and 'http_proxy_url' is + ## provided, Telegraf will use the specified URL as HTTP proxy. + # use_system_proxy = false + # http_proxy_url = "http://localhost:8888" ## Sets the number of most recent indices to return for indices that are ## configured with a date-stamped suffix. 
Each 'indices_include' entry diff --git a/plugins/inputs/elasticsearch_query/README.md b/plugins/inputs/elasticsearch_query/README.md index 57a34fc3cfbb5..f09cef2a6fd92 100755 --- a/plugins/inputs/elasticsearch_query/README.md +++ b/plugins/inputs/elasticsearch_query/README.md @@ -55,6 +55,13 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. # tls_key = "/etc/telegraf/key.pem" ## Use TLS but skip chain & host verification # insecure_skip_verify = false + + ## If 'use_system_proxy' is set to true, Telegraf will check env vars such as + ## HTTP_PROXY, HTTPS_PROXY, and NO_PROXY (or their lowercase counterparts). + ## If 'use_system_proxy' is set to false (default) and 'http_proxy_url' is + ## provided, Telegraf will use the specified URL as HTTP proxy. + # use_system_proxy = false + # http_proxy_url = "http://localhost:8888" [[inputs.elasticsearch_query.aggregation]] ## measurement name for the results of the aggregation query diff --git a/plugins/inputs/elasticsearch_query/elasticsearch_query.go b/plugins/inputs/elasticsearch_query/elasticsearch_query.go index 10f2039e96aa2..366008999bf6a 100644 --- a/plugins/inputs/elasticsearch_query/elasticsearch_query.go +++ b/plugins/inputs/elasticsearch_query/elasticsearch_query.go @@ -15,7 +15,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" - "github.com/influxdata/telegraf/plugins/common/tls" + httpconfig "github.com/influxdata/telegraf/plugins/common/http" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -28,15 +28,15 @@ type ElasticsearchQuery struct { Username string `toml:"username"` Password string `toml:"password"` EnableSniffer bool `toml:"enable_sniffer"` - Timeout config.Duration `toml:"timeout"` HealthCheckInterval config.Duration `toml:"health_check_interval"` Aggregations []esAggregation `toml:"aggregation"` Log telegraf.Logger `toml:"-"` - tls.ClientConfig httpclient *http.Client - esClient *elastic5.Client + 
httpconfig.HTTPClientConfig + + esClient *elastic5.Client } // esAggregation struct @@ -197,20 +197,8 @@ func (e *ElasticsearchQuery) Gather(acc telegraf.Accumulator) error { } func (e *ElasticsearchQuery) createHTTPClient() (*http.Client, error) { - tlsCfg, err := e.ClientConfig.TLSConfig() - if err != nil { - return nil, err - } - tr := &http.Transport{ - ResponseHeaderTimeout: time.Duration(e.Timeout), - TLSClientConfig: tlsCfg, - } - httpclient := &http.Client{ - Transport: tr, - Timeout: time.Duration(e.Timeout), - } - - return httpclient, nil + ctx := context.Background() + return e.HTTPClientConfig.CreateClient(ctx, e.Log) } func (e *ElasticsearchQuery) esAggregationQuery(acc telegraf.Accumulator, aggregation esAggregation, i int) error { @@ -242,8 +230,11 @@ func (e *ElasticsearchQuery) esAggregationQuery(acc telegraf.Accumulator, aggreg func init() { inputs.Add("elasticsearch_query", func() telegraf.Input { return &ElasticsearchQuery{ - Timeout: config.Duration(time.Second * 5), HealthCheckInterval: config.Duration(time.Second * 10), + HTTPClientConfig: httpconfig.HTTPClientConfig{ + ResponseHeaderTimeout: config.Duration(5 * time.Second), + Timeout: config.Duration(5 * time.Second), + }, } }) } diff --git a/plugins/inputs/elasticsearch_query/elasticsearch_query_test.go b/plugins/inputs/elasticsearch_query/elasticsearch_query_test.go index f29184fa2ac87..9b06c79caf1f2 100644 --- a/plugins/inputs/elasticsearch_query/elasticsearch_query_test.go +++ b/plugins/inputs/elasticsearch_query/elasticsearch_query_test.go @@ -19,6 +19,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" + httpconfig "github.com/influxdata/telegraf/plugins/common/http" "github.com/influxdata/telegraf/testutil" ) @@ -536,9 +537,12 @@ func setupIntegrationTest(t *testing.T) (*testutil.Container, error) { "http://%s:%s", container.Address, container.Ports[servicePort], ) e := &ElasticsearchQuery{ - URLs: []string{url}, - 
Timeout: config.Duration(time.Second * 30), - Log: testutil.Logger{}, + URLs: []string{url}, + HTTPClientConfig: httpconfig.HTTPClientConfig{ + ResponseHeaderTimeout: config.Duration(30 * time.Second), + Timeout: config.Duration(30 * time.Second), + }, + Log: testutil.Logger{}, } err = e.connectToES() @@ -612,8 +616,11 @@ func TestElasticsearchQueryIntegration(t *testing.T) { URLs: []string{ fmt.Sprintf("http://%s:%s", container.Address, container.Ports[servicePort]), }, - Timeout: config.Duration(time.Second * 30), - Log: testutil.Logger{}, + HTTPClientConfig: httpconfig.HTTPClientConfig{ + ResponseHeaderTimeout: config.Duration(30 * time.Second), + Timeout: config.Duration(30 * time.Second), + }, + Log: testutil.Logger{}, } err = e.connectToES() @@ -675,8 +682,11 @@ func TestElasticsearchQueryIntegration_getMetricFields(t *testing.T) { URLs: []string{ fmt.Sprintf("http://%s:%s", container.Address, container.Ports[servicePort]), }, - Timeout: config.Duration(time.Second * 30), - Log: testutil.Logger{}, + HTTPClientConfig: httpconfig.HTTPClientConfig{ + ResponseHeaderTimeout: config.Duration(30 * time.Second), + Timeout: config.Duration(30 * time.Second), + }, + Log: testutil.Logger{}, } err = e.connectToES() diff --git a/plugins/inputs/elasticsearch_query/sample.conf b/plugins/inputs/elasticsearch_query/sample.conf index 29981c2bc167c..22d9432aa69cc 100644 --- a/plugins/inputs/elasticsearch_query/sample.conf +++ b/plugins/inputs/elasticsearch_query/sample.conf @@ -26,6 +26,13 @@ # tls_key = "/etc/telegraf/key.pem" ## Use TLS but skip chain & host verification # insecure_skip_verify = false + + ## If 'use_system_proxy' is set to true, Telegraf will check env vars such as + ## HTTP_PROXY, HTTPS_PROXY, and NO_PROXY (or their lowercase counterparts). + ## If 'use_system_proxy' is set to false (default) and 'http_proxy_url' is + ## provided, Telegraf will use the specified URL as HTTP proxy. 
+ # use_system_proxy = false + # http_proxy_url = "http://localhost:8888" [[inputs.elasticsearch_query.aggregation]] ## measurement name for the results of the aggregation query diff --git a/plugins/inputs/example/example.go b/plugins/inputs/example/example.go index 48df55ec562a9..996d426d3f52b 100644 --- a/plugins/inputs/example/example.go +++ b/plugins/inputs/example/example.go @@ -55,7 +55,7 @@ func (m *Example) Init() error { } // Set your defaults. - // Please note: In golang all fields are initialzed to their nil value, so you should not + // Please note: In golang all fields are initialized to their nil value, so you should not // set these fields if the nil value is what you want (e.g. for booleans). if m.NumberFields < 1 { m.Log.Debugf("Setting number of fields to default from invalid value %d", m.NumberFields) @@ -75,7 +75,7 @@ func (m *Example) Init() error { } defer password.Destroy() - // Initialze your internal states + // Initialize your internal states m.count = 1 return nil diff --git a/plugins/inputs/example/example_test.go b/plugins/inputs/example/example_test.go index d744449ba9613..f4ea9bb823fa4 100644 --- a/plugins/inputs/example/example_test.go +++ b/plugins/inputs/example/example_test.go @@ -20,7 +20,7 @@ func TestInitDefault(t *testing.T) { // This test should succeed with the default initialization. // Use whatever you use in the init() function plus the mandatory options. - // ATTENTION: Always initialze the "Log" as you will get SIGSEGV otherwise. + // ATTENTION: Always initialize the "Log" as you will get SIGSEGV otherwise. plugin := &Example{ DeviceName: "test", Timeout: config.Duration(100 * time.Millisecond), @@ -42,7 +42,7 @@ func TestInitFail(t *testing.T) { // and check if you reach them // We setup a table-test here to specify "setting" - "expected error" values. 
- // Eventhough it seems overkill here for the example plugin, we reuse this structure + // Even though it seems overkill here for the example plugin, we reuse this structure // later for checking the metrics tests := []struct { name string @@ -58,7 +58,7 @@ func TestInitFail(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - // Always initialze the logger to avoid SIGSEGV. This is done automatically by + // Always initialize the logger to avoid SIGSEGV. This is done automatically by // telegraf during normal operation. tt.plugin.Log = testutil.Logger{} err := tt.plugin.Init() @@ -225,8 +225,8 @@ func TestFixedValue(t *testing.T) { acc.Wait(len(tt.expected)) // Compare the metrics in a convenient way. Here we ignore - // the metric time during comparision as we cannot inject the time - // during test. For more comparision options check testutil package. + // the metric time during comparison as we cannot inject the time + // during test. For more comparison options check testutil package. 
testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) }) } diff --git a/plugins/inputs/exec/exec_test.go b/plugins/inputs/exec/exec_test.go index 3ac20f0feac05..edf8ef5583482 100644 --- a/plugins/inputs/exec/exec_test.go +++ b/plugins/inputs/exec/exec_test.go @@ -98,7 +98,7 @@ func TestExec(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(e.Gather) require.NoError(t, err) - require.Equal(t, acc.NFields(), 8, "non-numeric measurements should be ignored") + require.Equal(t, 8, acc.NFields(), "non-numeric measurements should be ignored") fields := map[string]interface{}{ "num_processes": float64(82), @@ -125,7 +125,7 @@ func TestExecMalformed(t *testing.T) { var acc testutil.Accumulator require.Error(t, acc.GatherError(e.Gather)) - require.Equal(t, acc.NFields(), 0, "No new points should have been added") + require.Equal(t, 0, acc.NFields(), "No new points should have been added") } func TestCommandError(t *testing.T) { @@ -140,7 +140,7 @@ func TestCommandError(t *testing.T) { var acc testutil.Accumulator require.Error(t, acc.GatherError(e.Gather)) - require.Equal(t, acc.NFields(), 0, "No new points should have been added") + require.Equal(t, 0, acc.NFields(), "No new points should have been added") } func TestExecCommandWithGlob(t *testing.T) { diff --git a/plugins/inputs/fireboard/README.md b/plugins/inputs/fireboard/README.md index 7677b79820cd7..848f74eb34833 100644 --- a/plugins/inputs/fireboard/README.md +++ b/plugins/inputs/fireboard/README.md @@ -53,7 +53,7 @@ values are included if they are less than a minute old. - fireboard - tags: - channel - - scale (Celcius; Farenheit) + - scale (Celsius; Fahrenheit) - title (name of the Fireboard) - uuid (UUID of the Fireboard) - fields: @@ -66,5 +66,5 @@ This section shows example output in Line Protocol format. You can often use this information. 
```text -fireboard,channel=2,host=patas-mbp,scale=Farenheit,title=telegraf-FireBoard,uuid=b55e766c-b308-49b5-93a4-df89fe31efd0 temperature=78.2 1561690040000000000 +fireboard,channel=2,host=patas-mbp,scale=Fahrenheit,title=telegraf-FireBoard,uuid=b55e766c-b308-49b5-93a4-df89fe31efd0 temperature=78.2 1561690040000000000 ``` diff --git a/plugins/inputs/gnmi/README.md b/plugins/inputs/gnmi/README.md index 6802f1d11d128..f7da3f9a09c53 100644 --- a/plugins/inputs/gnmi/README.md +++ b/plugins/inputs/gnmi/README.md @@ -88,7 +88,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. ## * Juniper Header Extension (juniper_header): some sensors are directly managed by ## Linecard, which adds the Juniper GNMI Header Extension. Enabling this ## allows the decoding of the Extension header if present. Currently this knob - ## adds component, component_id & sub_component_id as additionnal tags + ## adds component, component_id & sub_component_id as additional tags # vendor_specific = [] ## Define additional aliases to map encoding paths to measurement names diff --git a/plugins/inputs/gnmi/path.go b/plugins/inputs/gnmi/path.go index e00af0dd63946..e6ac208f8e48f 100644 --- a/plugins/inputs/gnmi/path.go +++ b/plugins/inputs/gnmi/path.go @@ -8,7 +8,7 @@ import ( ) // Regular expression to see if a path element contains an origin -var originPattern = regexp.MustCompile(`^([\w-_]+):`) +var originPattern = regexp.MustCompile(`^([\w-]+):`) type keySegment struct { name string diff --git a/plugins/inputs/gnmi/sample.conf b/plugins/inputs/gnmi/sample.conf index 7e330f79d6a35..f2c5989f09b3c 100644 --- a/plugins/inputs/gnmi/sample.conf +++ b/plugins/inputs/gnmi/sample.conf @@ -49,7 +49,7 @@ ## * Juniper Header Extension (juniper_header): some sensors are directly managed by ## Linecard, which adds the Juniper GNMI Header Extension. Enabling this ## allows the decoding of the Extension header if present. 
Currently this knob - ## adds component, component_id & sub_component_id as additionnal tags + ## adds component, component_id & sub_component_id as additional tags # vendor_specific = [] ## Define additional aliases to map encoding paths to measurement names diff --git a/plugins/inputs/hddtemp/hddtemp_test.go b/plugins/inputs/hddtemp/hddtemp_test.go index 44be91bb28bf9..66ed52b585b14 100644 --- a/plugins/inputs/hddtemp/hddtemp_test.go +++ b/plugins/inputs/hddtemp/hddtemp_test.go @@ -43,7 +43,7 @@ func TestFetch(t *testing.T) { err := hddTemp.Gather(acc) require.NoError(t, err) - require.Equal(t, acc.NFields(), 2) + require.Equal(t, 2, acc.NFields()) var tests = []struct { fields map[string]interface{} diff --git a/plugins/inputs/http/http.go b/plugins/inputs/http/http.go index fec583d52566b..713489b6dfb0b 100644 --- a/plugins/inputs/http/http.go +++ b/plugins/inputs/http/http.go @@ -128,7 +128,7 @@ func (h *HTTP) gatherURL(acc telegraf.Accumulator, url string) error { token.Destroy() request.Header.Set("Authorization", bearer) } else if h.TokenFile != "" { - token, err := os.ReadFile(h.BearerToken) + token, err := os.ReadFile(h.TokenFile) if err != nil { return err } diff --git a/plugins/inputs/http/http_test.go b/plugins/inputs/http/http_test.go index 4eff3eee390d0..94ebefd6ab0bb 100644 --- a/plugins/inputs/http/http_test.go +++ b/plugins/inputs/http/http_test.go @@ -61,7 +61,7 @@ func TestHTTPWithJSONFormat(t *testing.T) { var metric = acc.Metrics[0] require.Equal(t, metric.Measurement, metricName) require.Len(t, acc.Metrics[0].Fields, 1) - require.Equal(t, acc.Metrics[0].Fields["a"], 1.2) + require.Equal(t, 1.2, acc.Metrics[0].Fields["a"]) require.Equal(t, acc.Metrics[0].Tags["url"], address) } @@ -282,7 +282,7 @@ func TestBodyAndContentEncoding(t *testing.T) { Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { - require.Equal(t, r.Header.Get("Content-Encoding"), "gzip") + require.Equal(t, "gzip", 
r.Header.Get("Content-Encoding")) gr, err := gzip.NewReader(r.Body) require.NoError(t, err) diff --git a/plugins/inputs/intel_powerstat/README.md b/plugins/inputs/intel_powerstat/README.md index 05c15936fd776..76f3400278818 100644 --- a/plugins/inputs/intel_powerstat/README.md +++ b/plugins/inputs/intel_powerstat/README.md @@ -3,6 +3,9 @@ This input plugin monitors power statistics on Intel-based platforms and assumes presence of Linux based OS. +Not all CPUs are supported, please see the software and hardware dependencies +sections below to ensure platform support. + Main use cases are power saving and workload migration. Telemetry frameworks allow users to monitor critical platform level metrics. Key source of platform telemetry is power domain that is beneficial for MANO Monitoring&Analytics @@ -22,7 +25,8 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. ```toml @sample.conf # Intel PowerStat plugin enables monitoring of platform metrics (power, TDP) -# and per-CPU metrics like temperature, power and utilization. +# and per-CPU metrics like temperature, power and utilization. Please see the +# plugin readme for details on software and hardware compatability. 
# This plugin ONLY supports Linux [[inputs.intel_powerstat]] ## The user can choose which package metrics are monitored by the plugin with @@ -246,6 +250,21 @@ are required by the plugin: | 0xAC | Intel MeteorLake | | 0xAA | Intel MeteorLake-L | +### uncore frequency + +Note that only certain processors support the uncore frequency module as well: + +| Model number | Processor name | +|--------------|---------------------------------| +| 0x55 | Intel Skylake-X | +| 0x6A | Intel IceLake-X | +| 0x6C | Intel IceLake-D | +| 0x47 | Intel Broadwell-G | +| 0x4F | Intel Broadwell-X | +| 0x56 | Intel Broadwell-D | +| 0x8F | Intel Sapphire Rapids X | +| 0xCF | Intel Emerald Rapids X | + ## Metrics All metrics collected by Intel PowerStat plugin are collected in fixed diff --git a/plugins/inputs/intel_powerstat/msr.go b/plugins/inputs/intel_powerstat/msr.go index 52690c4a101e5..8d30f954e2830 100644 --- a/plugins/inputs/intel_powerstat/msr.go +++ b/plugins/inputs/intel_powerstat/msr.go @@ -184,7 +184,7 @@ func (m *msrServiceImpl) readSingleMsr(core string, msr string) (uint64, error) case msrFSBFreqString: msrAddress = fsbFreq default: - return 0, fmt.Errorf("incorect name of MSR %s", msr) + return 0, fmt.Errorf("incorrect name of MSR %s", msr) } value, err := m.fs.readFileAtOffsetToUint64(msrFile, msrAddress) diff --git a/plugins/inputs/intel_powerstat/sample.conf b/plugins/inputs/intel_powerstat/sample.conf index 6ffa36511a436..fdad448fd36ab 100644 --- a/plugins/inputs/intel_powerstat/sample.conf +++ b/plugins/inputs/intel_powerstat/sample.conf @@ -1,5 +1,6 @@ # Intel PowerStat plugin enables monitoring of platform metrics (power, TDP) -# and per-CPU metrics like temperature, power and utilization. +# and per-CPU metrics like temperature, power and utilization. Please see the +# plugin readme for details on software and hardware compatability. 
# This plugin ONLY supports Linux [[inputs.intel_powerstat]] ## The user can choose which package metrics are monitored by the plugin with diff --git a/plugins/inputs/intel_rdt/intel_rdt.go b/plugins/inputs/intel_rdt/intel_rdt.go index e888187f9be38..1b927d96b88c5 100644 --- a/plugins/inputs/intel_rdt/intel_rdt.go +++ b/plugins/inputs/intel_rdt/intel_rdt.go @@ -309,8 +309,8 @@ func (r *IntelRDT) processOutput(cmdReader io.ReadCloser, processesPIDsAssociati pids, err := findPIDsInMeasurement(out) if err != nil { - r.errorChan <- err - break + r.Log.Warnf("Skipping measurement: %v", err) + continue } for processName, PIDsProcess := range processesPIDsAssociation { if pids == PIDsProcess { diff --git a/plugins/inputs/internal/internal_test.go b/plugins/inputs/internal/internal_test.go index d5c3d79581c1d..b5f539ece6aa4 100644 --- a/plugins/inputs/internal/internal_test.go +++ b/plugins/inputs/internal/internal_test.go @@ -4,10 +4,10 @@ import ( "fmt" "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/selfstat" "github.com/influxdata/telegraf/testutil" - - "github.com/stretchr/testify/require" ) func TestSelfPlugin(t *testing.T) { @@ -100,7 +100,7 @@ func TestGostats(t *testing.T) { } require.NotNil(t, metric) - require.Equal(t, metric.Measurement, "internal_gostats") + require.Equal(t, "internal_gostats", metric.Measurement) require.Len(t, metric.Tags, 1) require.Contains(t, metric.Tags, "go_version") diff --git a/plugins/inputs/internet_speed/internet_speed.go b/plugins/inputs/internet_speed/internet_speed.go index bf8706c258110..a28c04b0afe3c 100644 --- a/plugins/inputs/internet_speed/internet_speed.go +++ b/plugins/inputs/internet_speed/internet_speed.go @@ -121,9 +121,14 @@ func (is *InternetSpeed) Gather(acc telegraf.Accumulator) error { } func (is *InternetSpeed) findClosestServer() error { + proto := speedtest.HTTP + if os.Getegid() <= 0 { + proto = speedtest.ICMP + } + client := 
speedtest.New(speedtest.WithUserConfig(&speedtest.UserConfig{ UserAgent: internal.ProductToken(), - ICMP: os.Geteuid() == 0 || os.Geteuid() == -1, + PingMode: proto, SavingMode: is.MemorySavingMode, })) if is.Connections > 0 { diff --git a/plugins/inputs/internet_speed/internet_speed_test.go b/plugins/inputs/internet_speed/internet_speed_test.go index 4dbafe3731b01..2549a3c0b2fe4 100644 --- a/plugins/inputs/internet_speed/internet_speed_test.go +++ b/plugins/inputs/internet_speed/internet_speed_test.go @@ -15,9 +15,9 @@ func TestGathering(t *testing.T) { MemorySavingMode: true, Log: testutil.Logger{}, } + require.NoError(t, internetSpeed.Init()) acc := &testutil.Accumulator{} - require.NoError(t, internetSpeed.Gather(acc)) } @@ -29,16 +29,12 @@ func TestDataGen(t *testing.T) { MemorySavingMode: true, Log: testutil.Logger{}, } + require.NoError(t, internetSpeed.Init()) acc := &testutil.Accumulator{} require.NoError(t, internetSpeed.Gather(acc)) metric, ok := acc.Get("internet_speed") require.True(t, ok) - - tags := metric.Tags - - fields := metric.Fields - - acc.AssertContainsTaggedFields(t, "internet_speed", fields, tags) + acc.AssertContainsTaggedFields(t, "internet_speed", metric.Fields, metric.Tags) } diff --git a/plugins/inputs/ipset/ipset.go b/plugins/inputs/ipset/ipset.go index 4dfee2da716da..88ce04072356e 100644 --- a/plugins/inputs/ipset/ipset.go +++ b/plugins/inputs/ipset/ipset.go @@ -73,18 +73,31 @@ func (i *Ipset) Gather(acc telegraf.Accumulator) error { "set": data[1], "rule": data[2], } - packetsTotal, err := strconv.ParseUint(data[4], 10, 64) - if err != nil { - acc.AddError(err) - } - bytesTotal, err := strconv.ParseUint(data[6], 10, 64) - if err != nil { - acc.AddError(err) - } - fields := map[string]interface{}{ - "packets_total": packetsTotal, - "bytes_total": bytesTotal, + + fields := make(map[string]interface{}, 3) + for i, field := range data { + switch field { + case "timeout": + val, err := strconv.ParseUint(data[i+1], 10, 64) + if err != nil 
{ + acc.AddError(err) + } + fields["timeout"] = val + case "packets": + val, err := strconv.ParseUint(data[i+1], 10, 64) + if err != nil { + acc.AddError(err) + } + fields["packets_total"] = val + case "bytes": + val, err := strconv.ParseUint(data[i+1], 10, 64) + if err != nil { + acc.AddError(err) + } + fields["bytes_total"] = val + } } + acc.AddCounter(measurement, fields, tags) } } diff --git a/plugins/inputs/ipset/ipset_test.go b/plugins/inputs/ipset/ipset_test.go index f205728c0dbad..117fcc3eaee44 100644 --- a/plugins/inputs/ipset/ipset_test.go +++ b/plugins/inputs/ipset/ipset_test.go @@ -74,6 +74,22 @@ func TestIpset(t *testing.T) { {map[string]interface{}{"packets_total": uint64(3), "bytes_total": uint64(222)}}, }, }, + { + name: "Sets with and without timeouts", + value: `create counter-test hash:ip family inet hashsize 1024 maxelem 65536 timeout 1800 counters + add counter-test 192.168.1.1 timeout 1792 packets 8 bytes 672 + create counter-test2 hash:ip family inet hashsize 1024 maxelem 65536 counters + add counter-test2 192.168.1.1 packets 18 bytes 673 + `, + tags: []map[string]string{ + {"set": "counter-test", "rule": "192.168.1.1"}, + {"set": "counter-test2", "rule": "192.168.1.1"}, + }, + fields: [][]map[string]interface{}{ + {map[string]interface{}{"packets_total": uint64(8), "bytes_total": uint64(672), "timeout": uint64(1792)}}, + {map[string]interface{}{"packets_total": uint64(18), "bytes_total": uint64(673)}}, + }, + }, } for i, tt := range tests { diff --git a/plugins/inputs/jenkins/README.md b/plugins/inputs/jenkins/README.md index d39d15c8ba5d9..8266754ea2052 100644 --- a/plugins/inputs/jenkins/README.md +++ b/plugins/inputs/jenkins/README.md @@ -66,9 +66,9 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. ## Empty this field will use default value 5 # max_connections = 5 - ## When set to true will add node labels as a comma-seperated tag. If none, + ## When set to true will add node labels as a comma-separated tag. 
If none, ## are found, then a tag with the value of 'none' is used. Finally, if a - ## lable contains a comma it is replaced with an underscore. + ## label contains a comma it is replaced with an underscore. # node_labels_as_tag = false ``` diff --git a/plugins/inputs/jenkins/sample.conf b/plugins/inputs/jenkins/sample.conf index f8a0cb717617c..9aa4bd7097ecf 100644 --- a/plugins/inputs/jenkins/sample.conf +++ b/plugins/inputs/jenkins/sample.conf @@ -46,7 +46,7 @@ ## Empty this field will use default value 5 # max_connections = 5 - ## When set to true will add node labels as a comma-seperated tag. If none, + ## When set to true will add node labels as a comma-separated tag. If none, ## are found, then a tag with the value of 'none' is used. Finally, if a - ## lable contains a comma it is replaced with an underscore. + ## label contains a comma it is replaced with an underscore. # node_labels_as_tag = false diff --git a/plugins/inputs/jolokia2_agent/jolokia2_agent_test.go b/plugins/inputs/jolokia2_agent/jolokia2_agent_test.go index 43ff6a53e5d76..a7082fa514b02 100644 --- a/plugins/inputs/jolokia2_agent/jolokia2_agent_test.go +++ b/plugins/inputs/jolokia2_agent/jolokia2_agent_test.go @@ -826,7 +826,7 @@ func TestIntegrationArtemis(t *testing.T) { require.NoError(t, plugin.Gather(&acc)) actual := acc.GetTelegrafMetrics() - testutil.RequireMetricsStructureEqual(t, expected, actual, testutil.IgnoreTime()) + testutil.RequireMetricsStructureEqual(t, expected, actual, testutil.SortMetrics(), testutil.IgnoreTime()) } func setupServer(resp string) *httptest.Server { diff --git a/plugins/inputs/kafka_consumer/README.md b/plugins/inputs/kafka_consumer/README.md index 9474d0e669dea..34aa8baa4f21a 100644 --- a/plugins/inputs/kafka_consumer/README.md +++ b/plugins/inputs/kafka_consumer/README.md @@ -68,6 +68,11 @@ to use them. ## are not available # msg_headers_to_tags = [] + ## The name of kafka message header which value should override the metric name. 
+ ## In case when the same header specified in current option and in msg_headers_to_tags + ## option, it will be excluded from the msg_headers_to_tags list. + # msg_header_as_metric_name = "" + ## Optional Client id # client_id = "Telegraf" diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index fc86f56e82490..6262fb6d8ce97 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -11,7 +11,7 @@ import ( "sync" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" @@ -46,6 +46,7 @@ type KafkaConsumer struct { TopicRegexps []string `toml:"topic_regexps"` TopicTag string `toml:"topic_tag"` MsgHeadersAsTags []string `toml:"msg_headers_as_tags"` + MsgHeaderAsMetricName string `toml:"msg_header_as_metric_name"` ConsumerFetchDefault config.Size `toml:"consumer_fetch_default"` ConnectionStrategy string `toml:"connection_strategy"` @@ -136,11 +137,11 @@ func (k *KafkaConsumer) Init() error { switch strings.ToLower(k.BalanceStrategy) { case "range", "": - cfg.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{sarama.BalanceStrategyRange} + cfg.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{sarama.NewBalanceStrategyRange()} case "roundrobin": - cfg.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{sarama.BalanceStrategyRoundRobin} + cfg.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{sarama.NewBalanceStrategyRoundRobin()} case "sticky": - cfg.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{sarama.BalanceStrategySticky} + cfg.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{sarama.NewBalanceStrategySticky()} default: return fmt.Errorf("invalid balance strategy %q", k.BalanceStrategy) } @@ -321,11 +322,14 @@ func (k *KafkaConsumer) 
Start(acc telegraf.Accumulator) error { handler := NewConsumerGroupHandler(acc, k.MaxUndeliveredMessages, k.parser, k.Log) handler.MaxMessageLen = k.MaxMessageLen handler.TopicTag = k.TopicTag + handler.MsgHeaderToMetricName = k.MsgHeaderAsMetricName //if message headers list specified, put it as map to handler msgHeadersMap := make(map[string]bool, len(k.MsgHeadersAsTags)) if len(k.MsgHeadersAsTags) > 0 { for _, header := range k.MsgHeadersAsTags { - msgHeadersMap[header] = true + if k.MsgHeaderAsMetricName != header { + msgHeadersMap[header] = true + } } } handler.MsgHeadersToTags = msgHeadersMap @@ -390,9 +394,10 @@ func NewConsumerGroupHandler(acc telegraf.Accumulator, maxUndelivered int, parse // ConsumerGroupHandler is a sarama.ConsumerGroupHandler implementation. type ConsumerGroupHandler struct { - MaxMessageLen int - TopicTag string - MsgHeadersToTags map[string]bool + MaxMessageLen int + TopicTag string + MsgHeadersToTags map[string]bool + MsgHeaderToMetricName string acc telegraf.TrackingAccumulator sem semaphore @@ -482,9 +487,9 @@ func (h *ConsumerGroupHandler) Handle(session sarama.ConsumerGroupSession, msg * return err } - // Check if any message header should be pass as tag headerKey := "" - if len(h.MsgHeadersToTags) > 0 { + // Check if any message header should override metric name or should be pass as tag + if len(h.MsgHeadersToTags) > 0 || h.MsgHeaderToMetricName != "" { for _, header := range msg.Headers { //convert to a string as the header and value are byte arrays. 
headerKey = string(header.Key) @@ -493,6 +498,12 @@ func (h *ConsumerGroupHandler) Handle(session sarama.ConsumerGroupSession, msg * for _, metric := range metrics { metric.AddTag(headerKey, string(header.Value)) } + } else { + if h.MsgHeaderToMetricName == headerKey { + for _, metric := range metrics { + metric.SetName(string(header.Value)) + } + } } } } diff --git a/plugins/inputs/kafka_consumer/kafka_consumer_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_test.go index 3e40ad5ed8d6b..5bb46f1e57bc2 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer_test.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer_test.go @@ -8,10 +8,10 @@ import ( "testing" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/stretchr/testify/require" "github.com/testcontainers/testcontainers-go" - "github.com/testcontainers/testcontainers-go/wait" + kafkacontainer "github.com/testcontainers/testcontainers-go/modules/kafka" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" @@ -70,11 +70,11 @@ func TestInit(t *testing.T) { name: "default config", plugin: &KafkaConsumer{}, check: func(t *testing.T, plugin *KafkaConsumer) { - require.Equal(t, plugin.ConsumerGroup, defaultConsumerGroup) - require.Equal(t, plugin.MaxUndeliveredMessages, defaultMaxUndeliveredMessages) - require.Equal(t, plugin.config.ClientID, "Telegraf") - require.Equal(t, plugin.config.Consumer.Offsets.Initial, sarama.OffsetOldest) - require.Equal(t, plugin.config.Consumer.MaxProcessingTime, 100*time.Millisecond) + require.Equal(t, defaultConsumerGroup, plugin.ConsumerGroup) + require.Equal(t, defaultMaxUndeliveredMessages, plugin.MaxUndeliveredMessages) + require.Equal(t, "Telegraf", plugin.config.ClientID) + require.Equal(t, sarama.OffsetOldest, plugin.config.Consumer.Offsets.Initial) + require.Equal(t, 100*time.Millisecond, plugin.config.Consumer.MaxProcessingTime) }, }, { @@ -114,7 +114,7 @@ func 
TestInit(t *testing.T) { Log: testutil.Logger{}, }, check: func(t *testing.T, plugin *KafkaConsumer) { - require.Equal(t, plugin.config.ClientID, "custom") + require.Equal(t, "custom", plugin.config.ClientID) }, }, { @@ -124,7 +124,7 @@ func TestInit(t *testing.T) { Log: testutil.Logger{}, }, check: func(t *testing.T, plugin *KafkaConsumer) { - require.Equal(t, plugin.config.Consumer.Offsets.Initial, sarama.OffsetNewest) + require.Equal(t, sarama.OffsetNewest, plugin.config.Consumer.Offsets.Initial) }, }, { @@ -197,7 +197,7 @@ func TestInit(t *testing.T) { Log: testutil.Logger{}, }, check: func(t *testing.T, plugin *KafkaConsumer) { - require.Equal(t, plugin.config.Consumer.MaxProcessingTime, 1000*time.Millisecond) + require.Equal(t, 1000*time.Millisecond, plugin.config.Consumer.MaxProcessingTime) }, }, } @@ -484,58 +484,20 @@ func TestKafkaRoundTripIntegration(t *testing.T) { }{ {"connection strategy startup", "startup", []string{"Test"}, nil, config.Duration(0)}, {"connection strategy defer", "defer", []string{"Test"}, nil, config.Duration(0)}, - {"topic regexp", "startup", nil, []string{"T*"}, config.Duration(5 * time.Second)}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - t.Logf("rt: starting network") ctx := context.Background() - networkName := "telegraf-test-kafka-consumer-network" - network, err := testcontainers.GenericNetwork(ctx, testcontainers.GenericNetworkRequest{ - NetworkRequest: testcontainers.NetworkRequest{ - Name: networkName, - Attachable: true, - CheckDuplicate: true, - }, - }) + kafkaContainer, err := kafkacontainer.RunContainer(ctx, + kafkacontainer.WithClusterID("test-cluster"), + testcontainers.WithImage("confluentinc/confluent-local:7.5.0"), + ) require.NoError(t, err) - defer func() { - require.NoError(t, network.Remove(ctx), "terminating network failed") - }() - - t.Logf("rt: starting zookeeper") - zookeeperName := "telegraf-test-kafka-consumer-zookeeper" - zookeeper := testutil.Container{ - Image: 
"wurstmeister/zookeeper", - ExposedPorts: []string{"2181:2181"}, - Networks: []string{networkName}, - WaitingFor: wait.ForLog("binding to port"), - Name: zookeeperName, - } - require.NoError(t, zookeeper.Start(), "failed to start container") - defer zookeeper.Terminate() - - t.Logf("rt: starting broker") - container := testutil.Container{ - Name: "telegraf-test-kafka-consumer", - Image: "wurstmeister/kafka", - ExposedPorts: []string{"9092:9092"}, - Env: map[string]string{ - "KAFKA_ADVERTISED_HOST_NAME": "localhost", - "KAFKA_ADVERTISED_PORT": "9092", - "KAFKA_ZOOKEEPER_CONNECT": fmt.Sprintf("%s:%s", zookeeperName, zookeeper.Ports["2181"]), - "KAFKA_CREATE_TOPICS": fmt.Sprintf("%s:1:1", "Test"), - }, - Networks: []string{networkName}, - WaitingFor: wait.ForLog("Log loaded for partition Test-0 with initial high watermark 0"), - } - require.NoError(t, container.Start(), "failed to start container") - defer container.Terminate() + defer kafkaContainer.Terminate(ctx) //nolint:errcheck // ignored - brokers := []string{ - fmt.Sprintf("%s:%s", container.Address, container.Ports["9092"]), - } + brokers, err := kafkaContainer.Brokers(ctx) + require.NoError(t, err) // Make kafka output t.Logf("rt: starting output plugin") diff --git a/plugins/inputs/kafka_consumer/sample.conf b/plugins/inputs/kafka_consumer/sample.conf index 1e460e2c368c5..afefd722e677d 100644 --- a/plugins/inputs/kafka_consumer/sample.conf +++ b/plugins/inputs/kafka_consumer/sample.conf @@ -28,6 +28,11 @@ ## are not available # msg_headers_to_tags = [] + ## The name of kafka message header which value should override the metric name. + ## In case when the same header specified in current option and in msg_headers_to_tags + ## option, it will be excluded from the msg_headers_to_tags list. 
+ # msg_header_as_metric_name = "" + ## Optional Client id # client_id = "Telegraf" diff --git a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go index 1cc05f0ef976d..372f25d0cd9ba 100644 --- a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go +++ b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go @@ -5,7 +5,7 @@ import ( "testing" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/plugins/parsers/influx" diff --git a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_test.go b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_test.go index 740a9dced2974..1aeeefc4d7909 100644 --- a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_test.go +++ b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_test.go @@ -5,13 +5,12 @@ import ( "testing" "github.com/Shopify/sarama" + "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/plugins/parsers/graphite" "github.com/influxdata/telegraf/plugins/parsers/influx" "github.com/influxdata/telegraf/plugins/parsers/json" "github.com/influxdata/telegraf/testutil" - - "github.com/stretchr/testify/require" ) const ( @@ -52,7 +51,7 @@ func TestRunParser(t *testing.T) { in <- saramaMsg(testMsg) acc.Wait(1) - require.Equal(t, acc.NFields(), 1) + require.Equal(t, 1, acc.NFields()) } // Test that the parser ignores invalid messages @@ -70,7 +69,7 @@ func TestRunParserInvalidMsg(t *testing.T) { in <- saramaMsg(invalidMsg) acc.WaitError(1) - require.Equal(t, acc.NFields(), 0) + require.Equal(t, 0, acc.NFields()) } // Test that overlong messages are dropped @@ -87,7 +86,7 @@ func TestDropOverlongMsg(t *testing.T) { in <- saramaMsg(overlongMsg) acc.WaitError(1) 
- require.Equal(t, acc.NFields(), 0) + require.Equal(t, 0, acc.NFields()) } // Test that the parser parses kafka messages into points @@ -107,7 +106,7 @@ func TestRunParserAndGather(t *testing.T) { require.NoError(t, acc.GatherError(k.Gather)) - require.Equal(t, acc.NFields(), 1) + require.Equal(t, 1, acc.NFields()) acc.AssertContainsFields(t, "cpu_load_short", map[string]interface{}{"value": float64(23422)}) } @@ -128,7 +127,7 @@ func TestRunParserAndGatherGraphite(t *testing.T) { require.NoError(t, acc.GatherError(k.Gather)) - require.Equal(t, acc.NFields(), 1) + require.Equal(t, 1, acc.NFields()) acc.AssertContainsFields(t, "cpu_load_short_graphite", map[string]interface{}{"value": float64(23422)}) } @@ -151,7 +150,7 @@ func TestRunParserAndGatherJSON(t *testing.T) { require.NoError(t, acc.GatherError(k.Gather)) - require.Equal(t, acc.NFields(), 2) + require.Equal(t, 2, acc.NFields()) acc.AssertContainsFields(t, "kafka_json_test", map[string]interface{}{ "a": float64(5), diff --git a/plugins/inputs/kernel/README.md b/plugins/inputs/kernel/README.md index a6490540765fd..2ea010bc22a01 100644 --- a/plugins/inputs/kernel/README.md +++ b/plugins/inputs/kernel/README.md @@ -39,7 +39,7 @@ processes 86031 Number of forks since boot. ``` -Kernel Samepage Merging is generally documented in [kernel documenation][1] and +Kernel Samepage Merging is generally documented in [kernel documentation][1] and the available metrics exposed via sysfs are documented in [admin guide][2] [1]: https://www.kernel.org/doc/html/latest/mm/ksm.html diff --git a/plugins/inputs/kibana/README.md b/plugins/inputs/kibana/README.md index c8e3267f14104..ca2c19478535c 100644 --- a/plugins/inputs/kibana/README.md +++ b/plugins/inputs/kibana/README.md @@ -37,6 +37,13 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. 
# tls_key = "/etc/telegraf/key.pem" ## Use TLS but skip chain & host verification # insecure_skip_verify = false + + ## If 'use_system_proxy' is set to true, Telegraf will check env vars such as + ## HTTP_PROXY, HTTPS_PROXY, and NO_PROXY (or their lowercase counterparts). + ## If 'use_system_proxy' is set to false (default) and 'http_proxy_url' is + ## provided, Telegraf will use the specified URL as HTTP proxy. + # use_system_proxy = false + # http_proxy_url = "http://localhost:8888" ``` ## Metrics diff --git a/plugins/inputs/kibana/kibana.go b/plugins/inputs/kibana/kibana.go index 1beb86a70fae5..c00a521015c67 100644 --- a/plugins/inputs/kibana/kibana.go +++ b/plugins/inputs/kibana/kibana.go @@ -2,6 +2,7 @@ package kibana import ( + "context" _ "embed" "encoding/json" "fmt" @@ -14,7 +15,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" - "github.com/influxdata/telegraf/plugins/common/tls" + httpconfig "github.com/influxdata/telegraf/plugins/common/http" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -90,15 +91,18 @@ type Kibana struct { Servers []string Username string Password string - Timeout config.Duration - tls.ClientConfig + + Log telegraf.Logger `toml:"-"` client *http.Client + httpconfig.HTTPClientConfig } func NewKibana() *Kibana { return &Kibana{ - Timeout: config.Duration(time.Second * 5), + HTTPClientConfig: httpconfig.HTTPClientConfig{ + Timeout: config.Duration(5 * time.Second), + }, } } @@ -147,19 +151,8 @@ func (k *Kibana) Gather(acc telegraf.Accumulator) error { } func (k *Kibana) createHTTPClient() (*http.Client, error) { - tlsCfg, err := k.ClientConfig.TLSConfig() - if err != nil { - return nil, err - } - - client := &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: tlsCfg, - }, - Timeout: time.Duration(k.Timeout), - } - - return client, nil + ctx := context.Background() + return k.HTTPClientConfig.CreateClient(ctx, k.Log) } func (k *Kibana) 
gatherKibanaStatus(baseURL string, acc telegraf.Accumulator) error { diff --git a/plugins/inputs/kibana/sample.conf b/plugins/inputs/kibana/sample.conf index 577cde8814d59..3d58a311ae4b7 100644 --- a/plugins/inputs/kibana/sample.conf +++ b/plugins/inputs/kibana/sample.conf @@ -16,3 +16,10 @@ # tls_key = "/etc/telegraf/key.pem" ## Use TLS but skip chain & host verification # insecure_skip_verify = false + + ## If 'use_system_proxy' is set to true, Telegraf will check env vars such as + ## HTTP_PROXY, HTTPS_PROXY, and NO_PROXY (or their lowercase counterparts). + ## If 'use_system_proxy' is set to false (default) and 'http_proxy_url' is + ## provided, Telegraf will use the specified URL as HTTP proxy. + # use_system_proxy = false + # http_proxy_url = "http://localhost:8888" diff --git a/plugins/inputs/libvirt/libvirt_test.go b/plugins/inputs/libvirt/libvirt_test.go index a080db6248510..eb137a031b655 100644 --- a/plugins/inputs/libvirt/libvirt_test.go +++ b/plugins/inputs/libvirt/libvirt_test.go @@ -299,7 +299,7 @@ func TestLibvirt_calculateMetricNumber(t *testing.T) { } err := l.calculateMetricNumber() require.NoError(t, err) - require.Equal(t, l.metricNumber, domainStatsAll) + require.Equal(t, domainStatsAll, l.metricNumber) }) } diff --git a/plugins/inputs/logparser/logparser_test.go b/plugins/inputs/logparser/logparser_test.go index 5ce5cc8fa7872..858a7cecc240f 100644 --- a/plugins/inputs/logparser/logparser_test.go +++ b/plugins/inputs/logparser/logparser_test.go @@ -136,7 +136,7 @@ func TestGrokParseLogFilesAppearLater(t *testing.T) { acc := testutil.Accumulator{} require.NoError(t, logparser.Start(&acc)) - require.Equal(t, acc.NFields(), 0) + require.Equal(t, 0, acc.NFields()) input, err := os.ReadFile(filepath.Join(testdataDir, "test_a.log")) require.NoError(t, err) diff --git a/plugins/inputs/logstash/README.md b/plugins/inputs/logstash/README.md index a5443bc8db5c8..581701dcb410d 100644 --- a/plugins/inputs/logstash/README.md +++ 
b/plugins/inputs/logstash/README.md @@ -44,6 +44,13 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. ## Use TLS but skip chain & host verification. # insecure_skip_verify = false + + ## If 'use_system_proxy' is set to true, Telegraf will check env vars such as + ## HTTP_PROXY, HTTPS_PROXY, and NO_PROXY (or their lowercase counterparts). + ## If 'use_system_proxy' is set to false (default) and 'http_proxy_url' is + ## provided, Telegraf will use the specified URL as HTTP proxy. + # use_system_proxy = false + # http_proxy_url = "http://localhost:8888" ## Optional HTTP headers. # [inputs.logstash.headers] diff --git a/plugins/inputs/logstash/logstash.go b/plugins/inputs/logstash/logstash.go index ac521e86552d0..b09b94c5ca0dd 100644 --- a/plugins/inputs/logstash/logstash.go +++ b/plugins/inputs/logstash/logstash.go @@ -2,6 +2,7 @@ package logstash import ( + "context" _ "embed" "encoding/json" "fmt" @@ -14,7 +15,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/choice" - "github.com/influxdata/telegraf/plugins/common/tls" + httpconfig "github.com/influxdata/telegraf/plugins/common/http" "github.com/influxdata/telegraf/plugins/inputs" jsonParser "github.com/influxdata/telegraf/plugins/parsers/json" ) @@ -31,10 +32,11 @@ type Logstash struct { Username string `toml:"username"` Password string `toml:"password"` Headers map[string]string `toml:"headers"` - Timeout config.Duration `toml:"timeout"` - tls.ClientConfig + + Log telegraf.Logger `toml:"-"` client *http.Client + httpconfig.HTTPClientConfig } // NewLogstash create an instance of the plugin with default settings @@ -44,7 +46,9 @@ func NewLogstash() *Logstash { SinglePipeline: false, Collect: []string{"pipelines", "process", "jvm"}, Headers: make(map[string]string), - Timeout: config.Duration(time.Second * 5), + HTTPClientConfig: httpconfig.HTTPClientConfig{ + Timeout: 
config.Duration(5 * time.Second), + }, } } @@ -131,19 +135,8 @@ func (logstash *Logstash) Init() error { // createHTTPClient create a clients to access API func (logstash *Logstash) createHTTPClient() (*http.Client, error) { - tlsConfig, err := logstash.ClientConfig.TLSConfig() - if err != nil { - return nil, err - } - - client := &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: tlsConfig, - }, - Timeout: time.Duration(logstash.Timeout), - } - - return client, nil + ctx := context.Background() + return logstash.HTTPClientConfig.CreateClient(ctx, logstash.Log) } // gatherJSONData query the data source and parse the response JSON diff --git a/plugins/inputs/logstash/sample.conf b/plugins/inputs/logstash/sample.conf index ed62dce13e4ad..20506d4c8fb42 100644 --- a/plugins/inputs/logstash/sample.conf +++ b/plugins/inputs/logstash/sample.conf @@ -25,6 +25,13 @@ ## Use TLS but skip chain & host verification. # insecure_skip_verify = false + + ## If 'use_system_proxy' is set to true, Telegraf will check env vars such as + ## HTTP_PROXY, HTTPS_PROXY, and NO_PROXY (or their lowercase counterparts). + ## If 'use_system_proxy' is set to false (default) and 'http_proxy_url' is + ## provided, Telegraf will use the specified URL as HTTP proxy. + # use_system_proxy = false + # http_proxy_url = "http://localhost:8888" ## Optional HTTP headers. 
# [inputs.logstash.headers] diff --git a/plugins/inputs/mesos/mesos_test.go b/plugins/inputs/mesos/mesos_test.go index 65f399a97c964..45a36cfb63bee 100644 --- a/plugins/inputs/mesos/mesos_test.go +++ b/plugins/inputs/mesos/mesos_test.go @@ -416,14 +416,14 @@ func TestWithPathDoesNotModify(t *testing.T) { u, err := url.Parse("http://localhost:5051") require.NoError(t, err) v := withPath(u, "/xyzzy") - require.Equal(t, u.String(), "http://localhost:5051") - require.Equal(t, v.String(), "http://localhost:5051/xyzzy") + require.Equal(t, "http://localhost:5051", u.String()) + require.Equal(t, "http://localhost:5051/xyzzy", v.String()) } func TestURLTagDoesNotModify(t *testing.T) { u, err := url.Parse("http://a:b@localhost:5051?timeout=1ms") require.NoError(t, err) v := urlTag(u) - require.Equal(t, u.String(), "http://a:b@localhost:5051?timeout=1ms") - require.Equal(t, v, "http://localhost:5051") + require.Equal(t, "http://a:b@localhost:5051?timeout=1ms", u.String()) + require.Equal(t, "http://localhost:5051", v) } diff --git a/plugins/inputs/modbus/README.md b/plugins/inputs/modbus/README.md index 71c60dc0f5636..ad5a7cf21717c 100644 --- a/plugins/inputs/modbus/README.md +++ b/plugins/inputs/modbus/README.md @@ -106,6 +106,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. ## FLOAT16-IEEE, FLOAT32-IEEE, FLOAT64-IEEE (IEEE 754 binary representation) ## FIXED, UFIXED (fixed-point representation on input) ## FLOAT32 is a deprecated alias for UFIXED for historic reasons, should be avoided + ## STRING (byte-sequence converted to string) ## scale - the final numeric variable representation ## address - variable address @@ -116,6 +117,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. 
{ name = "current", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [1,2]}, { name = "frequency", byte_order = "AB", data_type = "UFIXED", scale=0.1, address = [7]}, { name = "power", byte_order = "ABCD", data_type = "UFIXED", scale=0.1, address = [3,4]}, + { name = "firmware", byte_order = "AB", data_type = "STRING", address = [5, 6, 7, 8, 9, 10, 11, 12]}, ] input_registers = [ { name = "tank_level", byte_order = "AB", data_type = "INT16", scale=1.0, address = [0]}, @@ -147,7 +149,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. register = "coil" ## Name of the measurement. - ## Can be overriden by the individual field definitions. Defaults to "modbus" + ## Can be overridden by the individual field definitions. Defaults to "modbus" # measurement = "modbus" ## Request optimization algorithm. @@ -177,9 +179,12 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. ## INT8L, INT8H, UINT8L, UINT8H (low and high byte variants) ## INT16, UINT16, INT32, UINT32, INT64, UINT64 and ## FLOAT16, FLOAT32, FLOAT64 (IEEE 754 binary representation) - ## scale *1,2 - (optional) factor to scale the variable with - ## output *1,3 - (optional) type of resulting field, can be INT64, UINT64 or FLOAT64. Defaults to FLOAT64 if - ## "scale" is provided and to the input "type" class otherwise (i.e. INT* -> INT64, etc). + ## STRING (byte-sequence converted to string) + ## length *1,2 - (optional) number of registers, ONLY valid for STRING type + ## scale *1,2,4 - (optional) factor to scale the variable with + ## output *1,3,4 - (optional) type of resulting field, can be INT64, UINT64 or FLOAT64. + ## Defaults to FLOAT64 for numeric fields if "scale" is provided. + ## Otherwise the input "type" class is used (e.g. INT* -> INT64). ## measurement *1 - (optional) measurement name, defaults to the setting of the request ## omit - (optional) omit this field. Useful to leave out single values when querying many registers ## with a single request. 
Defaults to "false". @@ -189,13 +194,15 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. ## *3: This field can only be "UINT16" or "BOOL" if specified for both "coil" ## and "discrete"-input type of registers. By default the fields are ## output as zero or one in UINT16 format unless "BOOL" is used. + ## *4: These fields cannot be used with "STRING"-type fields. ## Coil / discrete input example fields = [ - { address=0, name="motor1_run"}, - { address=1, name="jog", measurement="motor"}, - { address=2, name="motor1_stop", omit=true}, - { address=3, name="motor1_overheating", output="BOOL"}, + { address=0, name="motor1_run" }, + { address=1, name="jog", measurement="motor" }, + { address=2, name="motor1_stop", omit=true }, + { address=3, name="motor1_overheating", output="BOOL" }, + { address=4, name="firmware", type="STRING", length=8 }, ] [inputs.modbus.request.tags] @@ -274,22 +281,25 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. # measurement = "modbus" ## Field definitions - ## register - type of the modbus register, can be "coil", "discrete", - ## "holding" or "input". Defaults to "holding". - ## address - address of the register to query. For coil and discrete inputs this is the bit address. - ## name - field name - ## type *1 - type of the modbus field, can be - ## INT8L, INT8H, UINT8L, UINT8H (low and high byte variants) - ## INT16, UINT16, INT32, UINT32, INT64, UINT64 and - ## FLOAT16, FLOAT32, FLOAT64 (IEEE 754 binary representation) - ## scale *1 - (optional) factor to scale the variable with - ## output *2 - (optional) type of resulting field, can be INT64, UINT64 or FLOAT64. Defaults to FLOAT64 if - ## "scale" is provided and to the input "type" class otherwise (i.e. INT* -> INT64, etc). + ## register - type of the modbus register, can be "coil", "discrete", + ## "holding" or "input". Defaults to "holding". + ## address - address of the register to query. For coil and discrete inputs this is the bit address. 
+ ## name - field name + ## type *1 - type of the modbus field, can be + ## INT8L, INT8H, UINT8L, UINT8H (low and high byte variants) + ## INT16, UINT16, INT32, UINT32, INT64, UINT64 and + ## FLOAT16, FLOAT32, FLOAT64 (IEEE 754 binary representation) + ## STRING (byte-sequence converted to string) + ## length *1 - (optional) number of registers, ONLY valid for STRING type + ## scale *1,3 - (optional) factor to scale the variable with + ## output *2,3 - (optional) type of resulting field, can be INT64, UINT64 or FLOAT64. Defaults to FLOAT64 if + ## "scale" is provided and to the input "type" class otherwise (i.e. INT* -> INT64, etc). ## ## *1: These fields are ignored for both "coil" and "discrete"-input type of registers. ## *2: This field can only be "UINT16" or "BOOL" if specified for both "coil" ## and "discrete"-input type of registers. By default the fields are ## output as zero or one in UINT16 format unless "BOOL" is used. + ## *3: These fields cannot be used with "STRING"-type fields. fields = [ { register="coil", address=0, name="door_open"}, { register="coil", address=1, name="status_ok"}, @@ -298,6 +308,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. { address=5, name="energy", type="FLOAT32", scale=0.001,}, { address=7, name="frequency", type="UINT32", scale=0.1 }, { address=8, name="power_factor", type="INT64", scale=0.01 }, + { address=9, name="firmware", type="STRING", length=8 }, ] ## Tags assigned to the metric @@ -388,10 +399,10 @@ configuration for a single slave-device. The field `data_type` defines the representation of the data value on input from the modbus registers. The input values are then converted from the given `data_type` to a type that is appropriate when sending the value to the output -plugin. These output types are usually one of string, integer or -floating-point-number. The size of the output type is assumed to be large enough -for all supported input types. 
The mapping from the input type to the output -type is fixed and cannot be configured. +plugin. These output types are usually an integer or floating-point-number. The +size of the output type is assumed to be large enough for all supported input +types. The mapping from the input type to the output type is fixed and cannot +be configured. ##### Booleans: `BOOL` @@ -433,6 +444,13 @@ like 'int32 containing fixed-point representation with N decimal places'. (`FLOAT32` is deprecated and should not be used. `UFIXED` provides the same conversion from unsigned values). +##### String: `STRING` + +This type is used to query the number of registers specified in the `address` +setting and convert the byte-sequence to a string. Please note, if the +byte-sequence contains a `null` byte, the string is truncated at this position. +You cannot use the `scale` setting for string fields. + --- ### `request` configuration style @@ -563,6 +581,12 @@ half-precision float with a 16-bit representation. Usually the datatype of the register is listed in the datasheet of your modbus device in relation to the `address` described above. +The `STRING` datatype is special in that it requires the `length` setting to +be specified containing the length (in terms of number of registers) containing +the string. The returned byte-sequence is interpreted as string and truncated +to the first `null` byte found if any. The `scale` and `output` setting cannot +be used for this `type`. + This setting is ignored if the field's `omit` is set to `true` or if the `register` type is a bit-type (`coil` or `discrete`) and can be omitted in these cases. @@ -722,6 +746,12 @@ half-precision float with a 16-bit representation. Usually the datatype of the register is listed in the datasheet of your modbus device in relation to the `address` described above. 
+The `STRING` datatype is special in that it requires the `length` setting to +be specified containing the length (in terms of number of registers) containing +the string. The returned byte-sequence is interpreted as string and truncated +to the first `null` byte found if any. The `scale` and `output` setting cannot +be used for this `type`. + This setting is ignored if the `register` is a bit-type (`coil` or `discrete`) and can be omitted in these cases. diff --git a/plugins/inputs/modbus/configuration.go b/plugins/inputs/modbus/configuration.go index 0ddd71844aed5..0e40e79fbbd59 100644 --- a/plugins/inputs/modbus/configuration.go +++ b/plugins/inputs/modbus/configuration.go @@ -33,7 +33,7 @@ func normalizeInputDatatype(dataType string) (string, error) { switch dataType { case "INT8L", "INT8H", "UINT8L", "UINT8H", "INT16", "UINT16", "INT32", "UINT32", "INT64", "UINT64", - "FLOAT16", "FLOAT32", "FLOAT64": + "FLOAT16", "FLOAT32", "FLOAT64", "STRING": return dataType, nil } return "unknown", fmt.Errorf("unknown input type %q", dataType) @@ -43,7 +43,7 @@ func normalizeOutputDatatype(dataType string) (string, error) { switch dataType { case "", "native": return "native", nil - case "INT64", "UINT64", "FLOAT64": + case "INT64", "UINT64", "FLOAT64", "STRING": return dataType, nil } return "unknown", fmt.Errorf("unknown output type %q", dataType) diff --git a/plugins/inputs/modbus/configuration_metric.go b/plugins/inputs/modbus/configuration_metric.go index abd534f7f3bd8..e406467091740 100644 --- a/plugins/inputs/modbus/configuration_metric.go +++ b/plugins/inputs/modbus/configuration_metric.go @@ -15,6 +15,7 @@ var sampleConfigPartPerMetric string type metricFieldDefinition struct { RegisterType string `toml:"register"` Address uint16 `toml:"address"` + Length uint16 `toml:"length"` Name string `toml:"name"` InputType string `toml:"type"` Scale float64 `toml:"scale"` @@ -101,16 +102,32 @@ func (c *ConfigurationPerMetric) Check() error { // Check the input type switch 
f.InputType { case "": - case "INT8L", "INT8H", "INT16", "INT32", "INT64": - case "UINT8L", "UINT8H", "UINT16", "UINT32", "UINT64": - case "FLOAT16", "FLOAT32", "FLOAT64": + case "INT8L", "INT8H", "INT16", "INT32", "INT64", + "UINT8L", "UINT8H", "UINT16", "UINT32", "UINT64", + "FLOAT16", "FLOAT32", "FLOAT64": + if f.Length != 0 { + return fmt.Errorf("length option cannot be used for type %q of field %q", f.InputType, f.Name) + } + if f.OutputType == "STRING" { + return fmt.Errorf("cannot output field %q as string", f.Name) + } + case "STRING": + if f.Length < 1 { + return fmt.Errorf("missing length for string field %q", f.Name) + } + if f.Scale != 0.0 { + return fmt.Errorf("scale option cannot be used for string field %q", f.Name) + } + if f.OutputType != "" && f.OutputType != "STRING" { + return fmt.Errorf("invalid output type %q for string field %q", f.OutputType, f.Name) + } default: return fmt.Errorf("unknown register data-type %q for field %q", f.InputType, f.Name) } // Check output type switch f.OutputType { - case "", "INT64", "UINT64", "FLOAT64": + case "", "INT64", "UINT64", "FLOAT64", "STRING": default: return fmt.Errorf("unknown output data-type %q for field %q", f.OutputType, f.Name) } @@ -223,7 +240,7 @@ func (c *ConfigurationPerMetric) newField(def metricFieldDefinition, mdef metric fieldLength := uint16(1) if typed { var err error - if fieldLength, err = c.determineFieldLength(def.InputType); err != nil { + if fieldLength, err = c.determineFieldLength(def.InputType, def.Length); err != nil { return field{}, err } } @@ -258,8 +275,13 @@ func (c *ConfigurationPerMetric) newField(def metricFieldDefinition, mdef metric return field{}, err } } else { - // For scaling cases we always want FLOAT64 by default - def.OutputType = "FLOAT64" + // For scaling cases we always want FLOAT64 by default except for + // string fields + if def.InputType != "STRING" { + def.OutputType = "FLOAT64" + } else { + def.OutputType = "STRING" + } } } @@ -351,11 +373,13 @@ func 
(c *ConfigurationPerMetric) determineOutputDatatype(input string) (string, return "UINT64", nil case "FLOAT16", "FLOAT32", "FLOAT64": return "FLOAT64", nil + case "STRING": + return "STRING", nil } return "unknown", fmt.Errorf("invalid input datatype %q for determining output", input) } -func (c *ConfigurationPerMetric) determineFieldLength(input string) (uint16, error) { +func (c *ConfigurationPerMetric) determineFieldLength(input string, length uint16) (uint16, error) { // Handle our special types switch input { case "INT8L", "INT8H", "UINT8L", "UINT8H": @@ -366,6 +390,8 @@ func (c *ConfigurationPerMetric) determineFieldLength(input string) (uint16, err return 2, nil case "INT64", "UINT64", "FLOAT64": return 4, nil + case "STRING": + return length, nil } return 0, fmt.Errorf("invalid input datatype %q for determining field length", input) } diff --git a/plugins/inputs/modbus/configuration_metric_test.go b/plugins/inputs/modbus/configuration_metric_test.go index e3aac2c8ef920..7418ef189d7cb 100644 --- a/plugins/inputs/modbus/configuration_metric_test.go +++ b/plugins/inputs/modbus/configuration_metric_test.go @@ -162,6 +162,7 @@ func TestMetricResult(t *testing.T) { 0x00, 0x00, 0x08, 0x99, // 2201 0x00, 0x00, 0x08, 0x9A, // 2202 0x40, 0x49, 0x0f, 0xdb, // float32 of 3.1415927410125732421875 + 0x4d, 0x6f, 0x64, 0x62, 0x75, 0x73, 0x20, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x00, // String "Modbus String" } // Write the data to a fake server @@ -203,6 +204,13 @@ func TestMetricResult(t *testing.T) { InputType: "INT16", RegisterType: "holding", }, + { + Name: "comment", + Address: uint16(11), + Length: 7, + InputType: "STRING", + RegisterType: "holding", + }, }, Tags: map[string]string{ "location": "main building", @@ -275,6 +283,7 @@ func TestMetricResult(t *testing.T) { map[string]interface{}{ "hours": uint64(10), "temperature": int64(42), + "comment": "Modbus String", }, time.Unix(0, 0), ), diff --git a/plugins/inputs/modbus/configuration_register.go 
b/plugins/inputs/modbus/configuration_register.go index cfdf097862e07..8266e59b4b997 100644 --- a/plugins/inputs/modbus/configuration_register.go +++ b/plugins/inputs/modbus/configuration_register.go @@ -202,14 +202,14 @@ func (c *ConfigurationOriginal) validateFieldDefinitions(fieldDefs []fieldDefini case "INT8L", "INT8H", "UINT8L", "UINT8H", "UINT16", "INT16", "UINT32", "INT32", "UINT64", "INT64", "FLOAT16-IEEE", "FLOAT32-IEEE", "FLOAT64-IEEE", "FLOAT32", "FIXED", "UFIXED": + // Check scale + if item.Scale == 0.0 { + return fmt.Errorf("invalid scale '%f' in %q - %q", item.Scale, registerType, item.Name) + } + case "STRING": default: return fmt.Errorf("invalid data type %q in %q - %q", item.DataType, registerType, item.Name) } - - // check scale - if item.Scale == 0.0 { - return fmt.Errorf("invalid scale '%f' in %q - %q", item.Scale, registerType, item.Name) - } } else { // Bit-registers do have less data types switch item.DataType { @@ -220,39 +220,41 @@ func (c *ConfigurationOriginal) validateFieldDefinitions(fieldDefs []fieldDefini } // check address - if len(item.Address) != 1 && len(item.Address) != 2 && len(item.Address) != 4 { - return fmt.Errorf("invalid address '%v' length '%v' in %q - %q", item.Address, len(item.Address), registerType, item.Name) - } - - if registerType == cInputRegisters || registerType == cHoldingRegisters { - if 2*len(item.Address) != len(item.ByteOrder) { - return fmt.Errorf("invalid byte order %q and address '%v' in %q - %q", item.ByteOrder, item.Address, registerType, item.Name) - } - - // Check for the request size corresponding to the data-type - var requiredAddresses int - switch item.DataType { - case "INT8L", "INT8H", "UINT8L", "UINT8H", "UINT16", "INT16", "FLOAT16-IEEE": - requiredAddresses = 1 - case "UINT32", "INT32", "FLOAT32-IEEE": - requiredAddresses = 2 - - case "UINT64", "INT64", "FLOAT64-IEEE": - requiredAddresses = 4 - } - if requiredAddresses > 0 && len(item.Address) != requiredAddresses { - return fmt.Errorf( - 
"invalid address '%v' length '%v'in %q - %q, expecting %d entries for datatype", - item.Address, len(item.Address), registerType, item.Name, requiredAddresses, - ) + if item.DataType != "STRING" { + if len(item.Address) != 1 && len(item.Address) != 2 && len(item.Address) != 4 { + return fmt.Errorf("invalid address '%v' length '%v' in %q - %q", item.Address, len(item.Address), registerType, item.Name) } - // search duplicated - if len(item.Address) > len(removeDuplicates(item.Address)) { - return fmt.Errorf("duplicate address '%v' in %q - %q", item.Address, registerType, item.Name) + if registerType == cInputRegisters || registerType == cHoldingRegisters { + if 2*len(item.Address) != len(item.ByteOrder) { + return fmt.Errorf("invalid byte order %q and address '%v' in %q - %q", item.ByteOrder, item.Address, registerType, item.Name) + } + + // Check for the request size corresponding to the data-type + var requiredAddresses int + switch item.DataType { + case "INT8L", "INT8H", "UINT8L", "UINT8H", "UINT16", "INT16", "FLOAT16-IEEE": + requiredAddresses = 1 + case "UINT32", "INT32", "FLOAT32-IEEE": + requiredAddresses = 2 + + case "UINT64", "INT64", "FLOAT64-IEEE": + requiredAddresses = 4 + } + if requiredAddresses > 0 && len(item.Address) != requiredAddresses { + return fmt.Errorf( + "invalid address '%v' length '%v'in %q - %q, expecting %d entries for datatype", + item.Address, len(item.Address), registerType, item.Name, requiredAddresses, + ) + } + + // search duplicated + if len(item.Address) > len(removeDuplicates(item.Address)) { + return fmt.Errorf("duplicate address '%v' in %q - %q", item.Address, registerType, item.Name) + } + } else if len(item.Address) != 1 { + return fmt.Errorf("invalid address '%v' length '%v'in %q - %q", item.Address, len(item.Address), registerType, item.Name) } - } else if len(item.Address) != 1 { - return fmt.Errorf("invalid address '%v' length '%v'in %q - %q", item.Address, len(item.Address), registerType, item.Name) } } return nil @@ 
-297,6 +299,8 @@ func (c *ConfigurationOriginal) normalizeInputDatatype(dataType string, words in return "FLOAT32", nil case "FLOAT64-IEEE": return "FLOAT64", nil + case "STRING": + return "STRING", nil } return normalizeInputDatatype(dataType) } diff --git a/plugins/inputs/modbus/configuration_register_test.go b/plugins/inputs/modbus/configuration_register_test.go index 0b65a60a3fcb3..2d0cfcf9b9de4 100644 --- a/plugins/inputs/modbus/configuration_register_test.go +++ b/plugins/inputs/modbus/configuration_register_test.go @@ -841,6 +841,24 @@ func TestRegisterHoldingRegisters(t *testing.T) { write: []byte{0x14, 0xb8}, read: float64(-0.509765625), }, + { + name: "register250_abcd_string", + address: []uint16{250, 251, 252, 253, 254, 255, 256}, + quantity: 7, + byteOrder: "AB", + dataType: "STRING", + write: []byte{0x4d, 0x6f, 0x64, 0x62, 0x75, 0x73, 0x20, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x00}, + read: "Modbus String", + }, + { + name: "register250_dcba_string", + address: []uint16{250, 251, 252, 253, 254, 255, 256}, + quantity: 7, + byteOrder: "BA", + dataType: "STRING", + write: []byte{0x6f, 0x4d, 0x62, 0x64, 0x73, 0x75, 0x53, 0x20, 0x72, 0x74, 0x6e, 0x69, 0x00, 0x67}, + read: "Modbus String", + }, } serv := mbserver.NewServer() diff --git a/plugins/inputs/modbus/configuration_request.go b/plugins/inputs/modbus/configuration_request.go index 3384ab75e2ab9..68d95a60d38fd 100644 --- a/plugins/inputs/modbus/configuration_request.go +++ b/plugins/inputs/modbus/configuration_request.go @@ -17,6 +17,7 @@ type requestFieldDefinition struct { Address uint16 `toml:"address"` Name string `toml:"name"` InputType string `toml:"type"` + Length uint16 `toml:"length"` Scale float64 `toml:"scale"` OutputType string `toml:"output"` Measurement string `toml:"measurement"` @@ -121,9 +122,25 @@ func (c *ConfigurationPerRequest) Check() error { if def.RegisterType == "holding" || def.RegisterType == "input" { switch f.InputType { case "": - case "INT8L", "INT8H", "INT16", "INT32", 
"INT64": - case "UINT8L", "UINT8H", "UINT16", "UINT32", "UINT64": - case "FLOAT16", "FLOAT32", "FLOAT64": + case "INT8L", "INT8H", "INT16", "INT32", "INT64", + "UINT8L", "UINT8H", "UINT16", "UINT32", "UINT64", + "FLOAT16", "FLOAT32", "FLOAT64": + if f.Length != 0 { + return fmt.Errorf("length option cannot be used for type %q of field %q", f.InputType, f.Name) + } + if f.OutputType == "STRING" { + return fmt.Errorf("cannot output field %q as string", f.Name) + } + case "STRING": + if f.Length < 1 { + return fmt.Errorf("missing length for string field %q", f.Name) + } + if f.Scale != 0.0 { + return fmt.Errorf("scale option cannot be used for string field %q", f.Name) + } + if f.OutputType != "" && f.OutputType != "STRING" { + return fmt.Errorf("invalid output type %q for string field %q", f.OutputType, f.Name) + } default: return fmt.Errorf("unknown register data-type %q for field %q", f.InputType, f.Name) } @@ -142,7 +159,7 @@ func (c *ConfigurationPerRequest) Check() error { // Check output type if def.RegisterType == "holding" || def.RegisterType == "input" { switch f.OutputType { - case "", "INT64", "UINT64", "FLOAT64": + case "", "INT64", "UINT64", "FLOAT64", "STRING": default: return fmt.Errorf("unknown output data-type %q for field %q", f.OutputType, f.Name) } @@ -269,7 +286,7 @@ func (c *ConfigurationPerRequest) newFieldFromDefinition(def requestFieldDefinit fieldLength := uint16(1) if typed { - if fieldLength, err = c.determineFieldLength(def.InputType); err != nil { + if fieldLength, err = c.determineFieldLength(def.InputType, def.Length); err != nil { return field{}, err } } @@ -306,8 +323,13 @@ func (c *ConfigurationPerRequest) newFieldFromDefinition(def requestFieldDefinit return field{}, err } } else { - // For scaling cases we always want FLOAT64 by default - def.OutputType = "FLOAT64" + // For scaling cases we always want FLOAT64 by default except for + // string fields + if def.InputType != "STRING" { + def.OutputType = "FLOAT64" + } else { + 
def.OutputType = "STRING" + } } } @@ -398,11 +420,13 @@ func (c *ConfigurationPerRequest) determineOutputDatatype(input string) (string, return "UINT64", nil case "FLOAT16", "FLOAT32", "FLOAT64": return "FLOAT64", nil + case "STRING": + return "STRING", nil } return "unknown", fmt.Errorf("invalid input datatype %q for determining output", input) } -func (c *ConfigurationPerRequest) determineFieldLength(input string) (uint16, error) { +func (c *ConfigurationPerRequest) determineFieldLength(input string, length uint16) (uint16, error) { // Handle our special types switch input { case "INT8L", "INT8H", "UINT8L", "UINT8H": @@ -413,6 +437,8 @@ func (c *ConfigurationPerRequest) determineFieldLength(input string) (uint16, er return 2, nil case "INT64", "UINT64", "FLOAT64": return 4, nil + case "STRING": + return length, nil } return 0, fmt.Errorf("invalid input datatype %q for determining field length", input) } diff --git a/plugins/inputs/modbus/configuration_request_test.go b/plugins/inputs/modbus/configuration_request_test.go index 5faf5d4f5a16d..86c6600c8ecd4 100644 --- a/plugins/inputs/modbus/configuration_request_test.go +++ b/plugins/inputs/modbus/configuration_request_test.go @@ -457,6 +457,7 @@ func TestRequestTypesHoldingABCD(t *testing.T) { tests := []struct { name string address uint16 + length uint16 byteOrder string dataTypeIn string dataTypeOut string @@ -989,6 +990,14 @@ func TestRequestTypesHoldingABCD(t *testing.T) { write: []byte{0xb8, 0x14}, read: float64(-0.509765625), }, + { + name: "register110_string", + address: 110, + dataTypeIn: "STRING", + length: 7, + write: []byte{0x4d, 0x6f, 0x64, 0x62, 0x75, 0x73, 0x20, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x00}, + read: "Modbus String", + }, } serv := mbserver.NewServer() @@ -1024,6 +1033,7 @@ func TestRequestTypesHoldingABCD(t *testing.T) { OutputType: hrt.dataTypeOut, Scale: hrt.scale, Address: hrt.address, + Length: hrt.length, }, }, }, @@ -1058,6 +1068,7 @@ func TestRequestTypesHoldingDCBA(t *testing.T) 
{ tests := []struct { name string address uint16 + length uint16 byteOrder string dataTypeIn string dataTypeOut string @@ -1590,6 +1601,14 @@ func TestRequestTypesHoldingDCBA(t *testing.T) { write: []byte{0xb8, 0x14}, read: float64(-0.509765625), }, + { + name: "register110_string", + address: 110, + dataTypeIn: "STRING", + length: 7, + write: []byte{0x6f, 0x4d, 0x62, 0x64, 0x73, 0x75, 0x53, 0x20, 0x72, 0x74, 0x6e, 0x69, 0x00, 0x67}, + read: "Modbus String", + }, } serv := mbserver.NewServer() @@ -1605,8 +1624,13 @@ func TestRequestTypesHoldingDCBA(t *testing.T) { t.Run(hrt.name, func(t *testing.T) { quantity := uint16(len(hrt.write) / 2) invert := make([]byte, 0, len(hrt.write)) - for i := len(hrt.write) - 1; i >= 0; i-- { - invert = append(invert, hrt.write[i]) + if hrt.dataTypeIn != "STRING" { + for i := len(hrt.write) - 1; i >= 0; i-- { + invert = append(invert, hrt.write[i]) + } + } else { + // Put in raw data for strings + invert = append(invert, hrt.write...) } _, err := client.WriteMultipleRegisters(hrt.address, quantity, invert) require.NoError(t, err) @@ -1629,6 +1653,7 @@ func TestRequestTypesHoldingDCBA(t *testing.T) { OutputType: hrt.dataTypeOut, Scale: hrt.scale, Address: hrt.address, + Length: hrt.length, }, }, }, diff --git a/plugins/inputs/modbus/modbus_test.go b/plugins/inputs/modbus/modbus_test.go index 00d715b34536a..419ba6795674a 100644 --- a/plugins/inputs/modbus/modbus_test.go +++ b/plugins/inputs/modbus/modbus_test.go @@ -293,7 +293,7 @@ func TestRetryFailIllegal(t *testing.T) { require.NoError(t, modbus.Gather(&acc)) require.Len(t, acc.Errors, 1) require.EqualError(t, acc.FirstError(), "slave 1: modbus: exception '1' (illegal function), function '129'") - require.Equal(t, counter, 1) + require.Equal(t, 1, counter) } func TestCases(t *testing.T) { diff --git a/plugins/inputs/modbus/request.go b/plugins/inputs/modbus/request.go index 3da7e257c2a49..fb6c248cfb7c6 100644 --- a/plugins/inputs/modbus/request.go +++ 
b/plugins/inputs/modbus/request.go @@ -275,7 +275,7 @@ func groupFieldsToRequests(fields []field, params groupingParams) []request { } requests = optimizeGroup(total, params.MaxBatchSize) case "max_insert": - // Similar to aggressive but keeps the number of touched registers bellow a threshold + // Similar to aggressive but keeps the number of touched registers below a threshold var total request for _, g := range groups { if len(g.fields) > 0 { diff --git a/plugins/inputs/modbus/sample_metric.conf b/plugins/inputs/modbus/sample_metric.conf index ff2d8abfbed03..5eb10979b4312 100644 --- a/plugins/inputs/modbus/sample_metric.conf +++ b/plugins/inputs/modbus/sample_metric.conf @@ -35,22 +35,25 @@ # measurement = "modbus" ## Field definitions - ## register - type of the modbus register, can be "coil", "discrete", - ## "holding" or "input". Defaults to "holding". - ## address - address of the register to query. For coil and discrete inputs this is the bit address. - ## name - field name - ## type *1 - type of the modbus field, can be - ## INT8L, INT8H, UINT8L, UINT8H (low and high byte variants) - ## INT16, UINT16, INT32, UINT32, INT64, UINT64 and - ## FLOAT16, FLOAT32, FLOAT64 (IEEE 754 binary representation) - ## scale *1 - (optional) factor to scale the variable with - ## output *2 - (optional) type of resulting field, can be INT64, UINT64 or FLOAT64. Defaults to FLOAT64 if - ## "scale" is provided and to the input "type" class otherwise (i.e. INT* -> INT64, etc). + ## register - type of the modbus register, can be "coil", "discrete", + ## "holding" or "input". Defaults to "holding". + ## address - address of the register to query. For coil and discrete inputs this is the bit address. 
+ ## name - field name + ## type *1 - type of the modbus field, can be + ## INT8L, INT8H, UINT8L, UINT8H (low and high byte variants) + ## INT16, UINT16, INT32, UINT32, INT64, UINT64 and + ## FLOAT16, FLOAT32, FLOAT64 (IEEE 754 binary representation) + ## STRING (byte-sequence converted to string) + ## length *1 - (optional) number of registers, ONLY valid for STRING type + ## scale *1,3 - (optional) factor to scale the variable with + ## output *2,3 - (optional) type of resulting field, can be INT64, UINT64 or FLOAT64. Defaults to FLOAT64 if + ## "scale" is provided and to the input "type" class otherwise (i.e. INT* -> INT64, etc). ## ## *1: These fields are ignored for both "coil" and "discrete"-input type of registers. ## *2: This field can only be "UINT16" or "BOOL" if specified for both "coil" ## and "discrete"-input type of registers. By default the fields are ## output as zero or one in UINT16 format unless "BOOL" is used. + ## *3: These fields cannot be used with "STRING"-type fields. 
fields = [ { register="coil", address=0, name="door_open"}, { register="coil", address=1, name="status_ok"}, @@ -59,6 +62,7 @@ { address=5, name="energy", type="FLOAT32", scale=0.001,}, { address=7, name="frequency", type="UINT32", scale=0.1 }, { address=8, name="power_factor", type="INT64", scale=0.01 }, + { address=9, name="firmware", type="STRING", length=8 }, ] ## Tags assigned to the metric diff --git a/plugins/inputs/modbus/sample_register.conf b/plugins/inputs/modbus/sample_register.conf index fb4567b86882e..4da7aaffdc8a0 100644 --- a/plugins/inputs/modbus/sample_register.conf +++ b/plugins/inputs/modbus/sample_register.conf @@ -34,6 +34,7 @@ ## FLOAT16-IEEE, FLOAT32-IEEE, FLOAT64-IEEE (IEEE 754 binary representation) ## FIXED, UFIXED (fixed-point representation on input) ## FLOAT32 is a deprecated alias for UFIXED for historic reasons, should be avoided + ## STRING (byte-sequence converted to string) ## scale - the final numeric variable representation ## address - variable address @@ -44,6 +45,7 @@ { name = "current", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [1,2]}, { name = "frequency", byte_order = "AB", data_type = "UFIXED", scale=0.1, address = [7]}, { name = "power", byte_order = "ABCD", data_type = "UFIXED", scale=0.1, address = [3,4]}, + { name = "firmware", byte_order = "AB", data_type = "STRING", address = [5, 6, 7, 8, 9, 10, 11, 12]}, ] input_registers = [ { name = "tank_level", byte_order = "AB", data_type = "INT16", scale=1.0, address = [0]}, diff --git a/plugins/inputs/modbus/sample_request.conf b/plugins/inputs/modbus/sample_request.conf index adab9b8152d92..7ef998ce60b53 100644 --- a/plugins/inputs/modbus/sample_request.conf +++ b/plugins/inputs/modbus/sample_request.conf @@ -22,7 +22,7 @@ register = "coil" ## Name of the measurement. - ## Can be overriden by the individual field definitions. Defaults to "modbus" + ## Can be overridden by the individual field definitions. 
Defaults to "modbus" # measurement = "modbus" ## Request optimization algorithm. @@ -52,9 +52,12 @@ ## INT8L, INT8H, UINT8L, UINT8H (low and high byte variants) ## INT16, UINT16, INT32, UINT32, INT64, UINT64 and ## FLOAT16, FLOAT32, FLOAT64 (IEEE 754 binary representation) - ## scale *1,2 - (optional) factor to scale the variable with - ## output *1,3 - (optional) type of resulting field, can be INT64, UINT64 or FLOAT64. Defaults to FLOAT64 if - ## "scale" is provided and to the input "type" class otherwise (i.e. INT* -> INT64, etc). + ## STRING (byte-sequence converted to string) + ## length *1,2 - (optional) number of registers, ONLY valid for STRING type + ## scale *1,2,4 - (optional) factor to scale the variable with + ## output *1,3,4 - (optional) type of resulting field, can be INT64, UINT64 or FLOAT64. + ## Defaults to FLOAT64 for numeric fields if "scale" is provided. + ## Otherwise the input "type" class is used (e.g. INT* -> INT64). ## measurement *1 - (optional) measurement name, defaults to the setting of the request ## omit - (optional) omit this field. Useful to leave out single values when querying many registers ## with a single request. Defaults to "false". @@ -64,13 +67,15 @@ ## *3: This field can only be "UINT16" or "BOOL" if specified for both "coil" ## and "discrete"-input type of registers. By default the fields are ## output as zero or one in UINT16 format unless "BOOL" is used. + ## *4: These fields cannot be used with "STRING"-type fields. 
## Coil / discrete input example fields = [ - { address=0, name="motor1_run"}, - { address=1, name="jog", measurement="motor"}, - { address=2, name="motor1_stop", omit=true}, - { address=3, name="motor1_overheating", output="BOOL"}, + { address=0, name="motor1_run" }, + { address=1, name="jog", measurement="motor" }, + { address=2, name="motor1_stop", omit=true }, + { address=3, name="motor1_overheating", output="BOOL" }, + { address=4, name="firmware", type="STRING", length=8 }, ] [inputs.modbus.request.tags] diff --git a/plugins/inputs/modbus/type_conversions.go b/plugins/inputs/modbus/type_conversions.go index 795b549c4de21..f8e9f99ce8009 100644 --- a/plugins/inputs/modbus/type_conversions.go +++ b/plugins/inputs/modbus/type_conversions.go @@ -1,6 +1,8 @@ package modbus -import "fmt" +import ( + "fmt" +) func determineUntypedConverter(outType string) (fieldConverterFunc, error) { switch outType { @@ -17,7 +19,7 @@ func determineUntypedConverter(outType string) (fieldConverterFunc, error) { } func determineConverter(inType, byteOrder, outType string, scale float64) (fieldConverterFunc, error) { - if scale != 0.0 { + if scale != 0.0 && inType != "STRING" { return determineConverterScale(inType, byteOrder, outType, scale) } return determineConverterNoScale(inType, byteOrder, outType) @@ -83,6 +85,8 @@ func determineConverterNoScale(inType, byteOrder, outType string) (fieldConverte return determineConverterF32(outType, byteOrder) case "FLOAT64": return determineConverterF64(outType, byteOrder) + case "STRING": + return determineConverterString(byteOrder) } return nil, fmt.Errorf("invalid input data-type: %s", inType) } diff --git a/plugins/inputs/modbus/type_conversions_string.go b/plugins/inputs/modbus/type_conversions_string.go new file mode 100644 index 0000000000000..8397ae9f0e41a --- /dev/null +++ b/plugins/inputs/modbus/type_conversions_string.go @@ -0,0 +1,25 @@ +package modbus + +import ( + "bytes" +) + +func determineConverterString(byteOrder string) 
(fieldConverterFunc, error) { + tohost, err := endiannessConverter16(byteOrder) + if err != nil { + return nil, err + } + + return func(b []byte) interface{} { + // Swap the bytes according to endianness + var buf bytes.Buffer + for i := 0; i < len(b); i += 2 { + v := tohost(b[i : i+2]) + _ = buf.WriteByte(byte(v >> 8)) + _ = buf.WriteByte(byte(v & 0xFF)) + } + // Remove everything after null-termination + s, _ := bytes.CutSuffix(buf.Bytes(), []byte{0x00}) + return string(s) + }, nil +} diff --git a/plugins/inputs/mongodb/mongodb_data_test.go b/plugins/inputs/mongodb/mongodb_data_test.go index af267283621e3..e8dc0ce82d708 100644 --- a/plugins/inputs/mongodb/mongodb_data_test.go +++ b/plugins/inputs/mongodb/mongodb_data_test.go @@ -329,7 +329,7 @@ func TestAddShardHostStats(t *testing.T) { } sort.Strings(hostsFound) sort.Strings(expectedHosts) - require.Equal(t, hostsFound, expectedHosts) + require.Equal(t, expectedHosts, hostsFound) } func TestStateTag(t *testing.T) { diff --git a/plugins/inputs/mongodb/mongostat_test.go b/plugins/inputs/mongodb/mongostat_test.go index 47ba058847e07..3b8ded4ced4c8 100644 --- a/plugins/inputs/mongodb/mongostat_test.go +++ b/plugins/inputs/mongodb/mongostat_test.go @@ -53,12 +53,12 @@ func TestLatencyStats(t *testing.T) { 60, ) - require.Equal(t, sl.CommandLatency, int64(0)) - require.Equal(t, sl.ReadLatency, int64(0)) - require.Equal(t, sl.WriteLatency, int64(0)) - require.Equal(t, sl.CommandOpsCnt, int64(0)) - require.Equal(t, sl.ReadOpsCnt, int64(0)) - require.Equal(t, sl.WriteOpsCnt, int64(0)) + require.Equal(t, int64(0), sl.CommandLatency) + require.Equal(t, int64(0), sl.ReadLatency) + require.Equal(t, int64(0), sl.WriteLatency) + require.Equal(t, int64(0), sl.CommandOpsCnt) + require.Equal(t, int64(0), sl.ReadOpsCnt) + require.Equal(t, int64(0), sl.WriteOpsCnt) } func TestLatencyStatsDiffZero(t *testing.T) { @@ -122,12 +122,12 @@ func TestLatencyStatsDiffZero(t *testing.T) { 60, ) - require.Equal(t, sl.CommandLatency, int64(0)) 
- require.Equal(t, sl.ReadLatency, int64(0)) - require.Equal(t, sl.WriteLatency, int64(0)) - require.Equal(t, sl.CommandOpsCnt, int64(0)) - require.Equal(t, sl.ReadOpsCnt, int64(0)) - require.Equal(t, sl.WriteOpsCnt, int64(0)) + require.Equal(t, int64(0), sl.CommandLatency) + require.Equal(t, int64(0), sl.ReadLatency) + require.Equal(t, int64(0), sl.WriteLatency) + require.Equal(t, int64(0), sl.CommandOpsCnt) + require.Equal(t, int64(0), sl.ReadOpsCnt) + require.Equal(t, int64(0), sl.WriteOpsCnt) } func TestLatencyStatsDiff(t *testing.T) { @@ -191,12 +191,12 @@ func TestLatencyStatsDiff(t *testing.T) { 60, ) - require.Equal(t, sl.CommandLatency, int64(59177981552)) - require.Equal(t, sl.ReadLatency, int64(2255946760057)) - require.Equal(t, sl.WriteLatency, int64(494479456987)) - require.Equal(t, sl.CommandOpsCnt, int64(1019152861)) - require.Equal(t, sl.ReadOpsCnt, int64(4189049884)) - require.Equal(t, sl.WriteOpsCnt, int64(1691021287)) + require.Equal(t, int64(59177981552), sl.CommandLatency) + require.Equal(t, int64(2255946760057), sl.ReadLatency) + require.Equal(t, int64(494479456987), sl.WriteLatency) + require.Equal(t, int64(1019152861), sl.CommandOpsCnt) + require.Equal(t, int64(4189049884), sl.ReadOpsCnt) + require.Equal(t, int64(1691021287), sl.WriteOpsCnt) } func TestLocksStatsNilWhenLocksMissingInOldStat(t *testing.T) { diff --git a/plugins/inputs/monit/monit_test.go b/plugins/inputs/monit/monit_test.go index 9ec38d7640b24..7538a997e3b8f 100644 --- a/plugins/inputs/monit/monit_test.go +++ b/plugins/inputs/monit/monit_test.go @@ -591,7 +591,7 @@ func TestInvalidUsernameOrPassword(t *testing.T) { return } - require.Equal(t, r.URL.Path, "/_status", "Cannot handle request") + require.Equal(t, "/_status", r.URL.Path, "Cannot handle request") http.ServeFile(w, r, "testdata/response_servicetype_0.xml") })) @@ -618,7 +618,7 @@ func TestNoUsernameOrPasswordConfiguration(t *testing.T) { return } - require.Equal(t, r.URL.Path, "/_status", "Cannot handle request") + 
require.Equal(t, "/_status", r.URL.Path, "Cannot handle request") http.ServeFile(w, r, "testdata/response_servicetype_0.xml") })) diff --git a/plugins/inputs/mqtt_consumer/README.md b/plugins/inputs/mqtt_consumer/README.md index 5e9b5e782bc4e..47e1401e8ce40 100644 --- a/plugins/inputs/mqtt_consumer/README.md +++ b/plugins/inputs/mqtt_consumer/README.md @@ -25,7 +25,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. ## Secret-store support -This plugin supports secrets from secret-stores for the `usernane` and +This plugin supports secrets from secret-stores for the `username` and `password` option. See the [secret-store documentation][SECRETSTORE] for more details on how to use them. diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index dafcc9e3597d0..69aa74a0ad9ae 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -325,8 +325,8 @@ func (m *MQTTConsumer) onMessage(_ mqtt.Client, msg mqtt.Message) { } } } - id := m.acc.AddTrackingMetricGroup(metrics) m.messagesMutex.Lock() + id := m.acc.AddTrackingMetricGroup(metrics) m.messages[id] = msg m.messagesMutex.Unlock() } diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go index 857bcad465922..f01cf986832ca 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go @@ -6,10 +6,11 @@ import ( "time" mqtt "github.com/eclipse/paho.mqtt.golang" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/parsers/influx" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) type FakeClient struct { @@ -527,7 +528,7 @@ func TestAddRouteCalledForEachTopic(t *testing.T) { plugin.Stop() - require.Equal(t, client.addRouteCallCount, 2) + 
require.Equal(t, 2, client.addRouteCallCount) } func TestSubscribeCalledIfNoSession(t *testing.T) { @@ -558,7 +559,7 @@ func TestSubscribeCalledIfNoSession(t *testing.T) { plugin.Stop() - require.Equal(t, client.subscribeCallCount, 1) + require.Equal(t, 1, client.subscribeCallCount) } func TestSubscribeNotCalledIfSession(t *testing.T) { @@ -589,5 +590,5 @@ func TestSubscribeNotCalledIfSession(t *testing.T) { plugin.Stop() - require.Equal(t, client.subscribeCallCount, 0) + require.Equal(t, 0, client.subscribeCallCount) } diff --git a/plugins/inputs/nats/nats_test.go b/plugins/inputs/nats/nats_test.go index 906bf14e90ef5..588a958e882c6 100644 --- a/plugins/inputs/nats/nats_test.go +++ b/plugins/inputs/nats/nats_test.go @@ -8,8 +8,9 @@ import ( "net/http/httptest" "testing" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) var sampleVarz = ` @@ -70,7 +71,7 @@ func TestMetricsCorrect(t *testing.T) { var acc testutil.Accumulator srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - require.Equal(t, r.URL.Path, "/varz", "Cannot handle request") + require.Equal(t, "/varz", r.URL.Path, "Cannot handle request") rsp := sampleVarz _, err := fmt.Fprintln(w, rsp) diff --git a/plugins/inputs/neptune_apex/neptune_apex_test.go b/plugins/inputs/neptune_apex/neptune_apex_test.go index 29a1c6473e2ec..b77e8ac1edb12 100644 --- a/plugins/inputs/neptune_apex/neptune_apex_test.go +++ b/plugins/inputs/neptune_apex/neptune_apex_test.go @@ -370,7 +370,7 @@ func TestParseXML(t *testing.T) { } // No error case require.NoErrorf(t, err, "expected no error but got: %v", err) - require.Equalf(t, len(acc.Errors) > 0, test.wantAccErr, + require.Equalf(t, test.wantAccErr, len(acc.Errors) > 0, "Accumulator errors. 
got=%v, want=%t", acc.Errors, test.wantAccErr) testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), test.wantMetrics) @@ -511,7 +511,7 @@ func TestFindProbe(t *testing.T) { t.Run(test.name, func(t *testing.T) { t.Parallel() index := findProbe(test.probeName, fakeProbes) - require.Equalf(t, index, test.wantIndex, "probe index mismatch; got=%d, want %d", index, test.wantIndex) + require.Equalf(t, test.wantIndex, index, "probe index mismatch; got=%d, want %d", index, test.wantIndex) }) } } diff --git a/plugins/inputs/net/net_test.go b/plugins/inputs/net/net_test.go index 68babcf977fc4..e497c24a751fa 100644 --- a/plugins/inputs/net/net_test.go +++ b/plugins/inputs/net/net_test.go @@ -1,17 +1,15 @@ package net import ( - "syscall" "testing" - "github.com/influxdata/telegraf/plugins/inputs/netstat" "github.com/influxdata/telegraf/plugins/inputs/system" "github.com/influxdata/telegraf/testutil" "github.com/shirou/gopsutil/v3/net" "github.com/stretchr/testify/require" ) -func TestNetStats(t *testing.T) { +func TestNetIOStats(t *testing.T) { var mps system.MockPS var err error defer mps.AssertExpectations(t) @@ -42,23 +40,6 @@ func TestNetStats(t *testing.T) { } mps.On("NetProto").Return(netprotos, nil) - netstats := []net.ConnectionStat{ - { - Type: syscall.SOCK_DGRAM, - }, - { - Status: "ESTABLISHED", - }, - { - Status: "ESTABLISHED", - }, - { - Status: "CLOSE", - }, - } - - mps.On("NetConnections").Return(netstats, nil) - err = (&NetIOStats{ps: &mps, skipChecks: true}).Gather(&acc) require.NoError(t, err) @@ -86,34 +67,4 @@ func TestNetStats(t *testing.T) { "interface": "all", } acc.AssertContainsTaggedFields(t, "net", fields2, ntags) - - acc.Metrics = nil - - err = (&netstat.NetStats{ - PS: &mps, - }).Gather(&acc) - require.NoError(t, err) - - fields3 := map[string]interface{}{ - "tcp_established": 2, - "tcp_syn_sent": 0, - "tcp_syn_recv": 0, - "tcp_fin_wait1": 0, - "tcp_fin_wait2": 0, - "tcp_time_wait": 0, - "tcp_close": 1, 
- "tcp_close_wait": 0, - "tcp_last_ack": 0, - "tcp_listen": 0, - "tcp_closing": 0, - "tcp_none": 0, - "udp_socket": 1, - } - acc.AssertContainsTaggedFields(t, "netstat", fields3, make(map[string]string)) - - acc.Metrics = nil - err = (&NetIOStats{ps: &mps, IgnoreProtocolStats: true}).Gather(&acc) - require.NoError(t, err) - - acc.AssertDoesNotContainsTaggedFields(t, "netstat", fields3, make(map[string]string)) } diff --git a/plugins/inputs/netstat/netstat_test.go b/plugins/inputs/netstat/netstat_test.go new file mode 100644 index 0000000000000..8208beb3afbf5 --- /dev/null +++ b/plugins/inputs/netstat/netstat_test.go @@ -0,0 +1,65 @@ +package netstat + +import ( + "syscall" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/plugins/inputs/system" + "github.com/influxdata/telegraf/testutil" + "github.com/shirou/gopsutil/v3/net" + "github.com/stretchr/testify/require" +) + +func TestNetStats(t *testing.T) { + var mps system.MockPS + defer mps.AssertExpectations(t) + mps.On("NetConnections").Return([]net.ConnectionStat{ + { + Type: syscall.SOCK_DGRAM, + }, + { + Status: "ESTABLISHED", + }, + { + Status: "ESTABLISHED", + }, + { + Status: "CLOSE", + }, + }, nil) + + var acc testutil.Accumulator + require.NoError(t, (&NetStats{PS: &mps}).Gather(&acc)) + + expected := []telegraf.Metric{ + metric.New( + "netstat", + map[string]string{}, + map[string]interface{}{ + "tcp_established": 2, + "tcp_syn_sent": 0, + "tcp_syn_recv": 0, + "tcp_fin_wait1": 0, + "tcp_fin_wait2": 0, + "tcp_time_wait": 0, + "tcp_close": 1, + "tcp_close_wait": 0, + "tcp_last_ack": 0, + "tcp_listen": 0, + "tcp_closing": 0, + "tcp_none": 0, + "udp_socket": 1, + }, + time.Unix(0, 0), + ), + } + + testutil.RequireMetricsEqual(t, + expected, + acc.GetTelegrafMetrics(), + testutil.IgnoreTime(), + ) +} diff --git a/plugins/inputs/nfsclient/README.md 
b/plugins/inputs/nfsclient/README.md index 149900bb75bbd..f144c3e503a03 100644 --- a/plugins/inputs/nfsclient/README.md +++ b/plugins/inputs/nfsclient/README.md @@ -45,7 +45,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. ## NFSv3 and NFSv4 have different lists. While it is not possible to ## have different include/exclude lists for NFSv3/4, unused elements ## in the list should be okay. It is possible to have different lists - ## for different mountpoints: use mulitple [[input.nfsclient]] stanzas, + ## for different mountpoints: use multiple [[input.nfsclient]] stanzas, ## with their own lists. See "include_mounts" above, and be careful of ## duplicate metrics. # include_operations = [] @@ -89,9 +89,9 @@ MOUNT_PROC: /host/proc/self/mountstats - bytes (integer, bytes) - The total number of bytes exchanged doing this operation. This is bytes sent _and_ received, including overhead _and_ payload. (bytes = OP_bytes_sent + OP_bytes_recv. See nfs_ops below) - ops (integer, count) - The number of operations of this type executed. - retrans (integer, count) - The number of times an operation had to be retried (retrans = OP_trans - OP_ops. See nfs_ops below) - - exe (integer, miliseconds) - The number of miliseconds it took to process the operations. - - rtt (integer, miliseconds) - The total round-trip time for all operations. - - rtt_per_op (float, miliseconds) - The average round-trip time per operation. + - exe (integer, milliseconds) - The number of milliseconds it took to process the operations. + - rtt (integer, milliseconds) - The total round-trip time for all operations. + - rtt_per_op (float, milliseconds) - The average round-trip time per operation. In addition enabling `fullstat` will make many more metrics available. 
diff --git a/plugins/inputs/nfsclient/sample.conf b/plugins/inputs/nfsclient/sample.conf index fbd1371c5f35c..d84451374ef93 100644 --- a/plugins/inputs/nfsclient/sample.conf +++ b/plugins/inputs/nfsclient/sample.conf @@ -20,7 +20,7 @@ ## NFSv3 and NFSv4 have different lists. While it is not possible to ## have different include/exclude lists for NFSv3/4, unused elements ## in the list should be okay. It is possible to have different lists - ## for different mountpoints: use mulitple [[input.nfsclient]] stanzas, + ## for different mountpoints: use multiple [[input.nfsclient]] stanzas, ## with their own lists. See "include_mounts" above, and be careful of ## duplicate metrics. # include_operations = [] diff --git a/plugins/inputs/nginx_plus/nginx_plus_test.go b/plugins/inputs/nginx_plus/nginx_plus_test.go index 24aa708d567d1..cac4676b0dffd 100644 --- a/plugins/inputs/nginx_plus/nginx_plus_test.go +++ b/plugins/inputs/nginx_plus/nginx_plus_test.go @@ -253,7 +253,7 @@ func TestNginxPlusGeneratesMetrics(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var rsp string - require.Equal(t, r.URL.Path, "/status", "Cannot handle request") + require.Equal(t, "/status", r.URL.Path, "Cannot handle request") rsp = sampleStatusResponse w.Header()["Content-Type"] = []string{"application/json"} diff --git a/plugins/inputs/nginx_sts/nginx_sts_test.go b/plugins/inputs/nginx_sts/nginx_sts_test.go index 9ebb5f91ad9d8..121036167696a 100644 --- a/plugins/inputs/nginx_sts/nginx_sts_test.go +++ b/plugins/inputs/nginx_sts/nginx_sts_test.go @@ -8,8 +8,9 @@ import ( "net/url" "testing" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) const sampleStatusResponse = ` @@ -166,7 +167,7 @@ func TestNginxPlusGeneratesMetrics(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var rsp string 
- require.Equal(t, r.URL.Path, "/status", "Cannot handle request") + require.Equal(t, "/status", r.URL.Path, "Cannot handle request") rsp = sampleStatusResponse w.Header()["Content-Type"] = []string{"application/json"} diff --git a/plugins/inputs/nginx_upstream_check/nginx_upstream_check_test.go b/plugins/inputs/nginx_upstream_check/nginx_upstream_check_test.go index 353619b362228..e12bb1e2ccbe3 100644 --- a/plugins/inputs/nginx_upstream_check/nginx_upstream_check_test.go +++ b/plugins/inputs/nginx_upstream_check/nginx_upstream_check_test.go @@ -6,8 +6,9 @@ import ( "net/http/httptest" "testing" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) const sampleStatusResponse = ` @@ -45,7 +46,7 @@ func TestNginxUpstreamCheckData(test *testing.T) { testServer := httptest.NewServer(http.HandlerFunc(func(responseWriter http.ResponseWriter, request *http.Request) { var response string - require.Equal(test, request.URL.Path, "/status", "Cannot handle request") + require.Equal(test, "/status", request.URL.Path, "Cannot handle request") response = sampleStatusResponse responseWriter.Header()["Content-Type"] = []string{"application/json"} @@ -102,7 +103,7 @@ func TestNginxUpstreamCheckRequest(test *testing.T) { testServer := httptest.NewServer(http.HandlerFunc(func(responseWriter http.ResponseWriter, request *http.Request) { var response string - require.Equal(test, request.URL.Path, "/status", "Cannot handle request") + require.Equal(test, "/status", request.URL.Path, "Cannot handle request") response = sampleStatusResponse responseWriter.Header()["Content-Type"] = []string{"application/json"} @@ -110,10 +111,10 @@ func TestNginxUpstreamCheckRequest(test *testing.T) { _, err := fmt.Fprintln(responseWriter, response) require.NoError(test, err) - require.Equal(test, request.Method, "POST") - require.Equal(test, request.Header.Get("X-Test"), "test-value") - require.Equal(test, 
request.Header.Get("Authorization"), "Basic dXNlcjpwYXNzd29yZA==") - require.Equal(test, request.Host, "status.local") + require.Equal(test, "POST", request.Method) + require.Equal(test, "test-value", request.Header.Get("X-Test")) + require.Equal(test, "Basic dXNlcjpwYXNzd29yZA==", request.Header.Get("Authorization")) + require.Equal(test, "status.local", request.Host) })) defer testServer.Close() diff --git a/plugins/inputs/nginx_vts/nginx_vts_test.go b/plugins/inputs/nginx_vts/nginx_vts_test.go index 589bc634f9358..cb4567947d47e 100644 --- a/plugins/inputs/nginx_vts/nginx_vts_test.go +++ b/plugins/inputs/nginx_vts/nginx_vts_test.go @@ -8,8 +8,9 @@ import ( "net/url" "testing" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) const sampleStatusResponse = ` @@ -203,7 +204,7 @@ func TestNginxPlusGeneratesMetrics(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var rsp string - require.Equal(t, r.URL.Path, "/status", "Cannot handle request") + require.Equal(t, "/status", r.URL.Path, "Cannot handle request") rsp = sampleStatusResponse w.Header()["Content-Type"] = []string{"application/json"} diff --git a/plugins/inputs/openntpd/openntpd_test.go b/plugins/inputs/openntpd/openntpd_test.go index ffca02b31a908..88bb206480199 100644 --- a/plugins/inputs/openntpd/openntpd_test.go +++ b/plugins/inputs/openntpd/openntpd_test.go @@ -25,9 +25,9 @@ func TestParseSimpleOutput(t *testing.T) { require.NoError(t, err) require.True(t, acc.HasMeasurement("openntpd")) - require.Equal(t, acc.NMetrics(), uint64(1)) + require.Equal(t, uint64(1), acc.NMetrics()) - require.Equal(t, acc.NFields(), 7) + require.Equal(t, 7, acc.NFields()) firstpeerfields := map[string]interface{}{ "wt": int64(1), @@ -56,9 +56,9 @@ func TestParseSimpleOutputwithStatePrefix(t *testing.T) { require.NoError(t, err) require.True(t, 
acc.HasMeasurement("openntpd")) - require.Equal(t, acc.NMetrics(), uint64(1)) + require.Equal(t, uint64(1), acc.NMetrics()) - require.Equal(t, acc.NFields(), 7) + require.Equal(t, 7, acc.NFields()) firstpeerfields := map[string]interface{}{ "wt": int64(1), @@ -88,9 +88,9 @@ func TestParseSimpleOutputInvalidPeer(t *testing.T) { require.NoError(t, err) require.True(t, acc.HasMeasurement("openntpd")) - require.Equal(t, acc.NMetrics(), uint64(1)) + require.Equal(t, uint64(1), acc.NMetrics()) - require.Equal(t, acc.NFields(), 4) + require.Equal(t, 4, acc.NFields()) firstpeerfields := map[string]interface{}{ "wt": int64(1), @@ -116,9 +116,9 @@ func TestParseSimpleOutputServersDNSError(t *testing.T) { require.NoError(t, err) require.True(t, acc.HasMeasurement("openntpd")) - require.Equal(t, acc.NMetrics(), uint64(1)) + require.Equal(t, uint64(1), acc.NMetrics()) - require.Equal(t, acc.NFields(), 4) + require.Equal(t, 4, acc.NFields()) firstpeerfields := map[string]interface{}{ "next": int64(2), @@ -158,9 +158,9 @@ func TestParseSimpleOutputServerDNSError(t *testing.T) { require.NoError(t, err) require.True(t, acc.HasMeasurement("openntpd")) - require.Equal(t, acc.NMetrics(), uint64(1)) + require.Equal(t, uint64(1), acc.NMetrics()) - require.Equal(t, acc.NFields(), 4) + require.Equal(t, 4, acc.NFields()) firstpeerfields := map[string]interface{}{ "next": int64(12), @@ -186,9 +186,9 @@ func TestParseFullOutput(t *testing.T) { require.NoError(t, err) require.True(t, acc.HasMeasurement("openntpd")) - require.Equal(t, acc.NMetrics(), uint64(20)) + require.Equal(t, uint64(20), acc.NMetrics()) - require.Equal(t, acc.NFields(), 113) + require.Equal(t, 113, acc.NFields()) firstpeerfields := map[string]interface{}{ "wt": int64(1), diff --git a/plugins/inputs/opensearch_query/README.md b/plugins/inputs/opensearch_query/README.md index 77dba39705681..00a47fc23b9aa 100755 --- a/plugins/inputs/opensearch_query/README.md +++ b/plugins/inputs/opensearch_query/README.md @@ -33,7 +33,7 @@ 
See the [CONFIGURATION.md][CONFIGURATION.md] for more details. # Derive metrics from aggregating OpenSearch query results [[inputs.opensearch_query]] ## OpenSearch cluster endpoint(s). Multiple urls can be specified as part - ## of the same cluster. Only one succesful call will be made per interval. + ## of the same cluster. Only one successful call will be made per interval. urls = [ "https://node1.os.example.com:9200" ] # required. ## OpenSearch client timeout, defaults to "5s". diff --git a/plugins/inputs/opensearch_query/sample.conf b/plugins/inputs/opensearch_query/sample.conf index 30b672e23fa7b..7d1dae43c33ec 100644 --- a/plugins/inputs/opensearch_query/sample.conf +++ b/plugins/inputs/opensearch_query/sample.conf @@ -1,7 +1,7 @@ # Derive metrics from aggregating OpenSearch query results [[inputs.opensearch_query]] ## OpenSearch cluster endpoint(s). Multiple urls can be specified as part - ## of the same cluster. Only one succesful call will be made per interval. + ## of the same cluster. Only one successful call will be made per interval. urls = [ "https://node1.os.example.com:9200" ] # required. ## OpenSearch client timeout, defaults to "5s". 
diff --git a/plugins/inputs/opensmtpd/opensmtpd_test.go b/plugins/inputs/opensmtpd/opensmtpd_test.go index 3b625be51cef2..47e536c728cb9 100644 --- a/plugins/inputs/opensmtpd/opensmtpd_test.go +++ b/plugins/inputs/opensmtpd/opensmtpd_test.go @@ -25,9 +25,9 @@ func TestFilterSomeStats(t *testing.T) { require.NoError(t, err) require.True(t, acc.HasMeasurement("opensmtpd")) - require.Equal(t, acc.NMetrics(), uint64(1)) + require.Equal(t, uint64(1), acc.NMetrics()) - require.Equal(t, acc.NFields(), 36) + require.Equal(t, 36, acc.NFields()) acc.AssertContainsFields(t, "opensmtpd", parsedFullOutput) } diff --git a/plugins/inputs/openweathermap/README.md b/plugins/inputs/openweathermap/README.md index 4c8afe46cab36..402d8a2df8d7b 100644 --- a/plugins/inputs/openweathermap/README.md +++ b/plugins/inputs/openweathermap/README.md @@ -57,7 +57,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. # query_style = "batch" ## Query interval to fetch data. - ## By default the gloabl 'interval' setting is used. You should override the + ## By default the global 'interval' setting is used. You should override the ## interval here if the global setting is shorter than 10 minutes as ## OpenWeatherMap weather data is only updated every 10 minutes. # interval = "10m" diff --git a/plugins/inputs/openweathermap/sample.conf b/plugins/inputs/openweathermap/sample.conf index d44bc064036d2..b8134899b7e5a 100644 --- a/plugins/inputs/openweathermap/sample.conf +++ b/plugins/inputs/openweathermap/sample.conf @@ -33,7 +33,7 @@ # query_style = "batch" ## Query interval to fetch data. - ## By default the gloabl 'interval' setting is used. You should override the + ## By default the global 'interval' setting is used. You should override the ## interval here if the global setting is shorter than 10 minutes as ## OpenWeatherMap weather data is only updated every 10 minutes. 
# interval = "10m" diff --git a/plugins/inputs/p4runtime/p4runtime_test.go b/plugins/inputs/p4runtime/p4runtime_test.go index fe1c3f89dfc92..3bb297e38ca4a 100644 --- a/plugins/inputs/p4runtime/p4runtime_test.go +++ b/plugins/inputs/p4runtime/p4runtime_test.go @@ -641,9 +641,8 @@ func TestFilterCounterNamesInclude(t *testing.T) { filteredCounters := filterCounters(counters, counterNamesInclude) require.Equal( t, - filteredCounters, []*p4ConfigV1.Counter{ createCounter("bar", 2, p4ConfigV1.CounterSpec_BOTH), - }, + }, filteredCounters, ) } diff --git a/plugins/inputs/ping/ping_test.go b/plugins/inputs/ping/ping_test.go index 46e1db3f657c9..205420e22519a 100644 --- a/plugins/inputs/ping/ping_test.go +++ b/plugins/inputs/ping/ping_test.go @@ -222,7 +222,7 @@ func TestArguments(t *testing.T) { for _, system := range []string{"darwin", "linux", "anything else"} { actual := p.args("www.google.com", system) - require.Equal(t, actual, expected) + require.Equal(t, expected, actual) } } @@ -405,7 +405,7 @@ func TestPingBinary(t *testing.T) { Urls: []string{"www.google.com"}, Binary: "ping6", pingHost: func(binary string, timeout float64, args ...string) (string, error) { - require.Equal(t, binary, "ping6") + require.Equal(t, "ping6", binary) return "", nil }, } diff --git a/plugins/inputs/ping/ping_windows_test.go b/plugins/inputs/ping/ping_windows_test.go index a704d39c78a55..f5322289155d9 100644 --- a/plugins/inputs/ping/ping_windows_test.go +++ b/plugins/inputs/ping/ping_windows_test.go @@ -368,7 +368,7 @@ func TestPingBinary(t *testing.T) { Urls: []string{"www.google.com"}, Binary: "ping6", pingHost: func(binary string, timeout float64, args ...string) (string, error) { - require.True(t, binary == "ping6") + require.Equal(t, "ping6", binary) return "", nil }, } diff --git a/plugins/inputs/powerdns/README.md b/plugins/inputs/powerdns/README.md index 724abd6c115fa..f3237974387ff 100644 --- a/plugins/inputs/powerdns/README.md +++ b/plugins/inputs/powerdns/README.md @@ -25,15 
+25,16 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. ### Permissions -Telegraf will need read access to the powerdns control socket. - -On many systems this can be accomplished by adding the `telegraf` user to the -`pdns` group: +Telegraf will need access to the powerdns control socket. On many systems this +can be accomplished by adding the `telegraf` user to the `pdns` group: ```sh usermod telegraf -a -G pdns ``` +Additionally, telegraf may need additional permissions. Look at the +`socket-mode` PowerDNS option to set permissions on the socket. + ## Metrics - powerdns diff --git a/plugins/inputs/procstat/README.md b/plugins/inputs/procstat/README.md index ae15e497a1d70..a26965670c5af 100644 --- a/plugins/inputs/procstat/README.md +++ b/plugins/inputs/procstat/README.md @@ -12,6 +12,7 @@ Processes can be selected for monitoring using one of several methods: - user - systemd_unit - cgroup +- supervisor_unit - win_service ## Global configuration options @@ -41,6 +42,8 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. # include_systemd_children = false ## CGroup name or path, supports globs # cgroup = "systemd/system.slice/nginx.service" + ## Supervisor service names of supervisorctl management + # supervisor_units = ["webserver", "proxy"] ## Windows service name # win_service = "" @@ -78,6 +81,11 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. Preliminary support for Windows has been added, however you may prefer using the `win_perf_counters` input plugin as a more mature alternative. +### Darwin specifics + +If you use this plugin with `supervisor_units` *and* `pattern` on Darwin, you +**have to** use the `pgrep` finder as the underlying library relies on `pgrep`. + ### Permissions Some files or directories may require elevated permissions. 
As such a user may @@ -109,6 +117,7 @@ Below are an example set of tags and fields: - systemd_unit (when defined) - cgroup (when defined) - cgroup_full (when cgroup or systemd_unit is used with glob) + - supervisor_unit (when defined) - win_service (when defined) - fields: - child_major_faults (int) @@ -179,6 +188,7 @@ Below are an example set of tags and fields: - user - systemd_unit - cgroup + - supervisor_unit - win_service - result - fields: diff --git a/plugins/inputs/procstat/dev/telegraf.conf b/plugins/inputs/procstat/dev/telegraf.conf deleted file mode 100644 index 63b150d7cc125..0000000000000 --- a/plugins/inputs/procstat/dev/telegraf.conf +++ /dev/null @@ -1,9 +0,0 @@ -[agent] - interval="1s" - flush_interval="1s" - -[[inputs.procstat]] - exe = "telegraf" - -[[outputs.file]] - files = ["stdout"] diff --git a/plugins/inputs/procstat/memmap_notlinux.go b/plugins/inputs/procstat/memmap_notlinux.go deleted file mode 100644 index 5d8733a76d40a..0000000000000 --- a/plugins/inputs/procstat/memmap_notlinux.go +++ /dev/null @@ -1,5 +0,0 @@ -//go:build !linux - -package procstat - -func collectMemmap(Process, string, map[string]any) {} diff --git a/plugins/inputs/procstat/native_finder.go b/plugins/inputs/procstat/native_finder.go index 13fea9fe3bb1f..90af5eb14a02e 100644 --- a/plugins/inputs/procstat/native_finder.go +++ b/plugins/inputs/procstat/native_finder.go @@ -11,13 +11,7 @@ import ( ) // NativeFinder uses gopsutil to find processes -type NativeFinder struct { -} - -// NewNativeFinder ... 
-func NewNativeFinder() (PIDFinder, error) { - return &NativeFinder{}, nil -} +type NativeFinder struct{} // Uid will return all pids for the given user func (pg *NativeFinder) UID(user string) ([]PID, error) { @@ -80,6 +74,27 @@ func (pg *NativeFinder) FullPattern(pattern string) ([]PID, error) { return pids, err } +// Children matches children pids on the command line when the process was executed +func (pg *NativeFinder) Children(pid PID) ([]PID, error) { + // Get all running processes + p, err := process.NewProcess(int32(pid)) + if err != nil { + return nil, fmt.Errorf("getting process %d failed: %w", pid, err) + } + + // Get all children of the current process + children, err := p.Children() + if err != nil { + return nil, fmt.Errorf("unable to get children of process %d: %w", p.Pid, err) + } + pids := make([]PID, 0, len(children)) + for _, child := range children { + pids = append(pids, PID(child.Pid)) + } + + return pids, err +} + func (pg *NativeFinder) FastProcessList() ([]*process.Process, error) { pids, err := process.Pids() if err != nil { @@ -92,3 +107,28 @@ func (pg *NativeFinder) FastProcessList() ([]*process.Process, error) { } return result, nil } + +// Pattern matches on the process name +func (pg *NativeFinder) Pattern(pattern string) ([]PID, error) { + var pids []PID + regxPattern, err := regexp.Compile(pattern) + if err != nil { + return pids, err + } + procs, err := pg.FastProcessList() + if err != nil { + return pids, err + } + for _, p := range procs { + name, err := processName(p) + if err != nil { + //skip, this can be caused by the pid no longer existing + //or you having no permissions to access it + continue + } + if regxPattern.MatchString(name) { + pids = append(pids, PID(p.Pid)) + } + } + return pids, err +} diff --git a/plugins/inputs/procstat/native_finder_notwindows.go b/plugins/inputs/procstat/native_finder_notwindows.go deleted file mode 100644 index ed5bf164ef4c1..0000000000000 --- 
a/plugins/inputs/procstat/native_finder_notwindows.go +++ /dev/null @@ -1,32 +0,0 @@ -//go:build !windows - -package procstat - -import ( - "regexp" -) - -// Pattern matches on the process name -func (pg *NativeFinder) Pattern(pattern string) ([]PID, error) { - var pids []PID - regxPattern, err := regexp.Compile(pattern) - if err != nil { - return pids, err - } - procs, err := pg.FastProcessList() - if err != nil { - return pids, err - } - for _, p := range procs { - name, err := p.Exe() - if err != nil { - //skip, this can be caused by the pid no longer existing - //or you having no permissions to access it - continue - } - if regxPattern.MatchString(name) { - pids = append(pids, PID(p.Pid)) - } - } - return pids, err -} diff --git a/plugins/inputs/procstat/native_finder_test.go b/plugins/inputs/procstat/native_finder_test.go index 56d1e578cad88..eaccd463148c3 100644 --- a/plugins/inputs/procstat/native_finder_test.go +++ b/plugins/inputs/procstat/native_finder_test.go @@ -1,29 +1,98 @@ package procstat import ( + "context" + "os" + "os/exec" + "os/user" + "runtime" "testing" "github.com/stretchr/testify/require" ) func BenchmarkPattern(b *testing.B) { - f, err := NewNativeFinder() - require.NoError(b, err) + finder := &NativeFinder{} for n := 0; n < b.N; n++ { - _, err := f.Pattern(".*") - if err != nil { - panic(err) - } + _, err := finder.Pattern(".*") + require.NoError(b, err) } } func BenchmarkFullPattern(b *testing.B) { - f, err := NewNativeFinder() - require.NoError(b, err) + finder := &NativeFinder{} for n := 0; n < b.N; n++ { - _, err := f.FullPattern(".*") - if err != nil { - panic(err) - } + _, err := finder.FullPattern(".*") + require.NoError(b, err) } } + +func TestChildPattern(t *testing.T) { + if runtime.GOOS == "windows" || runtime.GOOS == "darwin" { + t.Skip("Skipping test on unsupported platform") + } + + // Get our own process name + parentName, err := os.Executable() + require.NoError(t, err) + + // Spawn two child processes and get 
their PIDs + expected := make([]PID, 0, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // First process + cmd1 := exec.CommandContext(ctx, "/bin/sh") + require.NoError(t, cmd1.Start(), "starting first command failed") + expected = append(expected, PID(cmd1.Process.Pid)) + + // Second process + cmd2 := exec.CommandContext(ctx, "/bin/sh") + require.NoError(t, cmd2.Start(), "starting second command failed") + expected = append(expected, PID(cmd2.Process.Pid)) + + // Use the plugin to find the children + finder := &NativeFinder{} + parent, err := finder.Pattern(parentName) + require.NoError(t, err) + require.Len(t, parent, 1) + childs, err := finder.Children(parent[0]) + require.NoError(t, err) + require.ElementsMatch(t, expected, childs) +} + +func TestGather_RealPatternIntegration(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + pg := &NativeFinder{} + pids, err := pg.Pattern(`procstat`) + require.NoError(t, err) + require.NotEmpty(t, pids) +} + +func TestGather_RealFullPatternIntegration(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + if runtime.GOOS != "windows" { + t.Skip("Skipping integration test on Non-Windows OS") + } + pg := &NativeFinder{} + pids, err := pg.FullPattern(`%procstat%`) + require.NoError(t, err) + require.NotEmpty(t, pids) +} + +func TestGather_RealUserIntegration(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + currentUser, err := user.Current() + require.NoError(t, err) + + pg := &NativeFinder{} + pids, err := pg.UID(currentUser.Username) + require.NoError(t, err) + require.NotEmpty(t, pids) +} diff --git a/plugins/inputs/procstat/native_finder_windows.go b/plugins/inputs/procstat/native_finder_windows.go deleted file mode 100644 index 6dcc0575af258..0000000000000 --- a/plugins/inputs/procstat/native_finder_windows.go +++ /dev/null @@ -1,30 +0,0 @@ -package procstat - 
-import ( - "regexp" -) - -// Pattern matches on the process name -func (pg *NativeFinder) Pattern(pattern string) ([]PID, error) { - var pids []PID - regxPattern, err := regexp.Compile(pattern) - if err != nil { - return pids, err - } - procs, err := pg.FastProcessList() - if err != nil { - return pids, err - } - for _, p := range procs { - name, err := p.Name() - if err != nil { - //skip, this can be caused by the pid no longer existing - //or you having no permissions to access it - continue - } - if regxPattern.MatchString(name) { - pids = append(pids, PID(p.Pid)) - } - } - return pids, err -} diff --git a/plugins/inputs/procstat/native_finder_windows_test.go b/plugins/inputs/procstat/native_finder_windows_test.go deleted file mode 100644 index 2a90344fa6761..0000000000000 --- a/plugins/inputs/procstat/native_finder_windows_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package procstat - -import ( - "fmt" - "os/user" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestGather_RealPatternIntegration(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } - pg, err := NewNativeFinder() - require.NoError(t, err) - pids, err := pg.Pattern(`procstat`) - require.NoError(t, err) - fmt.Println(pids) - require.Equal(t, len(pids) > 0, true) -} - -func TestGather_RealFullPatternIntegration(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } - pg, err := NewNativeFinder() - require.NoError(t, err) - pids, err := pg.FullPattern(`%procstat%`) - require.NoError(t, err) - fmt.Println(pids) - require.Equal(t, len(pids) > 0, true) -} - -func TestGather_RealUserIntegration(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } - currentUser, err := user.Current() - require.NoError(t, err) - pg, err := NewNativeFinder() - require.NoError(t, err) - pids, err := pg.UID(currentUser.Username) - require.NoError(t, err) - fmt.Println(pids) - 
require.Equal(t, len(pids) > 0, true) -} diff --git a/plugins/inputs/procstat/memmap.go b/plugins/inputs/procstat/os_linux.go similarity index 73% rename from plugins/inputs/procstat/memmap.go rename to plugins/inputs/procstat/os_linux.go index e3e0dee4dfacd..598cf6d4c43c4 100644 --- a/plugins/inputs/procstat/memmap.go +++ b/plugins/inputs/procstat/os_linux.go @@ -2,6 +2,20 @@ package procstat +import ( + "errors" + + "github.com/shirou/gopsutil/v3/process" +) + +func processName(p *process.Process) (string, error) { + return p.Exe() +} + +func queryPidWithWinServiceName(_ string) (uint32, error) { + return 0, errors.New("os not supporting win_service option") +} + func collectMemmap(proc Process, prefix string, fields map[string]any) { memMapStats, err := proc.MemoryMaps(true) if err == nil && len(*memMapStats) == 1 { diff --git a/plugins/inputs/procstat/os_others.go b/plugins/inputs/procstat/os_others.go new file mode 100644 index 0000000000000..2d6e7f860bb1a --- /dev/null +++ b/plugins/inputs/procstat/os_others.go @@ -0,0 +1,19 @@ +//go:build !linux && !windows + +package procstat + +import ( + "errors" + + "github.com/shirou/gopsutil/v3/process" +) + +func processName(p *process.Process) (string, error) { + return p.Exe() +} + +func queryPidWithWinServiceName(_ string) (uint32, error) { + return 0, errors.New("os not supporting win_service option") +} + +func collectMemmap(Process, string, map[string]any) {} diff --git a/plugins/inputs/procstat/win_service_windows.go b/plugins/inputs/procstat/os_windows.go similarity index 86% rename from plugins/inputs/procstat/win_service_windows.go rename to plugins/inputs/procstat/os_windows.go index a0426541df240..12f9cec8022c4 100644 --- a/plugins/inputs/procstat/win_service_windows.go +++ b/plugins/inputs/procstat/os_windows.go @@ -6,10 +6,15 @@ import ( "errors" "unsafe" + "github.com/shirou/gopsutil/v3/process" "golang.org/x/sys/windows" "golang.org/x/sys/windows/svc/mgr" ) +func processName(p 
*process.Process) (string, error) { + return p.Name() +} + func getService(name string) (*mgr.Service, error) { m, err := mgr.Connect() if err != nil { @@ -48,3 +53,5 @@ func queryPidWithWinServiceName(winServiceName string) (uint32, error) { return p.ProcessId, nil } + +func collectMemmap(Process, string, map[string]any) {} diff --git a/plugins/inputs/procstat/pgrep.go b/plugins/inputs/procstat/pgrep.go index 5f44f00c62df5..e80ed640bbab7 100644 --- a/plugins/inputs/procstat/pgrep.go +++ b/plugins/inputs/procstat/pgrep.go @@ -15,7 +15,7 @@ type Pgrep struct { path string } -func NewPgrep() (PIDFinder, error) { +func newPgrepFinder() (PIDFinder, error) { path, err := exec.LookPath("pgrep") if err != nil { return nil, fmt.Errorf("could not find pgrep binary: %w", err) @@ -40,43 +40,38 @@ func (pg *Pgrep) PidFile(path string) ([]PID, error) { func (pg *Pgrep) Pattern(pattern string) ([]PID, error) { args := []string{pattern} - return find(pg.path, args) + return pg.find(args) } func (pg *Pgrep) UID(user string) ([]PID, error) { args := []string{"-u", user} - return find(pg.path, args) + return pg.find(args) } func (pg *Pgrep) FullPattern(pattern string) ([]PID, error) { args := []string{"-f", pattern} - return find(pg.path, args) + return pg.find(args) } -func find(path string, args []string) ([]PID, error) { - out, err := run(path, args) - if err != nil { - return nil, err - } - - return parseOutput(out) +func (pg *Pgrep) Children(pid PID) ([]PID, error) { + args := []string{"-P", strconv.FormatInt(int64(pid), 10)} + return pg.find(args) } -func run(path string, args []string) (string, error) { - out, err := exec.Command(path, args...).Output() - - //if exit code 1, ie no processes found, do not return error - if i, _ := internal.ExitStatus(err); i == 1 { - return "", nil - } - +func (pg *Pgrep) find(args []string) ([]PID, error) { + // Execute pgrep with the given arguments + buf, err := exec.Command(pg.path, args...).Output() if err != nil { - return "", 
fmt.Errorf("error running %q: %w", path, err) + // Exit code 1 means "no processes found" so we should not return + // an error in this case. + if status, _ := internal.ExitStatus(err); status == 1 { + return nil, nil + } + return nil, fmt.Errorf("error running %q: %w", pg.path, err) } - return string(out), err -} + out := string(buf) -func parseOutput(out string) ([]PID, error) { + // Parse the command output to extract the PIDs pids := []PID{} fields := strings.Fields(out) for _, field := range fields { diff --git a/plugins/inputs/procstat/process.go b/plugins/inputs/procstat/process.go index 7d7509af2462d..bcff5419b07c4 100644 --- a/plugins/inputs/procstat/process.go +++ b/plugins/inputs/procstat/process.go @@ -2,34 +2,20 @@ package procstat import ( "fmt" + "runtime" "time" - "github.com/shirou/gopsutil/v3/cpu" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" "github.com/shirou/gopsutil/v3/process" ) -//nolint:interfacebloat // conditionally allow to contain more methods type Process interface { PID() PID - Tags() map[string]string - - PageFaults() (*process.PageFaultsStat, error) - IOCounters() (*process.IOCountersStat, error) - MemoryInfo() (*process.MemoryInfoStat, error) - MemoryMaps(bool) (*[]process.MemoryMapsStat, error) Name() (string, error) - Cmdline() (string, error) - NumCtxSwitches() (*process.NumCtxSwitchesStat, error) - NumFDs() (int32, error) - NumThreads() (int32, error) - Percent(interval time.Duration) (float64, error) - MemoryPercent() (float32, error) - Times() (*cpu.TimesStat, error) - RlimitUsage(bool) ([]process.RlimitStat, error) - Username() (string, error) - CreateTime() (int64, error) - Ppid() (int32, error) - Status() ([]string, error) + SetTag(string, string) + MemoryMaps(bool) (*[]process.MemoryMapsStat, error) + Metric(prefix string, cmdLineTag, solarisMode bool) telegraf.Metric } type PIDFinder interface { @@ -37,6 +23,7 @@ type PIDFinder interface { Pattern(pattern 
string) ([]PID, error) UID(user string) ([]PID, error) FullPattern(path string) ([]PID, error) + Children(pid PID) ([]PID, error) } type Proc struct { @@ -45,7 +32,7 @@ type Proc struct { *process.Process } -func NewProc(pid PID) (Process, error) { +func newProc(pid PID) (Process, error) { p, err := process.NewProcess(int32(pid)) if err != nil { return nil, err @@ -59,19 +46,15 @@ func NewProc(pid PID) (Process, error) { return proc, nil } -func (p *Proc) Tags() map[string]string { - return p.tags -} - func (p *Proc) PID() PID { return PID(p.Process.Pid) } -func (p *Proc) Username() (string, error) { - return p.Process.Username() +func (p *Proc) SetTag(k, v string) { + p.tags[k] = v } -func (p *Proc) Percent(_ time.Duration) (float64, error) { +func (p *Proc) percent(_ time.Duration) (float64, error) { cpuPerc, err := p.Process.Percent(time.Duration(0)) if !p.hasCPUTimes && err == nil { p.hasCPUTimes = true @@ -79,3 +62,159 @@ func (p *Proc) Percent(_ time.Duration) (float64, error) { } return cpuPerc, err } + +// Add metrics a single Process +func (p *Proc) Metric(prefix string, cmdLineTag, solarisMode bool) telegraf.Metric { + if prefix != "" { + prefix += "_" + } + + fields := make(map[string]interface{}) + + if _, nameInTags := p.tags["process_name"]; !nameInTags { + name, err := p.Name() + if err == nil { + p.tags["process_name"] = name + } + } + + if _, ok := p.tags["user"]; !ok { + user, err := p.Username() + if err == nil { + p.tags["user"] = user + } + } + + // If pid is not present as a tag, include it as a field. 
+ if _, pidInTags := p.tags["pid"]; !pidInTags { + fields["pid"] = p.Pid + } + + // Add the command line as a tag if the option is set + if cmdLineTag { + if _, ok := p.tags["cmdline"]; !ok { + cmdline, err := p.Cmdline() + if err == nil { + p.tags["cmdline"] = cmdline + } + } + } + + numThreads, err := p.NumThreads() + if err == nil { + fields[prefix+"num_threads"] = numThreads + } + + fds, err := p.NumFDs() + if err == nil { + fields[prefix+"num_fds"] = fds + } + + ctx, err := p.NumCtxSwitches() + if err == nil { + fields[prefix+"voluntary_context_switches"] = ctx.Voluntary + fields[prefix+"involuntary_context_switches"] = ctx.Involuntary + } + + faults, err := p.PageFaults() + if err == nil { + fields[prefix+"minor_faults"] = faults.MinorFaults + fields[prefix+"major_faults"] = faults.MajorFaults + fields[prefix+"child_minor_faults"] = faults.ChildMinorFaults + fields[prefix+"child_major_faults"] = faults.ChildMajorFaults + } + + io, err := p.IOCounters() + if err == nil { + fields[prefix+"read_count"] = io.ReadCount + fields[prefix+"write_count"] = io.WriteCount + fields[prefix+"read_bytes"] = io.ReadBytes + fields[prefix+"write_bytes"] = io.WriteBytes + } + + createdAt, err := p.CreateTime() // returns epoch in ms + if err == nil { + fields[prefix+"created_at"] = createdAt * 1000000 // ms to ns + } + + cpuTime, err := p.Times() + if err == nil { + fields[prefix+"cpu_time_user"] = cpuTime.User + fields[prefix+"cpu_time_system"] = cpuTime.System + fields[prefix+"cpu_time_iowait"] = cpuTime.Iowait // only reported on Linux + } + + cpuPerc, err := p.percent(time.Duration(0)) + if err == nil { + if solarisMode { + fields[prefix+"cpu_usage"] = cpuPerc / float64(runtime.NumCPU()) + } else { + fields[prefix+"cpu_usage"] = cpuPerc + } + } + + // This only returns values for RSS and VMS + mem, err := p.MemoryInfo() + if err == nil { + fields[prefix+"memory_rss"] = mem.RSS + fields[prefix+"memory_vms"] = mem.VMS + } + + collectMemmap(p, prefix, fields) + + memPerc, err 
:= p.MemoryPercent() + if err == nil { + fields[prefix+"memory_usage"] = memPerc + } + + rlims, err := p.RlimitUsage(true) + if err == nil { + for _, rlim := range rlims { + var name string + switch rlim.Resource { + case process.RLIMIT_CPU: + name = "cpu_time" + case process.RLIMIT_DATA: + name = "memory_data" + case process.RLIMIT_STACK: + name = "memory_stack" + case process.RLIMIT_RSS: + name = "memory_rss" + case process.RLIMIT_NOFILE: + name = "num_fds" + case process.RLIMIT_MEMLOCK: + name = "memory_locked" + case process.RLIMIT_AS: + name = "memory_vms" + case process.RLIMIT_LOCKS: + name = "file_locks" + case process.RLIMIT_SIGPENDING: + name = "signals_pending" + case process.RLIMIT_NICE: + name = "nice_priority" + case process.RLIMIT_RTPRIO: + name = "realtime_priority" + default: + continue + } + + fields[prefix+"rlimit_"+name+"_soft"] = rlim.Soft + fields[prefix+"rlimit_"+name+"_hard"] = rlim.Hard + if name != "file_locks" { // gopsutil doesn't currently track the used file locks count + fields[prefix+name] = rlim.Used + } + } + } + + ppid, err := p.Ppid() + if err == nil { + fields[prefix+"ppid"] = ppid + } + + status, err := p.Status() + if err == nil { + fields[prefix+"status"] = status[0] + } + + return metric.New("procstat", p.tags, fields, time.Time{}) +} diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index 51fe845069e47..380210c550788 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -4,6 +4,7 @@ package procstat import ( "bytes" _ "embed" + "errors" "fmt" "os" "os/exec" @@ -13,397 +14,348 @@ import ( "strings" "time" - "github.com/shirou/gopsutil/v3/process" - "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal/choice" "github.com/influxdata/telegraf/plugins/inputs" ) //go:embed sample.conf var sampleConfig string -var ( - defaultPIDFinder = NewPgrep - defaultProcess = NewProc -) +// execCommand is so tests 
can mock out exec.Command usage. +var execCommand = exec.Command type PID int32 type Procstat struct { - PidFinder string `toml:"pid_finder"` - PidFile string `toml:"pid_file"` - Exe string - Pattern string - Prefix string - CmdLineTag bool `toml:"cmdline_tag"` - ProcessName string - User string - SystemdUnit string `toml:"systemd_unit"` - IncludeSystemdChildren bool `toml:"include_systemd_children"` - CGroup string `toml:"cgroup"` - PidTag bool - WinService string `toml:"win_service"` - Mode string + PidFinder string `toml:"pid_finder"` + PidFile string `toml:"pid_file"` + Exe string `toml:"exe"` + Pattern string `toml:"pattern"` + Prefix string `toml:"prefix"` + CmdLineTag bool `toml:"cmdline_tag"` + ProcessName string `toml:"process_name"` + User string `toml:"user"` + SystemdUnits string `toml:"systemd_units"` + SupervisorUnit []string `toml:"supervisor_unit" deprecated:"1.29.0;use 'supervisor_units' instead"` + SupervisorUnits []string `toml:"supervisor_units"` + IncludeSystemdChildren bool `toml:"include_systemd_children"` + CGroup string `toml:"cgroup"` + PidTag bool `toml:"pid_tag"` + WinService string `toml:"win_service"` + Mode string `toml:"mode"` + Log telegraf.Logger `toml:"-"` solarisMode bool + finder PIDFinder + processes map[PID]Process - finder PIDFinder - - createPIDFinder func() (PIDFinder, error) - procs map[PID]Process - createProcess func(PID) (Process, error) + createProcess func(PID) (Process, error) } type PidsTags struct { - PIDS []PID + PIDs []PID Tags map[string]string - Err error } func (*Procstat) SampleConfig() string { return sampleConfig } -func (p *Procstat) Gather(acc telegraf.Accumulator) error { - if p.createPIDFinder == nil { - switch p.PidFinder { - case "native": - p.createPIDFinder = NewNativeFinder - case "pgrep": - p.createPIDFinder = NewPgrep - default: - p.PidFinder = "pgrep" - p.createPIDFinder = defaultPIDFinder - } - } - if p.createProcess == nil { - p.createProcess = defaultProcess - } +func (p *Procstat) Init() 
error { + // Check solaris mode + p.solarisMode = strings.ToLower(p.Mode) == "solaris" - pidCount := 0 - now := time.Now() - newProcs := make(map[PID]Process, len(p.procs)) - tags := make(map[string]string) - pidTags := p.findPids() - for _, pidTag := range pidTags { - pids := pidTag.PIDS - err := pidTag.Err - pidCount += len(pids) - for key, value := range pidTag.Tags { - tags[key] = value - } - if err != nil { - fields := map[string]interface{}{ - "pid_count": 0, - "running": 0, - "result_code": 1, - } - tags["pid_finder"] = p.PidFinder - tags["result"] = "lookup_error" - acc.AddFields("procstat_lookup", fields, tags, now) - return err + // Keep the old settings for compatibility + for _, u := range p.SupervisorUnit { + if !choice.Contains(u, p.SupervisorUnits) { + p.SupervisorUnits = append(p.SupervisorUnits, u) } - - p.updateProcesses(pids, pidTag.Tags, p.procs, newProcs) } - p.procs = newProcs - for _, proc := range p.procs { - p.addMetric(proc, acc, now) + // Check filtering + switch { + case len(p.SupervisorUnits) > 0, p.SystemdUnits != "", p.WinService != "", + p.CGroup != "", p.PidFile != "", p.Exe != "", p.Pattern != "", + p.User != "": + // Do nothing as those are valid settings + default: + return errors.New("require filter option but none set") } - fields := map[string]interface{}{ - "pid_count": pidCount, - "running": len(p.procs), - "result_code": 0, + // Instantiate the finder + switch p.PidFinder { + case "", "pgrep": + p.PidFinder = "pgrep" + finder, err := newPgrepFinder() + if err != nil { + return fmt.Errorf("creating pgrep finder failed: %w", err) + } + p.finder = finder + case "native": + // gopsutil relies on pgrep when looking up children on darwin + // see https://github.com/shirou/gopsutil/blob/v3.23.10/process/process_darwin.go#L235 + requiresChildren := len(p.SupervisorUnits) > 0 && p.Pattern != "" + if requiresChildren && runtime.GOOS == "darwin" { + return errors.New("configuration requires the 'pgrep' finder on your OS") + } + 
p.finder = &NativeFinder{} + case "test": + p.Log.Warn("running in test mode") + default: + return fmt.Errorf("unknown pid_finder %q", p.PidFinder) } - tags["pid_finder"] = p.PidFinder - tags["result"] = "success" - acc.AddFields("procstat_lookup", fields, tags, now) + // Initialize the running process cache + p.processes = make(map[PID]Process) return nil } -// Add metrics a single Process -func (p *Procstat) addMetric(proc Process, acc telegraf.Accumulator, t time.Time) { - var prefix string - if p.Prefix != "" { - prefix = p.Prefix + "_" - } - - fields := map[string]interface{}{} - - //If process_name tag is not already set, set to actual name - if _, nameInTags := proc.Tags()["process_name"]; !nameInTags { - name, err := proc.Name() - if err == nil { - proc.Tags()["process_name"] = name +func (p *Procstat) Gather(acc telegraf.Accumulator) error { + now := time.Now() + results, err := p.findPids() + if err != nil { + // Add lookup error-metric + fields := map[string]interface{}{ + "pid_count": 0, + "running": 0, + "result_code": 1, } - } - - //If user tag is not already set, set to actual name - if _, ok := proc.Tags()["user"]; !ok { - user, err := proc.Username() - if err == nil { - proc.Tags()["user"] = user + tags := map[string]string{ + "pid_finder": p.PidFinder, + "result": "lookup_error", } + acc.AddFields("procstat_lookup", fields, tags, now) + return err } - //If pid is not present as a tag, include it as a field. 
- if _, pidInTags := proc.Tags()["pid"]; !pidInTags { - fields["pid"] = int32(proc.PID()) - } - - //If cmd_line tag is true and it is not already set add cmdline as a tag - if p.CmdLineTag { - if _, ok := proc.Tags()["cmdline"]; !ok { - cmdline, err := proc.Cmdline() - if err == nil { - proc.Tags()["cmdline"] = cmdline + var count int + running := make(map[PID]bool) + for _, r := range results { + if len(r.PIDs) < 1 && len(p.SupervisorUnits) > 0 { + continue + } + count += len(r.PIDs) + for _, pid := range r.PIDs { + // Use the cached processes as we need the existing instances + // to compute delta-metrics (e.g. cpu-usage). + proc, found := p.processes[pid] + if !found { + // We've found a process that was not recorded before so add it + // to the list of processes + proc, err = p.createProcess(pid) + if err != nil { + // No problem; process may have ended after we found it + continue + } + // Assumption: if a process has no name, it probably does not exist + if name, _ := proc.Name(); name == "" { + continue + } + + // Add initial tags + for k, v := range r.Tags { + proc.SetTag(k, v) + } + + // Add pid tag if needed + if p.PidTag { + proc.SetTag("pid", strconv.Itoa(int(pid))) + } + if p.ProcessName != "" { + proc.SetTag("process_name", p.ProcessName) + } + p.processes[pid] = proc } + running[pid] = true + m := proc.Metric(p.Prefix, p.CmdLineTag, p.solarisMode) + m.SetTime(now) + acc.AddMetric(m) } } - numThreads, err := proc.NumThreads() - if err == nil { - fields[prefix+"num_threads"] = numThreads - } - - fds, err := proc.NumFDs() - if err == nil { - fields[prefix+"num_fds"] = fds - } - - ctx, err := proc.NumCtxSwitches() - if err == nil { - fields[prefix+"voluntary_context_switches"] = ctx.Voluntary - fields[prefix+"involuntary_context_switches"] = ctx.Involuntary + // Cleanup processes that are not running anymore + for pid := range p.processes { + if !running[pid] { + delete(p.processes, pid) + } } - faults, err := proc.PageFaults() - if err == nil { - 
fields[prefix+"minor_faults"] = faults.MinorFaults - fields[prefix+"major_faults"] = faults.MajorFaults - fields[prefix+"child_minor_faults"] = faults.ChildMinorFaults - fields[prefix+"child_major_faults"] = faults.ChildMajorFaults + // Add lookup statistics-metric + fields := map[string]interface{}{ + "pid_count": count, + "running": len(running), + "result_code": 0, } - - io, err := proc.IOCounters() - if err == nil { - fields[prefix+"read_count"] = io.ReadCount - fields[prefix+"write_count"] = io.WriteCount - fields[prefix+"read_bytes"] = io.ReadBytes - fields[prefix+"write_bytes"] = io.WriteBytes + tags := map[string]string{ + "pid_finder": p.PidFinder, + "result": "success", } - - createdAt, err := proc.CreateTime() // returns epoch in ms - if err == nil { - fields[prefix+"created_at"] = createdAt * 1000000 // ms to ns + if len(p.SupervisorUnits) > 0 { + tags["supervisor_unit"] = strings.Join(p.SupervisorUnits, ";") } + acc.AddFields("procstat_lookup", fields, tags, now) - cpuTime, err := proc.Times() - if err == nil { - fields[prefix+"cpu_time_user"] = cpuTime.User - fields[prefix+"cpu_time_system"] = cpuTime.System - fields[prefix+"cpu_time_iowait"] = cpuTime.Iowait // only reported on Linux - } + return nil +} - cpuPerc, err := proc.Percent(time.Duration(0)) - if err == nil { - if p.solarisMode { - fields[prefix+"cpu_usage"] = cpuPerc / float64(runtime.NumCPU()) - } else { - fields[prefix+"cpu_usage"] = cpuPerc +// Get matching PIDs and their initial tags +func (p *Procstat) findPids() ([]PidsTags, error) { + switch { + case len(p.SupervisorUnits) > 0: + return p.findSupervisorUnits() + case p.SystemdUnits != "": + return p.systemdUnitPIDs() + case p.WinService != "": + pids, err := p.winServicePIDs() + if err != nil { + return nil, err } - } - - // This only returns values for RSS and VMS - mem, err := proc.MemoryInfo() - if err == nil { - fields[prefix+"memory_rss"] = mem.RSS - fields[prefix+"memory_vms"] = mem.VMS - } - - collectMemmap(proc, prefix, 
fields) - - memPerc, err := proc.MemoryPercent() - if err == nil { - fields[prefix+"memory_usage"] = memPerc - } - - rlims, err := proc.RlimitUsage(true) - if err == nil { - for _, rlim := range rlims { - var name string - switch rlim.Resource { - case process.RLIMIT_CPU: - name = "cpu_time" - case process.RLIMIT_DATA: - name = "memory_data" - case process.RLIMIT_STACK: - name = "memory_stack" - case process.RLIMIT_RSS: - name = "memory_rss" - case process.RLIMIT_NOFILE: - name = "num_fds" - case process.RLIMIT_MEMLOCK: - name = "memory_locked" - case process.RLIMIT_AS: - name = "memory_vms" - case process.RLIMIT_LOCKS: - name = "file_locks" - case process.RLIMIT_SIGPENDING: - name = "signals_pending" - case process.RLIMIT_NICE: - name = "nice_priority" - case process.RLIMIT_RTPRIO: - name = "realtime_priority" - default: - continue - } - - fields[prefix+"rlimit_"+name+"_soft"] = rlim.Soft - fields[prefix+"rlimit_"+name+"_hard"] = rlim.Hard - if name != "file_locks" { // gopsutil doesn't currently track the used file locks count - fields[prefix+name] = rlim.Used - } + tags := map[string]string{"win_service": p.WinService} + return []PidsTags{{pids, tags}}, nil + case p.CGroup != "": + return p.cgroupPIDs() + case p.PidFile != "": + pids, err := p.finder.PidFile(p.PidFile) + if err != nil { + return nil, err } + tags := map[string]string{"pidfile": p.PidFile} + return []PidsTags{{pids, tags}}, nil + case p.Exe != "": + pids, err := p.finder.Pattern(p.Exe) + if err != nil { + return nil, err + } + tags := map[string]string{"exe": p.Exe} + return []PidsTags{{pids, tags}}, nil + case p.Pattern != "": + pids, err := p.finder.FullPattern(p.Pattern) + if err != nil { + return nil, err + } + tags := map[string]string{"pattern": p.Pattern} + return []PidsTags{{pids, tags}}, nil + case p.User != "": + pids, err := p.finder.UID(p.User) + if err != nil { + return nil, err + } + tags := map[string]string{"user": p.User} + return []PidsTags{{pids, tags}}, nil } + return nil, 
errors.New("no filter option set") +} - ppid, err := proc.Ppid() - if err == nil { - fields[prefix+"ppid"] = ppid +func (p *Procstat) findSupervisorUnits() ([]PidsTags, error) { + groups, groupsTags, err := p.supervisorPIDs() + if err != nil { + return nil, fmt.Errorf("getting supervisor PIDs failed: %w", err) } - status, err := proc.Status() - if err == nil { - fields[prefix+"status"] = status[0] - } + // According to the PID, find the system process number and get the child processes + pidTags := make([]PidsTags, 0, len(groups)) + for _, group := range groups { + grppid := groupsTags[group]["pid"] + if grppid == "" { + pidTags = append(pidTags, PidsTags{nil, groupsTags[group]}) + continue + } - acc.AddFields("procstat", fields, proc.Tags(), t) -} + pid, err := strconv.ParseInt(grppid, 10, 32) + if err != nil { + return nil, fmt.Errorf("converting PID %q failed: %w", grppid, err) + } -// Update monitored Processes -func (p *Procstat) updateProcesses(pids []PID, tags map[string]string, prevInfo map[PID]Process, procs map[PID]Process) { - for _, pid := range pids { - info, ok := prevInfo[pid] - if ok { - // Assumption: if a process has no name, it probably does not exist - if name, _ := info.Name(); name == "" { - continue - } - procs[pid] = info - } else { - proc, err := p.createProcess(pid) - if err != nil { - // No problem; process may have ended after we found it - continue - } - // Assumption: if a process has no name, it probably does not exist - if name, _ := proc.Name(); name == "" { - continue - } - procs[pid] = proc + // Get all children of the supervisor unit + pids, err := p.finder.Children(PID(pid)) + if err != nil { + return nil, fmt.Errorf("getting children for %d failed: %w", pid, err) + } + tags := map[string]string{"pattern": p.Pattern, "parent_pid": p.Pattern} - // Add initial tags - for k, v := range tags { - proc.Tags()[k] = v - } + // Handle situations where the PID does not exist + if len(pids) == 0 { + continue + } - // Add pid tag if needed 
- if p.PidTag { - proc.Tags()["pid"] = strconv.Itoa(int(pid)) - } - if p.ProcessName != "" { - proc.Tags()["process_name"] = p.ProcessName + // Merge tags map + for k, v := range groupsTags[group] { + _, ok := tags[k] + if !ok { + tags[k] = v } } + // Remove duplicate pid tags + delete(tags, "pid") + pidTags = append(pidTags, PidsTags{pids, tags}) } + return pidTags, nil } -// Create and return PIDGatherer lazily -func (p *Procstat) getPIDFinder() (PIDFinder, error) { - if p.finder == nil { - f, err := p.createPIDFinder() - if err != nil { - return nil, err +func (p *Procstat) supervisorPIDs() ([]string, map[string]map[string]string, error) { + out, err := execCommand("supervisorctl", "status", strings.Join(p.SupervisorUnits, " ")).Output() + if err != nil { + if !strings.Contains(err.Error(), "exit status 3") { + return nil, nil, err } - p.finder = f - } - return p.finder, nil -} - -// Get matching PIDs and their initial tags -func (p *Procstat) findPids() []PidsTags { - var pidTags []PidsTags - - if p.SystemdUnit != "" { - groups := p.systemdUnitPIDs() - return groups - } else if p.CGroup != "" { - groups := p.cgroupPIDs() - return groups } + lines := strings.Split(string(out), "\n") + // Get the PID, running status, running time and boot time of the main process: + // pid 11779, uptime 17:41:16 + // Exited too quickly (process log may have details) + mainPids := make(map[string]map[string]string) + for _, line := range lines { + if line == "" { + continue + } - f, err := p.getPIDFinder() - if err != nil { - pidTags = append(pidTags, PidsTags{nil, nil, err}) - return pidTags - } - pids, tags, err := p.SimpleFindPids(f) - pidTags = append(pidTags, PidsTags{pids, tags, err}) + kv := strings.Fields(line) + if len(kv) < 2 { + // Not a key-value pair + continue + } + name := kv[0] - return pidTags -} + statusMap := map[string]string{ + "supervisor_unit": name, + "status": kv[1], + } -// Get matching PIDs and their initial tags -func (p *Procstat) SimpleFindPids(f 
PIDFinder) ([]PID, map[string]string, error) { - var pids []PID - tags := make(map[string]string) - var err error - - if p.PidFile != "" { - pids, err = f.PidFile(p.PidFile) - tags = map[string]string{"pidfile": p.PidFile} - } else if p.Exe != "" { - pids, err = f.Pattern(p.Exe) - tags = map[string]string{"exe": p.Exe} - } else if p.Pattern != "" { - pids, err = f.FullPattern(p.Pattern) - tags = map[string]string{"pattern": p.Pattern} - } else if p.User != "" { - pids, err = f.UID(p.User) - tags = map[string]string{"user": p.User} - } else if p.WinService != "" { - pids, err = p.winServicePIDs() - tags = map[string]string{"win_service": p.WinService} - } else { - err = fmt.Errorf("either exe, pid_file, user, pattern, systemd_unit, cgroup, or win_service must be specified") + switch kv[1] { + case "FATAL", "EXITED", "BACKOFF", "STOPPING": + statusMap["error"] = strings.Join(kv[2:], " ") + case "RUNNING": + statusMap["pid"] = strings.ReplaceAll(kv[3], ",", "") + statusMap["uptimes"] = kv[5] + case "STOPPED", "UNKNOWN", "STARTING": + // No additional info + } + mainPids[name] = statusMap } - return pids, tags, err + return p.SupervisorUnits, mainPids, nil } -// execCommand is so tests can mock out exec.Command usage. 
-var execCommand = exec.Command - -func (p *Procstat) systemdUnitPIDs() []PidsTags { +func (p *Procstat) systemdUnitPIDs() ([]PidsTags, error) { if p.IncludeSystemdChildren { - p.CGroup = fmt.Sprintf("systemd/system.slice/%s", p.SystemdUnit) + p.CGroup = fmt.Sprintf("systemd/system.slice/%s", p.SystemdUnits) return p.cgroupPIDs() } var pidTags []PidsTags - pids, err := p.simpleSystemdUnitPIDs() - tags := map[string]string{"systemd_unit": p.SystemdUnit} - pidTags = append(pidTags, PidsTags{pids, tags, err}) - return pidTags + if err != nil { + return nil, err + } + tags := map[string]string{"systemd_unit": p.SystemdUnits} + pidTags = append(pidTags, PidsTags{pids, tags}) + return pidTags, nil } func (p *Procstat) simpleSystemdUnitPIDs() ([]PID, error) { - out, err := execCommand("systemctl", "show", p.SystemdUnit).Output() + out, err := execCommand("systemctl", "show", p.SystemdUnits).Output() if err != nil { return nil, err } @@ -431,7 +383,7 @@ func (p *Procstat) simpleSystemdUnitPIDs() ([]PID, error) { return pids, nil } -func (p *Procstat) cgroupPIDs() []PidsTags { +func (p *Procstat) cgroupPIDs() ([]PidsTags, error) { procsPath := p.CGroup if procsPath[0] != '/' { procsPath = "/sys/fs/cgroup/" + procsPath @@ -439,17 +391,20 @@ func (p *Procstat) cgroupPIDs() []PidsTags { items, err := filepath.Glob(procsPath) if err != nil { - return []PidsTags{{nil, nil, fmt.Errorf("glob failed: %w", err)}} + return nil, fmt.Errorf("glob failed: %w", err) } pidTags := make([]PidsTags, 0, len(items)) for _, item := range items { pids, err := p.singleCgroupPIDs(item) + if err != nil { + return nil, err + } tags := map[string]string{"cgroup": p.CGroup, "cgroup_full": item} - pidTags = append(pidTags, PidsTags{pids, tags, err}) + pidTags = append(pidTags, PidsTags{pids, tags}) } - return pidTags + return pidTags, nil } func (p *Procstat) singleCgroupPIDs(path string) ([]PID, error) { @@ -503,16 +458,8 @@ func (p *Procstat) winServicePIDs() ([]PID, error) { return pids, nil } -func 
(p *Procstat) Init() error { - if strings.ToLower(p.Mode) == "solaris" { - p.solarisMode = true - } - - return nil -} - func init() { inputs.Add("procstat", func() telegraf.Input { - return &Procstat{} + return &Procstat{createProcess: newProc} }) } diff --git a/plugins/inputs/procstat/procstat_test.go b/plugins/inputs/procstat/procstat_test.go index 20c5bec39158a..e0181d62bfa8a 100644 --- a/plugins/inputs/procstat/procstat_test.go +++ b/plugins/inputs/procstat/procstat_test.go @@ -10,10 +10,11 @@ import ( "testing" "time" - "github.com/shirou/gopsutil/v3/cpu" "github.com/shirou/gopsutil/v3/process" "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" ) @@ -54,6 +55,20 @@ ExecMainPID=11408 os.Exit(0) } + if cmdline == "supervisorctl status TestGather_supervisorUnitPIDs" { + fmt.Printf(`TestGather_supervisorUnitPIDs RUNNING pid 7311, uptime 0:00:19 +`) + //nolint:revive // error code is important for this "test" + os.Exit(0) + } + + if cmdline == "supervisorctl status TestGather_STARTINGsupervisorUnitPIDs TestGather_FATALsupervisorUnitPIDs" { + fmt.Printf(`TestGather_FATALsupervisorUnitPIDs FATAL Exited too quickly (process log may have details) +TestGather_STARTINGsupervisorUnitPIDs STARTING`) + //nolint:revive // error code is important for this "test" + os.Exit(0) + } + fmt.Printf("command not found\n") //nolint:revive // error code is important for this "test" os.Exit(1) @@ -64,12 +79,10 @@ type testPgrep struct { err error } -func pidFinder(pids []PID) func() (PIDFinder, error) { - return func() (PIDFinder, error) { - return &testPgrep{ - pids: pids, - err: nil, - }, nil +func newTestFinder(pids []PID) PIDFinder { + return &testPgrep{ + pids: pids, + err: nil, } } @@ -93,6 +106,11 @@ func (pg *testPgrep) FullPattern(_ string) ([]PID, error) { return pg.pids, pg.err } +func (pg *testPgrep) Children(_ PID) ([]PID, 
error) { + pids := []PID{7311, 8111, 8112} + return pids, pg.err +} + type testProc struct { pid PID tags map[string]string @@ -109,263 +127,306 @@ func (p *testProc) PID() PID { return p.pid } -func (p *testProc) Username() (string, error) { - return "testuser", nil -} - -func (p *testProc) Tags() map[string]string { - return p.tags -} - -func (p *testProc) PageFaults() (*process.PageFaultsStat, error) { - return &process.PageFaultsStat{}, nil -} - -func (p *testProc) IOCounters() (*process.IOCountersStat, error) { - return &process.IOCountersStat{}, nil +func (p *testProc) Name() (string, error) { + return "test_proc", nil } -func (p *testProc) MemoryInfo() (*process.MemoryInfoStat, error) { - return &process.MemoryInfoStat{}, nil +func (p *testProc) SetTag(k, v string) { + p.tags[k] = v } func (p *testProc) MemoryMaps(bool) (*[]process.MemoryMapsStat, error) { return &[]process.MemoryMapsStat{}, nil } -func (p *testProc) Name() (string, error) { - return "test_proc", nil -} - -func (p *testProc) NumCtxSwitches() (*process.NumCtxSwitchesStat, error) { - return &process.NumCtxSwitchesStat{}, nil -} - -func (p *testProc) NumFDs() (int32, error) { - return 0, nil -} +func (p *testProc) Metric(prefix string, cmdLineTag, _ bool) telegraf.Metric { + if prefix != "" { + prefix += "_" + } -func (p *testProc) NumThreads() (int32, error) { - return 0, nil -} + fields := map[string]interface{}{ + "pid": int32(p.pid), + "ppid": int32(0), + prefix + "num_fds": int32(0), + prefix + "num_threads": int32(0), + prefix + "voluntary_context_switches": int64(0), + prefix + "involuntary_context_switches": int64(0), + prefix + "minor_faults": uint64(0), + prefix + "major_faults": uint64(0), + prefix + "child_major_faults": uint64(0), + prefix + "child_minor_faults": uint64(0), + prefix + "read_bytes": uint64(0), + prefix + "read_count": uint64(0), + prefix + "write_bytes": uint64(0), + prefix + "write_count": uint64(0), + prefix + "created_at": int64(0), + prefix + "cpu_time_user": 
float64(0), + prefix + "cpu_time_system": float64(0), + prefix + "cpu_time_iowait": float64(0), + prefix + "cpu_usage": float64(0), + prefix + "memory_rss": uint64(0), + prefix + "memory_vms": uint64(0), + prefix + "memory_usage": float32(0), + prefix + "status": "running", + } -func (p *testProc) Percent(_ time.Duration) (float64, error) { - return 0, nil -} + tags := map[string]string{ + "process_name": "test_proc", + "user": "testuser", + } + for k, v := range p.tags { + tags[k] = v + } -func (p *testProc) MemoryPercent() (float32, error) { - return 0, nil + if cmdLineTag { + tags["cmdline"] = "test_proc" + } + return metric.New("procstat", tags, fields, time.Time{}) } -func (p *testProc) CreateTime() (int64, error) { - return 0, nil -} +var pid = PID(42) +var exe = "foo" -func (p *testProc) Times() (*cpu.TimesStat, error) { - return &cpu.TimesStat{}, nil +func TestInitInvalidFinder(t *testing.T) { + plugin := Procstat{ + PidFinder: "foo", + Log: testutil.Logger{}, + createProcess: newTestProc, + } + require.Error(t, plugin.Init()) } -func (p *testProc) RlimitUsage(_ bool) ([]process.RlimitStat, error) { - return []process.RlimitStat{}, nil -} +func TestInitRequiresChildDarwin(t *testing.T) { + if runtime.GOOS != "darwin" { + t.Skip("Skipping test on non-darwin platform") + } -func (p *testProc) Ppid() (int32, error) { - return 0, nil + p := Procstat{ + Pattern: "somepattern", + SupervisorUnits: []string{"a_unit"}, + PidFinder: "native", + Log: testutil.Logger{}, + } + require.ErrorContains(t, p.Init(), "requires the 'pgrep' finder") } -func (p *testProc) Status() ([]string, error) { - return []string{"running"}, nil +func TestInitMissingPidMethod(t *testing.T) { + p := Procstat{ + Log: testutil.Logger{}, + createProcess: newTestProc, + } + require.ErrorContains(t, p.Init(), "require filter option but none set") } -var pid = PID(42) -var exe = "foo" - func TestGather_CreateProcessErrorOk(t *testing.T) { - var acc testutil.Accumulator - p := Procstat{ - Exe: exe, 
- createPIDFinder: pidFinder([]PID{pid}), + Exe: exe, + PidFinder: "test", + Log: testutil.Logger{}, + finder: newTestFinder([]PID{pid}), createProcess: func(PID) (Process, error) { return nil, fmt.Errorf("createProcess error") }, } - require.NoError(t, acc.GatherError(p.Gather)) -} + require.NoError(t, p.Init()) -func TestGather_CreatePIDFinderError(t *testing.T) { var acc testutil.Accumulator + require.NoError(t, p.Gather(&acc)) +} +func TestGather_ProcessName(t *testing.T) { p := Procstat{ - createPIDFinder: func() (PIDFinder, error) { - return nil, fmt.Errorf("createPIDFinder error") - }, + Exe: exe, + ProcessName: "custom_name", + PidFinder: "test", + Log: testutil.Logger{}, + finder: newTestFinder([]PID{pid}), createProcess: newTestProc, } - require.Error(t, acc.GatherError(p.Gather)) -} + require.NoError(t, p.Init()) -func TestGather_ProcessName(t *testing.T) { var acc testutil.Accumulator - - p := Procstat{ - Exe: exe, - ProcessName: "custom_name", - createPIDFinder: pidFinder([]PID{pid}), - createProcess: newTestProc, - } - require.NoError(t, acc.GatherError(p.Gather)) + require.NoError(t, p.Gather(&acc)) require.Equal(t, "custom_name", acc.TagValue("procstat", "process_name")) } func TestGather_NoProcessNameUsesReal(t *testing.T) { - var acc testutil.Accumulator pid := PID(os.Getpid()) p := Procstat{ - Exe: exe, - createPIDFinder: pidFinder([]PID{pid}), - createProcess: newTestProc, + Exe: exe, + PidFinder: "test", + Log: testutil.Logger{}, + finder: newTestFinder([]PID{pid}), + createProcess: newTestProc, } - require.NoError(t, acc.GatherError(p.Gather)) + require.NoError(t, p.Init()) + + var acc testutil.Accumulator + require.NoError(t, p.Gather(&acc)) require.True(t, acc.HasTag("procstat", "process_name")) } func TestGather_NoPidTag(t *testing.T) { - var acc testutil.Accumulator - p := Procstat{ - Exe: exe, - createPIDFinder: pidFinder([]PID{pid}), - createProcess: newTestProc, + Exe: exe, + PidFinder: "test", + Log: testutil.Logger{}, + finder: 
newTestFinder([]PID{pid}), + createProcess: newTestProc, } - require.NoError(t, acc.GatherError(p.Gather)) - require.True(t, acc.HasInt32Field("procstat", "pid")) + require.NoError(t, p.Init()) + + var acc testutil.Accumulator + require.NoError(t, p.Gather(&acc)) + + require.True(t, acc.HasInt64Field("procstat", "pid")) require.False(t, acc.HasTag("procstat", "pid")) } func TestGather_PidTag(t *testing.T) { - var acc testutil.Accumulator - p := Procstat{ - Exe: exe, - PidTag: true, - createPIDFinder: pidFinder([]PID{pid}), - createProcess: newTestProc, + Exe: exe, + PidTag: true, + PidFinder: "test", + Log: testutil.Logger{}, + finder: newTestFinder([]PID{pid}), + createProcess: newTestProc, } - require.NoError(t, acc.GatherError(p.Gather)) + require.NoError(t, p.Init()) + + var acc testutil.Accumulator + require.NoError(t, p.Gather(&acc)) + require.Equal(t, "42", acc.TagValue("procstat", "pid")) require.False(t, acc.HasInt32Field("procstat", "pid")) } func TestGather_Prefix(t *testing.T) { - var acc testutil.Accumulator - p := Procstat{ - Exe: exe, - Prefix: "custom_prefix", - createPIDFinder: pidFinder([]PID{pid}), - createProcess: newTestProc, + Exe: exe, + Prefix: "custom_prefix", + PidFinder: "test", + Log: testutil.Logger{}, + finder: newTestFinder([]PID{pid}), + createProcess: newTestProc, } - require.NoError(t, acc.GatherError(p.Gather)) - require.True(t, acc.HasInt32Field("procstat", "custom_prefix_num_fds")) -} + require.NoError(t, p.Init()) -func TestGather_Exe(t *testing.T) { var acc testutil.Accumulator + require.NoError(t, p.Gather(&acc)) + + require.True(t, acc.HasInt64Field("procstat", "custom_prefix_num_fds")) +} +func TestGather_Exe(t *testing.T) { p := Procstat{ - Exe: exe, - createPIDFinder: pidFinder([]PID{pid}), - createProcess: newTestProc, + Exe: exe, + PidFinder: "test", + Log: testutil.Logger{}, + finder: newTestFinder([]PID{pid}), + createProcess: newTestProc, } - require.NoError(t, acc.GatherError(p.Gather)) + require.NoError(t, 
p.Init()) + + var acc testutil.Accumulator + require.NoError(t, p.Gather(&acc)) require.Equal(t, exe, acc.TagValue("procstat", "exe")) } func TestGather_User(t *testing.T) { - var acc testutil.Accumulator user := "ada" p := Procstat{ - User: user, - createPIDFinder: pidFinder([]PID{pid}), - createProcess: newTestProc, + User: user, + PidFinder: "test", + Log: testutil.Logger{}, + finder: newTestFinder([]PID{pid}), + createProcess: newTestProc, } - require.NoError(t, acc.GatherError(p.Gather)) + require.NoError(t, p.Init()) + + var acc testutil.Accumulator + require.NoError(t, p.Gather(&acc)) require.Equal(t, user, acc.TagValue("procstat", "user")) } func TestGather_Pattern(t *testing.T) { - var acc testutil.Accumulator pattern := "foo" p := Procstat{ - Pattern: pattern, - createPIDFinder: pidFinder([]PID{pid}), - createProcess: newTestProc, + Pattern: pattern, + PidFinder: "test", + Log: testutil.Logger{}, + finder: newTestFinder([]PID{pid}), + createProcess: newTestProc, } - require.NoError(t, acc.GatherError(p.Gather)) + require.NoError(t, p.Init()) - require.Equal(t, pattern, acc.TagValue("procstat", "pattern")) -} - -func TestGather_MissingPidMethod(t *testing.T) { var acc testutil.Accumulator + require.NoError(t, p.Gather(&acc)) - p := Procstat{ - createPIDFinder: pidFinder([]PID{pid}), - createProcess: newTestProc, - } - require.Error(t, acc.GatherError(p.Gather)) + require.Equal(t, pattern, acc.TagValue("procstat", "pattern")) } func TestGather_PidFile(t *testing.T) { - var acc testutil.Accumulator pidfile := "/path/to/pidfile" p := Procstat{ - PidFile: pidfile, - createPIDFinder: pidFinder([]PID{pid}), - createProcess: newTestProc, + PidFile: pidfile, + PidFinder: "test", + Log: testutil.Logger{}, + finder: newTestFinder([]PID{pid}), + createProcess: newTestProc, } - require.NoError(t, acc.GatherError(p.Gather)) + require.NoError(t, p.Init()) + + var acc testutil.Accumulator + require.NoError(t, p.Gather(&acc)) require.Equal(t, pidfile, 
acc.TagValue("procstat", "pidfile")) } func TestGather_PercentFirstPass(t *testing.T) { - var acc testutil.Accumulator pid := PID(os.Getpid()) p := Procstat{ - Pattern: "foo", - PidTag: true, - createPIDFinder: pidFinder([]PID{pid}), - createProcess: NewProc, + Pattern: "foo", + PidTag: true, + PidFinder: "test", + Log: testutil.Logger{}, + finder: newTestFinder([]PID{pid}), + createProcess: newProc, } - require.NoError(t, acc.GatherError(p.Gather)) + require.NoError(t, p.Init()) + + var acc testutil.Accumulator + require.NoError(t, p.Gather(&acc)) require.True(t, acc.HasFloatField("procstat", "cpu_time_user")) require.False(t, acc.HasFloatField("procstat", "cpu_usage")) } func TestGather_PercentSecondPass(t *testing.T) { - var acc testutil.Accumulator pid := PID(os.Getpid()) p := Procstat{ - Pattern: "foo", - PidTag: true, - createPIDFinder: pidFinder([]PID{pid}), - createProcess: NewProc, + Pattern: "foo", + PidTag: true, + PidFinder: "test", + Log: testutil.Logger{}, + finder: newTestFinder([]PID{pid}), + createProcess: newProc, } - require.NoError(t, acc.GatherError(p.Gather)) - require.NoError(t, acc.GatherError(p.Gather)) + require.NoError(t, p.Init()) + + var acc testutil.Accumulator + require.NoError(t, p.Gather(&acc)) + require.NoError(t, p.Gather(&acc)) require.True(t, acc.HasFloatField("procstat", "cpu_time_user")) require.True(t, acc.HasFloatField("procstat", "cpu_usage")) @@ -373,17 +434,19 @@ func TestGather_PercentSecondPass(t *testing.T) { func TestGather_systemdUnitPIDs(t *testing.T) { p := Procstat{ - createPIDFinder: pidFinder([]PID{}), - SystemdUnit: "TestGather_systemdUnitPIDs", + SystemdUnits: "TestGather_systemdUnitPIDs", + PidFinder: "test", + Log: testutil.Logger{}, + finder: newTestFinder([]PID{pid}), } - pidsTags := p.findPids() + require.NoError(t, p.Init()) + + pidsTags, err := p.findPids() + require.NoError(t, err) + for _, pidsTag := range pidsTags { - pids := pidsTag.PIDS - tags := pidsTag.Tags - err := pidsTag.Err - 
require.NoError(t, err) - require.Equal(t, []PID{11408}, pids) - require.Equal(t, "TestGather_systemdUnitPIDs", tags["systemd_unit"]) + require.Equal(t, []PID{11408}, pidsTag.PIDs) + require.Equal(t, "TestGather_systemdUnitPIDs", pidsTag.Tags["systemd_unit"]) } } @@ -397,44 +460,95 @@ func TestGather_cgroupPIDs(t *testing.T) { require.NoError(t, err) p := Procstat{ - createPIDFinder: pidFinder([]PID{}), - CGroup: td, + CGroup: td, + PidFinder: "test", + Log: testutil.Logger{}, + finder: newTestFinder([]PID{pid}), } - pidsTags := p.findPids() + require.NoError(t, p.Init()) + + pidsTags, err := p.findPids() + require.NoError(t, err) for _, pidsTag := range pidsTags { - pids := pidsTag.PIDS - tags := pidsTag.Tags - err := pidsTag.Err - require.NoError(t, err) - require.Equal(t, []PID{1234, 5678}, pids) - require.Equal(t, td, tags["cgroup"]) + require.Equal(t, []PID{1234, 5678}, pidsTag.PIDs) + require.Equal(t, td, pidsTag.Tags["cgroup"]) } } func TestProcstatLookupMetric(t *testing.T) { p := Procstat{ - createPIDFinder: pidFinder([]PID{543}), - Exe: "-Gsys", + Exe: "-Gsys", + PidFinder: "test", + Log: testutil.Logger{}, + finder: newTestFinder([]PID{543}), + createProcess: newProc, } + require.NoError(t, p.Init()) + var acc testutil.Accumulator - err := acc.GatherError(p.Gather) - require.NoError(t, err) - require.Len(t, acc.Metrics, len(p.procs)+1) + require.NoError(t, p.Gather(&acc)) + require.Len(t, acc.GetTelegrafMetrics(), 1) } func TestGather_SameTimestamps(t *testing.T) { - var acc testutil.Accumulator pidfile := "/path/to/pidfile" p := Procstat{ - PidFile: pidfile, - createPIDFinder: pidFinder([]PID{pid}), - createProcess: newTestProc, + PidFile: pidfile, + PidFinder: "test", + Log: testutil.Logger{}, + finder: newTestFinder([]PID{pid}), + createProcess: newTestProc, } - require.NoError(t, acc.GatherError(p.Gather)) + require.NoError(t, p.Init()) + + var acc testutil.Accumulator + require.NoError(t, p.Gather(&acc)) procstat, _ := acc.Get("procstat") 
procstatLookup, _ := acc.Get("procstat_lookup") require.Equal(t, procstat.Time, procstatLookup.Time) } + +func TestGather_supervisorUnitPIDs(t *testing.T) { + p := Procstat{ + SupervisorUnits: []string{"TestGather_supervisorUnitPIDs"}, + PidFinder: "test", + Log: testutil.Logger{}, + finder: newTestFinder([]PID{pid}), + } + require.NoError(t, p.Init()) + + pidsTags, err := p.findPids() + require.NoError(t, err) + for _, pidsTag := range pidsTags { + require.Equal(t, []PID{7311, 8111, 8112}, pidsTag.PIDs) + require.Equal(t, "TestGather_supervisorUnitPIDs", pidsTag.Tags["supervisor_unit"]) + } +} + +func TestGather_MoresupervisorUnitPIDs(t *testing.T) { + p := Procstat{ + SupervisorUnits: []string{"TestGather_STARTINGsupervisorUnitPIDs", "TestGather_FATALsupervisorUnitPIDs"}, + PidFinder: "test", + Log: testutil.Logger{}, + finder: newTestFinder([]PID{pid}), + } + require.NoError(t, p.Init()) + + pidsTags, err := p.findPids() + require.NoError(t, err) + for _, pidsTag := range pidsTags { + require.Empty(t, pidsTag.PIDs) + switch pidsTag.Tags["supervisor_unit"] { + case "TestGather_STARTINGsupervisorUnitPIDs": + require.Equal(t, "STARTING", pidsTag.Tags["status"]) + case "TestGather_FATALsupervisorUnitPIDs": + require.Equal(t, "FATAL", pidsTag.Tags["status"]) + require.Equal(t, "Exited too quickly (process log may have details)", pidsTag.Tags["error"]) + default: + t.Fatalf("unexpected value for tag 'supervisor_unit': %q", pidsTag.Tags["supervisor_unit"]) + } + } +} diff --git a/plugins/inputs/procstat/sample.conf b/plugins/inputs/procstat/sample.conf index f2d45545c36dc..2f5dbce59a5c3 100644 --- a/plugins/inputs/procstat/sample.conf +++ b/plugins/inputs/procstat/sample.conf @@ -13,6 +13,8 @@ # include_systemd_children = false ## CGroup name or path, supports globs # cgroup = "systemd/system.slice/nginx.service" + ## Supervisor service names of hypervisorctl management + # supervisor_units = ["webserver", "proxy"] ## Windows service name # win_service = "" diff --git 
a/plugins/inputs/procstat/win_service_notwindows.go b/plugins/inputs/procstat/win_service_notwindows.go deleted file mode 100644 index 733b8d24947e3..0000000000000 --- a/plugins/inputs/procstat/win_service_notwindows.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build !windows - -package procstat - -import ( - "fmt" -) - -func queryPidWithWinServiceName(_ string) (uint32, error) { - return 0, fmt.Errorf("os not support win_service option") -} diff --git a/plugins/inputs/prometheus/consul.go b/plugins/inputs/prometheus/consul.go index 7cd026dbec440..b0914e314561e 100644 --- a/plugins/inputs/prometheus/consul.go +++ b/plugins/inputs/prometheus/consul.go @@ -94,13 +94,13 @@ func (p *Prometheus) startConsul(ctx context.Context) error { p.wg.Add(1) go func() { - // Store last error status and change log level depending on repeated occurence + // Store last error status and change log level depending on repeated occurrence var refreshFailed = false defer p.wg.Done() err := p.refreshConsulServices(catalog) if err != nil { refreshFailed = true - p.Log.Errorf("Unable to refreh Consul services: %v", err) + p.Log.Errorf("Unable to refresh Consul services: %v", err) } for { select { @@ -109,7 +109,7 @@ func (p *Prometheus) startConsul(ctx context.Context) error { case <-time.After(time.Duration(p.ConsulConfig.QueryInterval)): err := p.refreshConsulServices(catalog) if err != nil { - message := fmt.Sprintf("Unable to refreh Consul services: %v", err) + message := fmt.Sprintf("Unable to refresh Consul services: %v", err) if refreshFailed { p.Log.Debug(message) } else { diff --git a/plugins/inputs/prometheus/prometheus_test.go b/plugins/inputs/prometheus/prometheus_test.go index b3f132903fefb..2b1c372528115 100644 --- a/plugins/inputs/prometheus/prometheus_test.go +++ b/plugins/inputs/prometheus/prometheus_test.go @@ -364,7 +364,7 @@ func TestPrometheusGeneratesSummaryMetricsV2(t *testing.T) { err = acc.GatherError(p.Gather) require.NoError(t, err) - require.Equal(t, 
acc.TagSetValue("prometheus", "quantile"), "0") + require.Equal(t, "0", acc.TagSetValue("prometheus", "quantile")) require.True(t, acc.HasFloatField("prometheus", "go_gc_duration_seconds_sum")) require.True(t, acc.HasFloatField("prometheus", "go_gc_duration_seconds_count")) require.Equal(t, acc.TagValue("prometheus", "url"), ts.URL+"/metrics") diff --git a/plugins/inputs/proxmox/proxmox_test.go b/plugins/inputs/proxmox/proxmox_test.go index fc7eb2d83724a..9e96c87255f7d 100644 --- a/plugins/inputs/proxmox/proxmox_test.go +++ b/plugins/inputs/proxmox/proxmox_test.go @@ -65,7 +65,7 @@ func TestGetNodeSearchDomain(t *testing.T) { err := getNodeSearchDomain(px) require.NoError(t, err) - require.Equal(t, px.nodeSearchDomain, "test.example.com") + require.Equal(t, "test.example.com", px.nodeSearchDomain) } func TestGatherLxcData(t *testing.T) { @@ -75,7 +75,7 @@ func TestGatherLxcData(t *testing.T) { acc := &testutil.Accumulator{} gatherLxcData(px, acc) - require.Equal(t, acc.NFields(), 15) + require.Equal(t, 15, acc.NFields()) testFields := map[string]interface{}{ "status": "running", "uptime": int64(2078164), @@ -109,7 +109,7 @@ func TestGatherQemuData(t *testing.T) { acc := &testutil.Accumulator{} gatherQemuData(px, acc) - require.Equal(t, acc.NFields(), 15) + require.Equal(t, 15, acc.NFields()) testFields := map[string]interface{}{ "status": "running", "uptime": int64(2159739), @@ -145,5 +145,5 @@ func TestGather(t *testing.T) { require.NoError(t, err) // Results from both tests above - require.Equal(t, acc.NFields(), 30) + require.Equal(t, 30, acc.NFields()) } diff --git a/plugins/inputs/raindrops/raindrops_test.go b/plugins/inputs/raindrops/raindrops_test.go index 6da64dbb4d207..387c2defc24a7 100644 --- a/plugins/inputs/raindrops/raindrops_test.go +++ b/plugins/inputs/raindrops/raindrops_test.go @@ -49,7 +49,7 @@ func TestRaindropsGeneratesMetrics(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var rsp string 
- require.Equal(t, r.URL.Path, "/_raindrops", "Cannot handle request") + require.Equal(t, "/_raindrops", r.URL.Path, "Cannot handle request") rsp = sampleResponse _, err := fmt.Fprintln(w, rsp) diff --git a/plugins/inputs/s7comm/s7comm.go b/plugins/inputs/s7comm/s7comm.go index bbfcee38bbe31..d6795792db772 100644 --- a/plugins/inputs/s7comm/s7comm.go +++ b/plugins/inputs/s7comm/s7comm.go @@ -301,9 +301,9 @@ func handleFieldAddress(address string) (*gos7.S7DataItem, converterFunc, error) } // Check the amount parameter if any - var extra int + var extra, bit int switch dtype { - case "X", "S": + case "S": // We require an extra parameter x := groups["extra"] if x == "" { @@ -317,6 +317,21 @@ func handleFieldAddress(address string) (*gos7.S7DataItem, converterFunc, error) if extra < 1 { return nil, nil, fmt.Errorf("invalid extra parameter %d", extra) } + case "X": + // We require an extra parameter + x := groups["extra"] + if x == "" { + return nil, nil, errors.New("extra parameter required") + } + + bit, err = strconv.Atoi(x) + if err != nil { + return nil, nil, fmt.Errorf("invalid extra parameter: %w", err) + } + if bit < 0 || bit > 7 { + // Ensure bit address is valid + return nil, nil, fmt.Errorf("invalid extra parameter: bit address %d out of range", bit) + } default: if groups["extra"] != "" { return nil, nil, errors.New("extra parameter specified but not used") @@ -348,6 +363,7 @@ func handleFieldAddress(address string) (*gos7.S7DataItem, converterFunc, error) item := &gos7.S7DataItem{ Area: area, WordLen: wordlen, + Bit: bit, DBNumber: areaidx, Start: start, Amount: amount, @@ -355,7 +371,7 @@ func handleFieldAddress(address string) (*gos7.S7DataItem, converterFunc, error) } // Determine the type converter function - f := determineConversion(dtype, extra) + f := determineConversion(dtype) return item, f, nil } diff --git a/plugins/inputs/s7comm/s7comm_test.go b/plugins/inputs/s7comm/s7comm_test.go index e390c849be8ee..f368f1fe4743b 100644 --- 
a/plugins/inputs/s7comm/s7comm_test.go +++ b/plugins/inputs/s7comm/s7comm_test.go @@ -258,6 +258,7 @@ func TestFieldMappings(t *testing.T) { { Area: 0x84, WordLen: 0x01, + Bit: 2, DBNumber: 5, Start: 3, Amount: 1, diff --git a/plugins/inputs/s7comm/type_conversions.go b/plugins/inputs/s7comm/type_conversions.go index 404b43a0ad3fd..43119393d5f0d 100644 --- a/plugins/inputs/s7comm/type_conversions.go +++ b/plugins/inputs/s7comm/type_conversions.go @@ -9,11 +9,11 @@ import ( var helper = &gos7.Helper{} -func determineConversion(dtype string, extra int) converterFunc { +func determineConversion(dtype string) converterFunc { switch dtype { case "X": return func(buf []byte) interface{} { - return (buf[0] & (1 << extra)) != 0 + return buf[0] != 0 } case "B": return func(buf []byte) interface{} { @@ -29,7 +29,7 @@ func determineConversion(dtype string, extra int) converterFunc { return "" } // Get the length of the encoded string - length := int(buf[0]) + length := int(buf[1]) // Clip the string if we do not fill the whole buffer if length < len(buf)-2 { return string(buf[2 : 2+length]) diff --git a/plugins/inputs/snmp/README.md b/plugins/inputs/snmp/README.md index 7fa7f25633bf8..d5397e6677a57 100644 --- a/plugins/inputs/snmp/README.md +++ b/plugins/inputs/snmp/README.md @@ -54,7 +54,7 @@ details. # version = 2 ## Unconnected UDP socket - ## When true, SNMP reponses are accepted from any address not just + ## When true, SNMP responses are accepted from any address not just ## the requested address. This can be useful when gathering from ## redundant/failover systems. # unconnected_udp_socket = false @@ -67,7 +67,8 @@ details. ## SNMP community string. # community = "public" - ## Agent host tag + ## Agent host tag; should be set to "source" for consistent usage across plugins + ## example: agent_host_tag = "source" # agent_host_tag = "agent_host" ## Number of retries to attempt. @@ -100,17 +101,18 @@ details. ## full plugin documentation for configuration details. 
[[inputs.snmp.field]] oid = "RFC1213-MIB::sysUpTime.0" - name = "uptime" + name = "sysUptime" + conversion = "float(2)" [[inputs.snmp.field]] oid = "RFC1213-MIB::sysName.0" - name = "source" + name = "sysName" is_tag = true [[inputs.snmp.table]] oid = "IF-MIB::ifTable" name = "interface" - inherit_tags = ["source"] + inherit_tags = ["sysName"] [[inputs.snmp.table.field]] oid = "IF-MIB::ifDescr" @@ -290,7 +292,7 @@ name = "EntPhyIndex" oid = "CISCO-POWER-ETHERNET-EXT-MIB::cpeExtPsePortEntPhyIndex" ``` -Partial result (removed agent_host and host columns from all following outputs +Partial result (removed agent and host tags from all following outputs in this section): ```text @@ -386,13 +388,17 @@ sudo tcpdump -s 0 -i eth0 -w telegraf-snmp.pcap host 127.0.0.1 and port 161 The field and tags will depend on the table and fields configured. +* snmp + * tags: + * agent_host (deprecated in 1.29: use `source` instead) + ## Example Output ```text -snmp,agent_host=127.0.0.1,source=loaner uptime=11331974i 1575509815000000000 -interface,agent_host=127.0.0.1,ifDescr=wlan0,ifIndex=3,source=example.org ifAdminStatus=1i,ifInDiscards=0i,ifInErrors=0i,ifInNUcastPkts=0i,ifInOctets=3436617431i,ifInUcastPkts=2717778i,ifInUnknownProtos=0i,ifLastChange=0i,ifMtu=1500i,ifOperStatus=1i,ifOutDiscards=0i,ifOutErrors=0i,ifOutNUcastPkts=0i,ifOutOctets=581368041i,ifOutQLen=0i,ifOutUcastPkts=1354338i,ifPhysAddress="c8:5b:76:c9:e6:8c",ifSpecific=".0.0",ifSpeed=0i,ifType=6i 1575509815000000000 -interface,agent_host=127.0.0.1,ifDescr=eth0,ifIndex=2,source=example.org ifAdminStatus=1i,ifInDiscards=0i,ifInErrors=0i,ifInNUcastPkts=21i,ifInOctets=3852386380i,ifInUcastPkts=3634004i,ifInUnknownProtos=0i,ifLastChange=9088763i,ifMtu=1500i,ifOperStatus=1i,ifOutDiscards=0i,ifOutErrors=0i,ifOutNUcastPkts=0i,ifOutOctets=434865441i,ifOutQLen=0i,ifOutUcastPkts=2110394i,ifPhysAddress="c8:5b:76:c9:e6:8c",ifSpecific=".0.0",ifSpeed=1000000000i,ifType=6i 1575509815000000000 
-interface,agent_host=127.0.0.1,ifDescr=lo,ifIndex=1,source=example.org ifAdminStatus=1i,ifInDiscards=0i,ifInErrors=0i,ifInNUcastPkts=0i,ifInOctets=51555569i,ifInUcastPkts=339097i,ifInUnknownProtos=0i,ifLastChange=0i,ifMtu=65536i,ifOperStatus=1i,ifOutDiscards=0i,ifOutErrors=0i,ifOutNUcastPkts=0i,ifOutOctets=51555569i,ifOutQLen=0i,ifOutUcastPkts=339097i,ifSpecific=".0.0",ifSpeed=10000000i,ifType=24i 1575509815000000000 +snmp,agent_host=127.0.0.1,sysName=example.org uptime=113319.74 1575509815000000000 +interface,agent_host=127.0.0.1,ifDescr=wlan0,ifIndex=3,sysName=example.org ifAdminStatus=1i,ifInDiscards=0i,ifInErrors=0i,ifInNUcastPkts=0i,ifInOctets=3436617431i,ifInUcastPkts=2717778i,ifInUnknownProtos=0i,ifLastChange=0i,ifMtu=1500i,ifOperStatus=1i,ifOutDiscards=0i,ifOutErrors=0i,ifOutNUcastPkts=0i,ifOutOctets=581368041i,ifOutQLen=0i,ifOutUcastPkts=1354338i,ifPhysAddress="c8:5b:76:c9:e6:8c",ifSpecific=".0.0",ifSpeed=0i,ifType=6i 1575509815000000000 +interface,agent_host=127.0.0.1,ifDescr=eth0,ifIndex=2,sysName=example.org ifAdminStatus=1i,ifInDiscards=0i,ifInErrors=0i,ifInNUcastPkts=21i,ifInOctets=3852386380i,ifInUcastPkts=3634004i,ifInUnknownProtos=0i,ifLastChange=9088763i,ifMtu=1500i,ifOperStatus=1i,ifOutDiscards=0i,ifOutErrors=0i,ifOutNUcastPkts=0i,ifOutOctets=434865441i,ifOutQLen=0i,ifOutUcastPkts=2110394i,ifPhysAddress="c8:5b:76:c9:e6:8c",ifSpecific=".0.0",ifSpeed=1000000000i,ifType=6i 1575509815000000000 +interface,agent_host=127.0.0.1,ifDescr=lo,ifIndex=1,sysName=example.org ifAdminStatus=1i,ifInDiscards=0i,ifInErrors=0i,ifInNUcastPkts=0i,ifInOctets=51555569i,ifInUcastPkts=339097i,ifInUnknownProtos=0i,ifLastChange=0i,ifMtu=65536i,ifOperStatus=1i,ifOutDiscards=0i,ifOutErrors=0i,ifOutNUcastPkts=0i,ifOutOctets=51555569i,ifOutQLen=0i,ifOutUcastPkts=339097i,ifSpecific=".0.0",ifSpeed=10000000i,ifType=24i 1575509815000000000 ``` [metric filtering]: /docs/CONFIGURATION.md#metric-filtering diff --git a/plugins/inputs/snmp/gosmi_test.go 
b/plugins/inputs/snmp/gosmi_test.go index fb56232695c29..57ff77b15e679 100644 --- a/plugins/inputs/snmp/gosmi_test.go +++ b/plugins/inputs/snmp/gosmi_test.go @@ -267,7 +267,7 @@ func TestTableBuild_walk_noTranslate(t *testing.T) { tb, err := tbl.Build(gosmiTsc, true, tr) require.NoError(t, err) - require.Equal(t, tb.Name, "mytable") + require.Equal(t, "mytable", tb.Name) rtr1 := RTableRow{ Tags: map[string]string{ "myfield1": "foo", @@ -350,7 +350,7 @@ func TestTableBuild_walk_Translate(t *testing.T) { tb, err := tbl.Build(gosmiTsc, true, tr) require.NoError(t, err) - require.Equal(t, tb.Name, "atTable") + require.Equal(t, "atTable", tb.Name) rtr1 := RTableRow{ Tags: map[string]string{ @@ -698,7 +698,7 @@ func TestTableJoin_walkGosmi(t *testing.T) { tb, err := tbl.Build(gosmiTsc, true, tr) require.NoError(t, err) - require.Equal(t, tb.Name, "mytable") + require.Equal(t, "mytable", tb.Name) rtr1 := RTableRow{ Tags: map[string]string{ "myfield1": "instance", @@ -781,7 +781,7 @@ func TestTableOuterJoin_walkGosmi(t *testing.T) { tb, err := tbl.Build(gosmiTsc, true, tr) require.NoError(t, err) - require.Equal(t, tb.Name, "mytable") + require.Equal(t, "mytable", tb.Name) rtr1 := RTableRow{ Tags: map[string]string{ "myfield1": "instance", @@ -873,7 +873,7 @@ func TestTableJoinNoIndexAsTag_walkGosmi(t *testing.T) { tb, err := tbl.Build(gosmiTsc, true, tr) require.NoError(t, err) - require.Equal(t, tb.Name, "mytable") + require.Equal(t, "mytable", tb.Name) rtr1 := RTableRow{ Tags: map[string]string{ "myfield1": "instance", diff --git a/plugins/inputs/snmp/sample.conf b/plugins/inputs/snmp/sample.conf index d2049eb5a90e9..0b30d41c7dd6c 100644 --- a/plugins/inputs/snmp/sample.conf +++ b/plugins/inputs/snmp/sample.conf @@ -17,7 +17,7 @@ # version = 2 ## Unconnected UDP socket - ## When true, SNMP reponses are accepted from any address not just + ## When true, SNMP responses are accepted from any address not just ## the requested address. 
This can be useful when gathering from ## redundant/failover systems. # unconnected_udp_socket = false @@ -30,7 +30,8 @@ ## SNMP community string. # community = "public" - ## Agent host tag + ## Agent host tag; should be set to "source" for consistent usage across plugins + ## example: agent_host_tag = "source" # agent_host_tag = "agent_host" ## Number of retries to attempt. @@ -63,17 +64,18 @@ ## full plugin documentation for configuration details. [[inputs.snmp.field]] oid = "RFC1213-MIB::sysUpTime.0" - name = "uptime" + name = "sysUptime" + conversion = "float(2)" [[inputs.snmp.field]] oid = "RFC1213-MIB::sysName.0" - name = "source" + name = "sysName" is_tag = true [[inputs.snmp.table]] oid = "IF-MIB::ifTable" name = "interface" - inherit_tags = ["source"] + inherit_tags = ["sysName"] [[inputs.snmp.table.field]] oid = "IF-MIB::ifDescr" diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index d037a9f80d2ce..ae68f5fac72de 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -18,6 +18,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/snmp" + "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -108,6 +109,12 @@ func (s *Snmp) Init() error { if len(s.AgentHostTag) == 0 { s.AgentHostTag = "agent_host" } + if s.AgentHostTag != "source" { + models.PrintOptionValueDeprecationNotice(telegraf.Warn, "inputs.snmp", "agent_host_tag", s.AgentHostTag, telegraf.DeprecationInfo{ + Since: "1.29.0", + Notice: `should be set to "source" for consistent usage across plugins`, + }) + } return nil } diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index 326a9d21742ed..44aec0ae99c8b 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -332,7 +332,7 @@ func TestGetSNMPConnection_v3(t *testing.T) { gsc, err := 
s.getConnection(0) require.NoError(t, err) gs := gsc.(snmp.GosnmpWrapper) - require.Equal(t, gs.Version, gosnmp.Version3) + require.Equal(t, gosnmp.Version3, gs.Version) sp := gs.SecurityParameters.(*gosnmp.UsmSecurityParameters) require.Equal(t, "1.2.3.4", gsc.Host()) require.EqualValues(t, 20, gs.MaxRepetitions) @@ -453,7 +453,7 @@ func TestGetSNMPConnection_v3_blumenthal(t *testing.T) { gsc, err := s.getConnection(0) require.NoError(t, err) gs := gsc.(snmp.GosnmpWrapper) - require.Equal(t, gs.Version, gosnmp.Version3) + require.Equal(t, gosnmp.Version3, gs.Version) sp := gs.SecurityParameters.(*gosnmp.UsmSecurityParameters) require.Equal(t, "1.2.3.4", gsc.Host()) require.EqualValues(t, 20, gs.MaxRepetitions) @@ -644,7 +644,7 @@ func TestTableBuild_walk(t *testing.T) { tb, err := tbl.Build(tsc, true, NewNetsnmpTranslator()) require.NoError(t, err) - require.Equal(t, tb.Name, "mytable") + require.Equal(t, "mytable", tb.Name) rtr1 := RTableRow{ Tags: map[string]string{ "myfield1": "foo", @@ -988,7 +988,7 @@ func TestTableJoin_walk(t *testing.T) { tb, err := tbl.Build(tsc, true, NewNetsnmpTranslator()) require.NoError(t, err) - require.Equal(t, tb.Name, "mytable") + require.Equal(t, "mytable", tb.Name) rtr1 := RTableRow{ Tags: map[string]string{ "myfield1": "instance", @@ -1065,7 +1065,7 @@ func TestTableOuterJoin_walk(t *testing.T) { tb, err := tbl.Build(tsc, true, NewNetsnmpTranslator()) require.NoError(t, err) - require.Equal(t, tb.Name, "mytable") + require.Equal(t, "mytable", tb.Name) rtr1 := RTableRow{ Tags: map[string]string{ "myfield1": "instance", @@ -1151,7 +1151,7 @@ func TestTableJoinNoIndexAsTag_walk(t *testing.T) { tb, err := tbl.Build(tsc, true, NewNetsnmpTranslator()) require.NoError(t, err) - require.Equal(t, tb.Name, "mytable") + require.Equal(t, "mytable", tb.Name) rtr1 := RTableRow{ Tags: map[string]string{ "myfield1": "instance", diff --git a/plugins/inputs/socketstat/socketstat.go b/plugins/inputs/socketstat/socketstat.go index 
5c7c5cb9b122a..400bd512dee9f 100644 --- a/plugins/inputs/socketstat/socketstat.go +++ b/plugins/inputs/socketstat/socketstat.go @@ -132,7 +132,7 @@ func (ss *Socketstat) parseAndGather(acc telegraf.Accumulator, data *bytes.Buffe // formats depending on the protocol. tags, fields = getTagsAndState(proto, words, ss.Log) - // This line containted metrics, so record that. + // This line contained metrics, so record that. flushData = true } if flushData { diff --git a/plugins/inputs/sql/drivers.go b/plugins/inputs/sql/drivers.go index 07e67b6df37f6..72c6cb977d8c6 100644 --- a/plugins/inputs/sql/drivers.go +++ b/plugins/inputs/sql/drivers.go @@ -3,6 +3,7 @@ package sql import ( // Blank imports to register the drivers _ "github.com/ClickHouse/clickhouse-go" + _ "github.com/IBM/nzgo/v12" _ "github.com/apache/arrow/go/v13/arrow/flight/flightsql/driver" _ "github.com/go-sql-driver/mysql" _ "github.com/jackc/pgx/v4/stdlib" diff --git a/plugins/inputs/sqlserver/README.md b/plugins/inputs/sqlserver/README.md index 41ac7f1922001..6b0369af72b30 100644 --- a/plugins/inputs/sqlserver/README.md +++ b/plugins/inputs/sqlserver/README.md @@ -203,7 +203,7 @@ to use them. ## This setting/metric is optional and is disabled by default. # health_metric = false - ## Possible queries accross different versions of the collectors + ## Possible queries across different versions of the collectors ## Queries enabled by default for specific Database Type ## database_type = AzureSQLDB by default collects the following queries diff --git a/plugins/inputs/sqlserver/sample.conf b/plugins/inputs/sqlserver/sample.conf index 16da591b058cf..315d59abd501c 100644 --- a/plugins/inputs/sqlserver/sample.conf +++ b/plugins/inputs/sqlserver/sample.conf @@ -71,7 +71,7 @@ ## This setting/metric is optional and is disabled by default. 
# health_metric = false - ## Possible queries accross different versions of the collectors + ## Possible queries across different versions of the collectors ## Queries enabled by default for specific Database Type ## database_type = AzureSQLDB by default collects the following queries diff --git a/plugins/inputs/stackdriver/stackdriver_test.go b/plugins/inputs/stackdriver/stackdriver_test.go index 6f4eb88486a5d..98be7a0246c07 100644 --- a/plugins/inputs/stackdriver/stackdriver_test.go +++ b/plugins/inputs/stackdriver/stackdriver_test.go @@ -748,7 +748,7 @@ func TestGather(t *testing.T) { err := s.Gather(&acc) require.NoError(t, err) - require.Equalf(t, len(acc.Errors) > 0, tt.wantAccErr, + require.Equalf(t, tt.wantAccErr, len(acc.Errors) > 0, "Accumulator errors. got=%v, want=%t", acc.Errors, tt.wantAccErr) actual := []telegraf.Metric{} diff --git a/plugins/inputs/statsd/statsd_test.go b/plugins/inputs/statsd/statsd_test.go index 8c405108e33dd..61744cbdf6168 100644 --- a/plugins/inputs/statsd/statsd_test.go +++ b/plugins/inputs/statsd/statsd_test.go @@ -1284,7 +1284,7 @@ func TestParse_MeasurementsWithMultipleValues(t *testing.T) { cachedtiming, ok := sSingle.timings["metric_type=timingvalid_multiple"] require.Truef(t, ok, "Expected cached measurement with hash 'metric_type=timingvalid_multiple' not found") - require.Equalf(t, cachedtiming.name, "valid_multiple", "Expected the name to be 'valid_multiple', got %s", cachedtiming.name) + require.Equalf(t, "valid_multiple", cachedtiming.name, "Expected the name to be 'valid_multiple', got %s", cachedtiming.name) // A 0 at samplerate 0.1 will add 10 values of 0, // A 0 with invalid samplerate will add a single 0, @@ -1617,12 +1617,12 @@ func TestParse_Counters_Delete(t *testing.T) { func TestParseKeyValue(t *testing.T) { k, v := parseKeyValue("foo=bar") - require.Equalf(t, k, "foo", "Expected %s, got %s", "foo", k) - require.Equalf(t, v, "bar", "Expected %s, got %s", "bar", v) + require.Equalf(t, "foo", k, "Expected %s, 
got %s", "foo", k) + require.Equalf(t, "bar", v, "Expected %s, got %s", "bar", v) k2, v2 := parseKeyValue("baz") - require.Equalf(t, k2, "", "Expected %s, got %s", "", k2) - require.Equalf(t, v2, "baz", "Expected %s, got %s", "baz", v2) + require.Equalf(t, "", k2, "Expected %s, got %s", "", k2) + require.Equalf(t, "baz", v2, "Expected %s, got %s", "baz", v2) } // Test utility functions @@ -1849,7 +1849,7 @@ func TestParse_Ints(t *testing.T) { acc := &testutil.Accumulator{} require.NoError(t, s.Gather(acc)) - require.Equal(t, s.Percentiles, []Number{90.0}) + require.Equal(t, []Number{90.0}, s.Percentiles) } func TestParse_KeyValue(t *testing.T) { diff --git a/plugins/inputs/system/system_test.go b/plugins/inputs/system/system_test.go index a94870e3f2701..22a2ab66d6daf 100644 --- a/plugins/inputs/system/system_test.go +++ b/plugins/inputs/system/system_test.go @@ -21,7 +21,7 @@ func TestUniqueUsers(t *testing.T) { }, }, { - name: "emptry entry", + name: "empty entry", expected: 0, data: []host.UserStat{}, }, diff --git a/plugins/inputs/tacacs/README.md b/plugins/inputs/tacacs/README.md index c8276584c4332..d6a719d1910a6 100644 --- a/plugins/inputs/tacacs/README.md +++ b/plugins/inputs/tacacs/README.md @@ -64,7 +64,7 @@ by the tacacs server, or filled by telegraf in case of a timeout. ### field `responsetime_ms` The field responsetime_ms is response time of the tacacs server -in miliseconds of the furthest achieved stage of auth. +in milliseconds of the furthest achieved stage of auth. In case of timeout, its filled by telegraf to be the value of the configured response_timeout. 
diff --git a/plugins/inputs/temp/README.md b/plugins/inputs/temp/README.md index d68b996e39d88..36af239509103 100644 --- a/plugins/inputs/temp/README.md +++ b/plugins/inputs/temp/README.md @@ -39,6 +39,11 @@ following command: wmic /namespace:\\root\wmi PATH MSAcpi_ThermalZoneTemperature ``` +If the result is "Not Supported" you may be running in a virtualized environment +and not a physical machine. Additionally, if you still get this result your +motherboard or system may not support querying these values. Finally, you may +be required to run as admin to get the values. + ## Example Output ```text diff --git a/plugins/inputs/unbound/unbound_test.go b/plugins/inputs/unbound/unbound_test.go index e9994d7ebe4d6..f1ec205934b04 100644 --- a/plugins/inputs/unbound/unbound_test.go +++ b/plugins/inputs/unbound/unbound_test.go @@ -27,7 +27,7 @@ func TestParseFullOutput(t *testing.T) { require.True(t, acc.HasMeasurement("unbound")) require.Len(t, acc.Metrics, 1) - require.Equal(t, acc.NFields(), 63) + require.Equal(t, 63, acc.NFields()) acc.AssertContainsFields(t, "unbound", parsedFullOutput) } @@ -46,7 +46,7 @@ func TestParseFullOutputThreadAsTag(t *testing.T) { require.True(t, acc.HasMeasurement("unbound_threads")) require.Len(t, acc.Metrics, 2) - require.Equal(t, acc.NFields(), 63) + require.Equal(t, 63, acc.NFields()) acc.AssertContainsFields(t, "unbound", parsedFullOutputThreadAsTagMeasurementUnbound) acc.AssertContainsFields(t, "unbound_threads", parsedFullOutputThreadAsTagMeasurementUnboundThreads) diff --git a/plugins/inputs/varnish/varnish_test.go b/plugins/inputs/varnish/varnish_test.go index 6a3b305134a5a..ca45e8c4c3094 100644 --- a/plugins/inputs/varnish/varnish_test.go +++ b/plugins/inputs/varnish/varnish_test.go @@ -558,7 +558,7 @@ func TestVersions(t *testing.T) { require.Len(t, acc.Metrics, c.size) for _, m := range acc.Metrics { require.NotEmpty(t, m.Fields) - require.Equal(t, m.Measurement, "varnish") + require.Equal(t, "varnish", m.Measurement) for field 
:= range m.Fields { require.NotContains(t, field, "reload_") } @@ -627,11 +627,11 @@ func TestVarnishAdmJson(t *testing.T) { require.NoError(t, err) activeVcl, err := getActiveVCLJson(bytes.NewBuffer(admJSON)) require.NoError(t, err) - require.Equal(t, activeVcl, "boot-123") + require.Equal(t, "boot-123", activeVcl) admJSON, err = os.ReadFile("test_data/" + "varnishadm-reload.json") require.NoError(t, err) activeVcl, err = getActiveVCLJson(bytes.NewBuffer(admJSON)) require.NoError(t, err) - require.Equal(t, activeVcl, "reload_20210723_091821_2056185") + require.Equal(t, "reload_20210723_091821_2056185", activeVcl) } diff --git a/plugins/inputs/vault/vault_test.go b/plugins/inputs/vault/vault_test.go index fde45c790bd5f..0028d13adef6e 100644 --- a/plugins/inputs/vault/vault_test.go +++ b/plugins/inputs/vault/vault_test.go @@ -238,7 +238,10 @@ func TestIntegration(t *testing.T) { // Collect the metrics and compare var acc testutil.Accumulator - require.NoError(t, plugin.Gather(&acc)) + require.Eventually(t, func() bool { + require.NoError(t, plugin.Gather(&acc)) + return len(acc.GetTelegrafMetrics()) > 50 + }, 5*time.Second, 100*time.Millisecond) actual := acc.GetTelegrafMetrics() testutil.RequireMetricsStructureSubset(t, expected, actual, options...) 
diff --git a/plugins/inputs/vsphere/vsphere_test.go b/plugins/inputs/vsphere/vsphere_test.go index a6064be88d338..b933942b9e6e1 100644 --- a/plugins/inputs/vsphere/vsphere_test.go +++ b/plugins/inputs/vsphere/vsphere_test.go @@ -476,7 +476,7 @@ func TestDisconnectedServerBehavior(t *testing.T) { v.DisconnectedServersBehavior = "something else" _, err = NewEndpoint(context.Background(), v, u, v.Log) require.Error(t, err) - require.Equal(t, err.Error(), `"something else" is not a valid value for disconnected_servers_behavior`) + require.Equal(t, `"something else" is not a valid value for disconnected_servers_behavior`, err.Error()) } func testCollection(t *testing.T, excludeClusters bool) { diff --git a/plugins/inputs/webhooks/artifactory/README.md b/plugins/inputs/webhooks/artifactory/README.md index 4dabfacacdabf..a79001610266c 100644 --- a/plugins/inputs/webhooks/artifactory/README.md +++ b/plugins/inputs/webhooks/artifactory/README.md @@ -1,12 +1,20 @@ -# artifactory webhook +# Artifactory Webhook -You need to configure to orginizations artifactory instance/s as detailed via the artifactory webhook documentation: . Multiple webhooks may need be needed to configure different domains. +You need to configure the organization's artifactory instance(s) as detailed +via the artifactory [webhook documentation][webhook docs]. Multiple webhooks may +need be needed to configure different domains. -You can also add a secret that will be used by telegraf to verify the authenticity of the requests. +You can also add a secret that will be used by telegraf to verify the +authenticity of the requests. + +[webhook docs]: https://www.jfrog.com/confluence/display/JFROG/Webhooks ## Events -The different events type can be found found in the webhook documentation: . Events are identified by their `domain` and `event`. The following sections break down each event by domain. +The different events type can be found found in the webhook documentation: +. 
+Events are identified by their `domain` and `event`. +The following sections break down each event by domain. ### Artifact Domain @@ -84,7 +92,8 @@ The Webhook is triggered when an artifact is copied from a repository. #### Properties Added Event -The Webhook is triggered when a property is added to an artifact/folder in a repository, or the repository itself. +The Webhook is triggered when a property is added to an artifact/folder +in a repository, or the repository itself. **Tags:** @@ -95,11 +104,12 @@ The Webhook is triggered when a property is added to an artifact/folder in a rep * 'name' string **Fields** * 'property_key' string -* 'property_values' string (joined comma seperated list) +* 'property_values' string (joined comma separated list) #### Properties Deleted Event -The Webhook is triggered when a property is deleted from an artifact/folder in a repository, or the repository itself. +The Webhook is triggered when a property is deleted from an artifact/folder in a +repository, or the repository itself. **Tags:** @@ -112,13 +122,14 @@ The Webhook is triggered when a property is deleted from an artifact/folder in a **Fields:** * 'property_key' string -* 'property_values' string (joined comma seperated list) +* 'property_values' string (joined comma separated list) ### Docker Domain #### Docker Pushed Event -The Webhook is triggered when a new tag of a Docker image is pushed to a Docker repository. +The Webhook is triggered when a new tag of a Docker image is pushed to a Docker +repository. **Tags:** @@ -135,12 +146,13 @@ The Webhook is triggered when a new tag of a Docker image is pushed to a Docker * 'sha256' string * 'tag' string * 'platforms' []object - * 'achitecture' string + * 'architecture' string * 'os' string #### Docker Deleted Event -The Webhook is triggered when a tag of a Docker image is deleted from a Docker repository. +The Webhook is triggered when a tag of a Docker image is deleted from a Docker +repository. 
**Tags:** @@ -157,7 +169,7 @@ The Webhook is triggered when a tag of a Docker image is deleted from a Docker r * 'sha256' string * 'tag' string * 'platforms' []object - * 'achitecture' string + * 'architecture' string * 'os' string #### Docker Promoted Event @@ -179,7 +191,7 @@ The Webhook is triggered when a tag of a Docker image is promoted. * 'sha256' string * 'tag' string * 'platforms' []object - * 'achitecture' string + * 'architecture' string * 'os' string ### Build Domain @@ -376,7 +388,8 @@ The Webhook is triggered when Release Bundle distribution has failed. #### Release Bundle Version Deletion Started EVent -The Webhook is triggered when a Release Bundle version deletion has started on one or more Edge nodes. +The Webhook is triggered when a Release Bundle version deletion has started on +one or more Edge nodes. **Tags:** @@ -398,7 +411,8 @@ The Webhook is triggered when a Release Bundle version deletion has started on o #### Release Bundle Version Deletion Completed Event -The Webhook is triggered when a Release Bundle version deletion has completed from one or more Edge nodes. +The Webhook is triggered when a Release Bundle version deletion has completed +from one or more Edge nodes. **Tags:** @@ -420,7 +434,8 @@ The Webhook is triggered when a Release Bundle version deletion has completed fr #### Release Bundle Version Deletion Failed Event -The Webhook is triggered when a Release Bundle version deletion has failed on one or more Edge nodes. +The Webhook is triggered when a Release Bundle version deletion has failed on +one or more Edge nodes. **Tags:** @@ -461,7 +476,8 @@ The Webhook is triggered when a Release Bundle was received on an Edge Node. ### Release Bundle Delete Started Event -The Webhook is triggered when a Release Bundle deletion from an Edge Node completed. +The Webhook is triggered when a Release Bundle deletion from an Edge Node +completed. 
**Tags:** @@ -478,7 +494,8 @@ The Webhook is triggered when a Release Bundle deletion from an Edge Node comple #### Release Bundle Delete Completed Event -The Webhook is triggered when a Release Bundle deletion from an Edge Node completed. +The Webhook is triggered when a Release Bundle deletion from an Edge Node +completed. **Tags:** diff --git a/plugins/inputs/webhooks/artifactory/artifactory_webhook_models.go b/plugins/inputs/webhooks/artifactory/artifactory_webhook_models.go index 7186794f5b0a4..33b21be72a29f 100644 --- a/plugins/inputs/webhooks/artifactory/artifactory_webhook_models.go +++ b/plugins/inputs/webhooks/artifactory/artifactory_webhook_models.go @@ -119,7 +119,7 @@ type DockerEvent struct { ImageName string `json:"image_name"` Tag string `json:"tag"` Platforms []struct { - Architecture string `json:"achitecture"` + Architecture string `json:"architecture"` Os string `json:"os"` } `json:"platforms"` } `json:"data"` diff --git a/plugins/inputs/win_perf_counters/README.md b/plugins/inputs/win_perf_counters/README.md index c6810dd1354b3..2b9d06b17fe82 100644 --- a/plugins/inputs/win_perf_counters/README.md +++ b/plugins/inputs/win_perf_counters/README.md @@ -345,6 +345,10 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. ## e.g. IgnoredErrors = ["PDH_NO_DATA"] # IgnoredErrors = [] + ## Maximum size of the buffer for values returned by the API + ## Increase this value if you experience "buffer limit reached" errors. 
+ # MaxBufferSize = "4MiB" + ## NOTE: Due to the way TOML is parsed, tables must be at the END of the ## plugin definition, otherwise additional config options are read as part of ## the table diff --git a/plugins/inputs/win_perf_counters/performance_query.go b/plugins/inputs/win_perf_counters/performance_query.go index 827fd85aa0afa..b548f63909b96 100644 --- a/plugins/inputs/win_perf_counters/performance_query.go +++ b/plugins/inputs/win_perf_counters/performance_query.go @@ -10,6 +10,11 @@ import ( "unsafe" ) +// Initial buffer size for return buffers +const initialBufferSize = uint32(1024) // 1kB + +var errBufferLimitReached = errors.New("buffer limit reached") + // CounterValue is abstraction for PdhFmtCountervalueItemDouble type CounterValue struct { InstanceName string @@ -36,7 +41,7 @@ type PerformanceQuery interface { } type PerformanceQueryCreator interface { - NewPerformanceQuery(string) PerformanceQuery + NewPerformanceQuery(string, uint32) PerformanceQuery } // PdhError represents error returned from Performance Counters API @@ -58,14 +63,14 @@ func NewPdhError(code uint32) error { // PerformanceQueryImpl is implementation of PerformanceQuery interface, which calls phd.dll functions type PerformanceQueryImpl struct { - query pdhQueryHandle + maxBufferSize uint32 + query pdhQueryHandle } -type PerformanceQueryCreatorImpl struct { -} +type PerformanceQueryCreatorImpl struct{} -func (m PerformanceQueryCreatorImpl) NewPerformanceQuery(string) PerformanceQuery { - return &PerformanceQueryImpl{} +func (m PerformanceQueryCreatorImpl) NewPerformanceQuery(_ string, maxBufferSize uint32) PerformanceQuery { + return &PerformanceQueryImpl{maxBufferSize: maxBufferSize} } // Open creates a new counterPath that is used to manage the collection of performance data. 
@@ -124,64 +129,82 @@ func (m *PerformanceQueryImpl) AddEnglishCounterToQuery(counterPath string) (pdh // GetCounterPath return counter information for given handle func (m *PerformanceQueryImpl) GetCounterPath(counterHandle pdhCounterHandle) (string, error) { - var bufSize uint32 - var buff []byte - var ret uint32 - if ret = PdhGetCounterInfo(counterHandle, 0, &bufSize, nil); ret == PdhMoreData { - buff = make([]byte, bufSize) - bufSize = uint32(len(buff)) - if ret = PdhGetCounterInfo(counterHandle, 0, &bufSize, &buff[0]); ret == ErrorSuccess { - ci := (*PdhCounterInfo)(unsafe.Pointer(&buff[0])) //nolint:gosec // G103: Valid use of unsafe call to create PDH_COUNTER_INFO + for buflen := initialBufferSize; buflen <= m.maxBufferSize; buflen *= 2 { + buf := make([]byte, buflen) + + // Get the info with the current buffer size + size := buflen + ret := PdhGetCounterInfo(counterHandle, 0, &size, &buf[0]) + if ret == ErrorSuccess { + ci := (*PdhCounterInfo)(unsafe.Pointer(&buf[0])) //nolint:gosec // G103: Valid use of unsafe call to create PDH_COUNTER_INFO return UTF16PtrToString(ci.SzFullPath), nil } + + // Use the size as a hint if it exceeds the current buffer size + if size > buflen { + buflen = size + } + + // We got a non-recoverable error so exit here + if ret != PdhMoreData { + return "", NewPdhError(ret) + } } - return "", NewPdhError(ret) + + return "", errBufferLimitReached } // ExpandWildCardPath examines local computer and returns those counter paths that match the given counter path which contains wildcard characters. 
func (m *PerformanceQueryImpl) ExpandWildCardPath(counterPath string) ([]string, error) { - var bufSize uint32 - var buff []uint16 - var ret uint32 + for buflen := initialBufferSize; buflen <= m.maxBufferSize; buflen *= 2 { + buf := make([]uint16, buflen) - if ret = PdhExpandWildCardPath(counterPath, nil, &bufSize); ret == PdhMoreData { - buff = make([]uint16, bufSize) - bufSize = uint32(len(buff)) - ret = PdhExpandWildCardPath(counterPath, &buff[0], &bufSize) + // Get the info with the current buffer size + size := buflen + ret := PdhExpandWildCardPath(counterPath, &buf[0], &size) if ret == ErrorSuccess { - list := UTF16ToStringArray(buff) - return list, nil + return UTF16ToStringArray(buf), nil + } + + // Use the size as a hint if it exceeds the current buffer size + if size > buflen { + buflen = size + } + + // We got a non-recoverable error so exit here + if ret != PdhMoreData { + return nil, NewPdhError(ret) } } - return nil, NewPdhError(ret) + + return nil, errBufferLimitReached } // GetFormattedCounterValueDouble computes a displayable value for the specified counter func (m *PerformanceQueryImpl) GetFormattedCounterValueDouble(hCounter pdhCounterHandle) (float64, error) { var counterType uint32 var value PdhFmtCountervalueDouble - var ret uint32 - if ret = PdhGetFormattedCounterValueDouble(hCounter, &counterType, &value); ret == ErrorSuccess { - if value.CStatus == PdhCstatusValidData || value.CStatus == PdhCstatusNewData { - return value.DoubleValue, nil - } - return 0, NewPdhError(value.CStatus) + if ret := PdhGetFormattedCounterValueDouble(hCounter, &counterType, &value); ret != ErrorSuccess { + return 0, NewPdhError(ret) } - return 0, NewPdhError(ret) + if value.CStatus == PdhCstatusValidData || value.CStatus == PdhCstatusNewData { + return value.DoubleValue, nil + } + return 0, NewPdhError(value.CStatus) } func (m *PerformanceQueryImpl) GetFormattedCounterArrayDouble(hCounter pdhCounterHandle) ([]CounterValue, error) { - var buffSize uint32 - var 
itemCount uint32 - var ret uint32 + for buflen := initialBufferSize; buflen <= m.maxBufferSize; buflen *= 2 { + buf := make([]byte, buflen) - if ret = PdhGetFormattedCounterArrayDouble(hCounter, &buffSize, &itemCount, nil); ret == PdhMoreData { - buff := make([]byte, buffSize) - - if ret = PdhGetFormattedCounterArrayDouble(hCounter, &buffSize, &itemCount, &buff[0]); ret == ErrorSuccess { + // Get the info with the current buffer size + var itemCount uint32 + size := buflen + ret := PdhGetFormattedCounterArrayDouble(hCounter, &size, &itemCount, &buf[0]) + if ret == ErrorSuccess { //nolint:gosec // G103: Valid use of unsafe call to create PDH_FMT_COUNTERVALUE_ITEM_DOUBLE - items := (*[1 << 20]PdhFmtCountervalueItemDouble)(unsafe.Pointer(&buff[0]))[:itemCount] + items := (*[1 << 20]PdhFmtCountervalueItemDouble)(unsafe.Pointer(&buf[0]))[:itemCount] values := make([]CounterValue, 0, itemCount) for _, item := range items { if item.FmtValue.CStatus == PdhCstatusValidData || item.FmtValue.CStatus == PdhCstatusNewData { @@ -191,21 +214,32 @@ func (m *PerformanceQueryImpl) GetFormattedCounterArrayDouble(hCounter pdhCounte } return values, nil } + + // Use the size as a hint if it exceeds the current buffer size + if size > buflen { + buflen = size + } + + // We got a non-recoverable error so exit here + if ret != PdhMoreData { + return nil, NewPdhError(ret) + } } - return nil, NewPdhError(ret) + + return nil, errBufferLimitReached } func (m *PerformanceQueryImpl) GetRawCounterArray(hCounter pdhCounterHandle) ([]CounterValue, error) { - var buffSize uint32 - var itemCount uint32 - var ret uint32 + for buflen := initialBufferSize; buflen <= m.maxBufferSize; buflen *= 2 { + buf := make([]byte, buflen) - if ret = PdhGetRawCounterArray(hCounter, &buffSize, &itemCount, nil); ret == PdhMoreData { - buff := make([]byte, buffSize) - - if ret = PdhGetRawCounterArray(hCounter, &buffSize, &itemCount, &buff[0]); ret == ErrorSuccess { + // Get the info with the current buffer size + var 
itemCount uint32 + size := buflen + ret := PdhGetRawCounterArray(hCounter, &size, &itemCount, &buf[0]) + if ret == ErrorSuccess { //nolint:gosec // G103: Valid use of unsafe call to create PDH_RAW_COUNTER_ITEM - items := (*[1 << 20]PdhRawCounterItem)(unsafe.Pointer(&buff[0]))[:itemCount] + items := (*[1 << 20]PdhRawCounterItem)(unsafe.Pointer(&buf[0]))[:itemCount] values := make([]CounterValue, 0, itemCount) for _, item := range items { if item.RawValue.CStatus == PdhCstatusValidData || item.RawValue.CStatus == PdhCstatusNewData { @@ -215,8 +249,19 @@ func (m *PerformanceQueryImpl) GetRawCounterArray(hCounter pdhCounterHandle) ([] } return values, nil } + + // Use the size as a hint if it exceeds the current buffer size + if size > buflen { + buflen = size + } + + // We got a non-recoverable error so exit here + if ret != PdhMoreData { + return nil, NewPdhError(ret) + } } - return nil, NewPdhError(ret) + + return nil, errBufferLimitReached } func (m *PerformanceQueryImpl) CollectData() error { diff --git a/plugins/inputs/win_perf_counters/sample.conf b/plugins/inputs/win_perf_counters/sample.conf index 4736ab11b8bc0..46efab1174efa 100644 --- a/plugins/inputs/win_perf_counters/sample.conf +++ b/plugins/inputs/win_perf_counters/sample.conf @@ -41,6 +41,10 @@ ## e.g. IgnoredErrors = ["PDH_NO_DATA"] # IgnoredErrors = [] + ## Maximum size of the buffer for values returned by the API + ## Increase this value if you experience "buffer limit reached" errors. 
+ # MaxBufferSize = "4MiB" + ## NOTE: Due to the way TOML is parsed, tables must be at the END of the ## plugin definition, otherwise additional config options are read as part of ## the table diff --git a/plugins/inputs/win_perf_counters/win_perf_counters.go b/plugins/inputs/win_perf_counters/win_perf_counters.go index 37c3bec0764a3..f9476ac277f9d 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters.go @@ -7,6 +7,7 @@ import ( _ "embed" "errors" "fmt" + "math" "os" "strings" "sync" @@ -20,6 +21,8 @@ import ( //go:embed sample.conf var sampleConfig string +var defaultMaxBufferSize = config.Size(100 * 1024 * 1024) + type WinPerfCounters struct { PrintValid bool `toml:"PrintValid"` PreVistaSupport bool `toml:"PreVistaSupport" deprecated:"1.7.0;determined dynamically"` @@ -29,6 +32,7 @@ type WinPerfCounters struct { UseWildcardsExpansion bool LocalizeWildcardsExpansion bool IgnoredErrors []string `toml:"IgnoredErrors"` + MaxBufferSize config.Size Sources []string Log telegraf.Logger @@ -207,7 +211,7 @@ func (m *WinPerfCounters) AddItem(counterPath, computer, objectName, instance, c if !ok { hostCounter = &hostCountersInfo{computer: computer, tag: sourceTag} m.hostCounters[computer] = hostCounter - hostCounter.query = m.queryCreator.NewPerformanceQuery(computer) + hostCounter.query = m.queryCreator.NewPerformanceQuery(computer, uint32(m.MaxBufferSize)) if err := hostCounter.query.Open(); err != nil { return err } @@ -579,9 +583,16 @@ func isKnownCounterDataError(err error) bool { } func (m *WinPerfCounters) Init() error { + // Check the buffer size + if m.MaxBufferSize < config.Size(initialBufferSize) { + return fmt.Errorf("maximum buffer size should at least be %d", 2*initialBufferSize) + } + if m.MaxBufferSize > math.MaxUint32 { + return fmt.Errorf("maximum buffer size should be smaller than %d", uint32(math.MaxUint32)) + } + if m.UseWildcardsExpansion && !m.LocalizeWildcardsExpansion { // 
Counters must not have wildcards with this option - found := false wildcards := []string{"*", "?"} @@ -614,6 +625,7 @@ func init() { return &WinPerfCounters{ CountersRefreshInterval: config.Duration(time.Second * 60), LocalizeWildcardsExpansion: true, + MaxBufferSize: defaultMaxBufferSize, queryCreator: &PerformanceQueryCreatorImpl{}, } }) diff --git a/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go b/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go index d2804af726990..b08576163b8c4 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go @@ -18,7 +18,7 @@ func TestWinPerformanceQueryImplIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } - query := &PerformanceQueryImpl{} + query := &PerformanceQueryImpl{maxBufferSize: uint32(defaultMaxBufferSize)} err := query.Close() require.Error(t, err, "uninitialized query must return errors") @@ -62,28 +62,28 @@ func TestWinPerformanceQueryImplIntegration(t *testing.T) { fcounter, err := query.GetFormattedCounterValueDouble(hCounter) require.NoError(t, err) - require.True(t, fcounter > 0) + require.Greater(t, fcounter, float64(0)) rcounter, err := query.GetRawCounterValue(hCounter) require.NoError(t, err) - require.True(t, rcounter > 10000000) + require.Greater(t, rcounter, int64(10000000)) now := time.Now() mtime, err := query.CollectDataWithTime() require.NoError(t, err) - require.True(t, mtime.Sub(now) < time.Second) + require.Less(t, mtime.Sub(now), time.Second) counterPath = "\\Process(*)\\% Processor Time" paths, err := query.ExpandWildCardPath(counterPath) require.NoError(t, err) require.NotNil(t, paths) - require.True(t, len(paths) > 1) + require.Greater(t, len(paths), 1) //nolint:testifylint // https://github.com/Antonboom/testifylint/issues/6 counterPath = "\\Process(_Total)\\*" paths, err = 
query.ExpandWildCardPath(counterPath) require.NoError(t, err) require.NotNil(t, paths) - require.True(t, len(paths) > 1) + require.Greater(t, len(paths), 1) //nolint:testifylint // https://github.com/Antonboom/testifylint/issues/6 require.NoError(t, query.Open()) @@ -104,11 +104,11 @@ func TestWinPerformanceQueryImplIntegration(t *testing.T) { farr, err = query.GetFormattedCounterArrayDouble(hCounter) } require.NoError(t, err) - require.True(t, len(farr) > 0) + require.NotEmpty(t, farr) rarr, err := query.GetRawCounterArray(hCounter) require.NoError(t, err) - require.True(t, len(rarr) > 0, "Too") + require.NotEmpty(t, rarr, "Too") require.NoError(t, query.Close()) } @@ -131,10 +131,11 @@ func TestWinPerfCountersConfigGet1Integration(t *testing.T) { }} m := WinPerfCounters{ - PrintValid: false, - Object: perfObjects, - queryCreator: &PerformanceQueryCreatorImpl{}, - Log: testutil.Logger{}, + PrintValid: false, + Object: perfObjects, + MaxBufferSize: defaultMaxBufferSize, + Log: testutil.Logger{}, + queryCreator: &PerformanceQueryCreatorImpl{}, } require.NoError(t, m.ParseConfig()) @@ -158,10 +159,11 @@ func TestWinPerfCountersConfigGet2Integration(t *testing.T) { }} m := WinPerfCounters{ - PrintValid: false, - Object: perfObjects, - queryCreator: &PerformanceQueryCreatorImpl{}, - Log: testutil.Logger{}, + PrintValid: false, + Object: perfObjects, + MaxBufferSize: defaultMaxBufferSize, + Log: testutil.Logger{}, + queryCreator: &PerformanceQueryCreatorImpl{}, } require.NoError(t, m.ParseConfig()) @@ -200,10 +202,11 @@ func TestWinPerfCountersConfigGet3Integration(t *testing.T) { }} m := WinPerfCounters{ - PrintValid: false, - Object: perfObjects, - queryCreator: &PerformanceQueryCreatorImpl{}, - Log: testutil.Logger{}, + PrintValid: false, + Object: perfObjects, + MaxBufferSize: defaultMaxBufferSize, + Log: testutil.Logger{}, + queryCreator: &PerformanceQueryCreatorImpl{}, } require.NoError(t, m.ParseConfig()) @@ -240,10 +243,11 @@ func 
TestWinPerfCountersConfigGet4Integration(t *testing.T) { }} m := WinPerfCounters{ - PrintValid: false, - Object: perfObjects, - queryCreator: &PerformanceQueryCreatorImpl{}, - Log: testutil.Logger{}, + PrintValid: false, + Object: perfObjects, + MaxBufferSize: defaultMaxBufferSize, + Log: testutil.Logger{}, + queryCreator: &PerformanceQueryCreatorImpl{}, } require.NoError(t, m.ParseConfig()) @@ -280,10 +284,11 @@ func TestWinPerfCountersConfigGet5Integration(t *testing.T) { }} m := WinPerfCounters{ - PrintValid: false, - Object: perfObjects, - queryCreator: &PerformanceQueryCreatorImpl{}, - Log: testutil.Logger{}, + PrintValid: false, + Object: perfObjects, + MaxBufferSize: defaultMaxBufferSize, + Log: testutil.Logger{}, + queryCreator: &PerformanceQueryCreatorImpl{}, } require.NoError(t, m.ParseConfig()) @@ -320,10 +325,11 @@ func TestWinPerfCountersConfigGet6Integration(t *testing.T) { }} m := WinPerfCounters{ - PrintValid: false, - Object: perfObjects, - queryCreator: &PerformanceQueryCreatorImpl{}, - Log: testutil.Logger{}, + PrintValid: false, + Object: perfObjects, + MaxBufferSize: defaultMaxBufferSize, + Log: testutil.Logger{}, + queryCreator: &PerformanceQueryCreatorImpl{}, } require.NoError(t, m.ParseConfig()) @@ -347,10 +353,11 @@ func TestWinPerfCountersConfigGet7Integration(t *testing.T) { }} m := WinPerfCounters{ - PrintValid: false, - Object: perfObjects, - queryCreator: &PerformanceQueryCreatorImpl{}, - Log: testutil.Logger{}, + PrintValid: false, + Object: perfObjects, + MaxBufferSize: defaultMaxBufferSize, + Log: testutil.Logger{}, + queryCreator: &PerformanceQueryCreatorImpl{}, } require.NoError(t, m.ParseConfig()) @@ -387,10 +394,11 @@ func TestWinPerfCountersConfigError1Integration(t *testing.T) { }} m := WinPerfCounters{ - PrintValid: false, - Object: perfObjects, - queryCreator: &PerformanceQueryCreatorImpl{}, - Log: testutil.Logger{}, + PrintValid: false, + Object: perfObjects, + MaxBufferSize: defaultMaxBufferSize, + Log: testutil.Logger{}, 
+ queryCreator: &PerformanceQueryCreatorImpl{}, } require.Error(t, m.ParseConfig()) @@ -414,10 +422,11 @@ func TestWinPerfCountersConfigError2Integration(t *testing.T) { }} m := WinPerfCounters{ - PrintValid: false, - Object: perfObjects, - queryCreator: &PerformanceQueryCreatorImpl{}, - Log: testutil.Logger{}, + PrintValid: false, + Object: perfObjects, + MaxBufferSize: defaultMaxBufferSize, + Log: testutil.Logger{}, + queryCreator: &PerformanceQueryCreatorImpl{}, } require.NoError(t, m.ParseConfig()) @@ -443,10 +452,11 @@ func TestWinPerfCountersConfigError3Integration(t *testing.T) { }} m := WinPerfCounters{ - PrintValid: false, - Object: perfObjects, - queryCreator: &PerformanceQueryCreatorImpl{}, - Log: testutil.Logger{}, + PrintValid: false, + Object: perfObjects, + MaxBufferSize: defaultMaxBufferSize, + Log: testutil.Logger{}, + queryCreator: &PerformanceQueryCreatorImpl{}, } require.Error(t, m.ParseConfig()) @@ -470,10 +480,11 @@ func TestWinPerfCountersCollect1Integration(t *testing.T) { }} m := WinPerfCounters{ - PrintValid: false, - Object: perfObjects, - queryCreator: &PerformanceQueryCreatorImpl{}, - Log: testutil.Logger{}, + PrintValid: false, + Object: perfObjects, + MaxBufferSize: defaultMaxBufferSize, + Log: testutil.Logger{}, + queryCreator: &PerformanceQueryCreatorImpl{}, } var acc testutil.Accumulator @@ -510,9 +521,10 @@ func TestWinPerfCountersCollect2Integration(t *testing.T) { PrintValid: false, UsePerfCounterTime: true, Object: perfObjects, - queryCreator: &PerformanceQueryCreatorImpl{}, UseWildcardsExpansion: true, + MaxBufferSize: defaultMaxBufferSize, Log: testutil.Logger{}, + queryCreator: &PerformanceQueryCreatorImpl{}, } var acc testutil.Accumulator @@ -550,46 +562,48 @@ func TestWinPerfCountersCollectRawIntegration(t *testing.T) { m := WinPerfCounters{ PrintValid: false, Object: perfObjects, - queryCreator: &PerformanceQueryCreatorImpl{}, UseWildcardsExpansion: true, + MaxBufferSize: defaultMaxBufferSize, Log: testutil.Logger{}, + 
queryCreator: &PerformanceQueryCreatorImpl{}, } var acc testutil.Accumulator require.NoError(t, m.Gather(&acc)) time.Sleep(2000 * time.Millisecond) require.NoError(t, m.Gather(&acc)) - require.True(t, len(acc.Metrics) > 1) + require.Greater(t, len(acc.Metrics), 1) //nolint:testifylint // https://github.com/Antonboom/testifylint/issues/6 expectedCounter := "Percent_Idle_Time_Raw" for _, metric := range acc.Metrics { val, ok := metric.Fields[expectedCounter] require.True(t, ok, "Expected presence of %s field", expectedCounter) valInt64, ok := val.(int64) - require.True(t, ok, fmt.Sprintf("Expected int64, got %T", val)) - require.True(t, valInt64 > 0, fmt.Sprintf("Expected > 0, got %d, for %#v", valInt64, metric)) + require.Truef(t, ok, "Expected int64, got %T", val) + require.Greaterf(t, valInt64, int64(0), "Expected > 0, got %d, for %#v", valInt64, metric) } // Test *Array way m = WinPerfCounters{ PrintValid: false, Object: perfObjects, - queryCreator: &PerformanceQueryCreatorImpl{}, UseWildcardsExpansion: false, + MaxBufferSize: defaultMaxBufferSize, Log: testutil.Logger{}, + queryCreator: &PerformanceQueryCreatorImpl{}, } var acc2 testutil.Accumulator require.NoError(t, m.Gather(&acc)) time.Sleep(2000 * time.Millisecond) require.NoError(t, m.Gather(&acc2)) - require.True(t, len(acc2.Metrics) > 1) + require.Greater(t, len(acc2.Metrics), 1) //nolint:testifylint // https://github.com/Antonboom/testifylint/issues/6 for _, metric := range acc2.Metrics { val, ok := metric.Fields[expectedCounter] require.True(t, ok, "Expected presence of %s field", expectedCounter) valInt64, ok := val.(int64) - require.True(t, ok, fmt.Sprintf("Expected int64, got %T", val)) - require.True(t, valInt64 > 0, fmt.Sprintf("Expected > 0, got %d, for %#v", valInt64, metric)) + require.Truef(t, ok, "Expected int64, got %T", val) + require.Greaterf(t, valInt64, int64(0), "Expected > 0, got %d, for %#v", valInt64, metric) } } diff --git 
a/plugins/inputs/win_perf_counters/win_perf_counters_test.go b/plugins/inputs/win_perf_counters/win_perf_counters_test.go index 6ed499f0d41e7..53e0e1bc8586d 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters_test.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters_test.go @@ -214,7 +214,7 @@ type FakePerformanceQueryCreator struct { fakeQueries map[string]*FakePerformanceQuery } -func (m FakePerformanceQueryCreator) NewPerformanceQuery(computer string) PerformanceQuery { +func (m FakePerformanceQueryCreator) NewPerformanceQuery(computer string, _ uint32) PerformanceQuery { var ret PerformanceQuery var ok bool if ret, ok = m.fakeQueries[computer]; !ok { @@ -316,7 +316,7 @@ func TestCounterPathParsing(t *testing.T) { for path, vals := range counterPathsAndRes { h, o, i, c, err := extractCounterInfoFromCounterPath(path) require.NoError(t, err) - require.Equalf(t, vals, []string{h, o, i, c}, "arrays: %#v and %#v are not equal", vals, []string{o, i, c}) + require.Equalf(t, []string{h, o, i, c}, vals, "arrays: %#v and %#v are not equal", vals, []string{o, i, c}) } for _, path := range invalidCounterPaths { _, _, _, _, err := extractCounterInfoFromCounterPath(path) @@ -347,11 +347,11 @@ func TestAddItemSimple(t *testing.T) { counters, ok := m.hostCounters["localhost"] require.True(t, ok) require.Len(t, counters.counters, 1) - require.True(t, counters.counters[0].computer == "localhost") - require.True(t, counters.counters[0].objectName == "O") - require.True(t, counters.counters[0].instance == "I") - require.True(t, counters.counters[0].counter == "c") - require.True(t, counters.counters[0].measurement == "test") + require.Equal(t, "localhost", counters.counters[0].computer) + require.Equal(t, "O", counters.counters[0].objectName) + require.Equal(t, "I", counters.counters[0].instance) + require.Equal(t, "c", counters.counters[0].counter) + require.Equal(t, "test", counters.counters[0].measurement) require.False(t, counters.counters[0].includeTotal) } 
@@ -501,161 +501,161 @@ func TestParseConfigMultiComps(t *testing.T) { counters, ok := m.hostCounters["localhost"] require.True(t, ok) require.Len(t, counters.counters, 8) - require.True(t, counters.tag == hostname()) - require.True(t, counters.counters[0].computer == "localhost") - require.True(t, counters.counters[0].objectName == "O") - require.True(t, counters.counters[0].instance == "I") - require.True(t, counters.counters[0].counter == "C") - require.True(t, counters.counters[0].measurement == "m") - require.True(t, !counters.counters[0].includeTotal) - require.True(t, counters.counters[1].computer == "localhost") - require.True(t, counters.counters[1].objectName == "O1") - require.True(t, counters.counters[1].instance == "I1") - require.True(t, counters.counters[1].counter == "C1") - require.True(t, counters.counters[1].measurement == "m") - require.True(t, !counters.counters[1].includeTotal) - require.True(t, counters.counters[2].computer == "localhost") - require.True(t, counters.counters[2].objectName == "O1") - require.True(t, counters.counters[2].instance == "I2") - require.True(t, counters.counters[2].counter == "C1") - require.True(t, counters.counters[2].measurement == "m") - require.True(t, !counters.counters[2].includeTotal) - require.True(t, counters.counters[3].computer == "localhost") - require.True(t, counters.counters[3].objectName == "O1") - require.True(t, counters.counters[3].instance == "I1") - require.True(t, counters.counters[3].counter == "C2") - require.True(t, counters.counters[3].measurement == "m") - require.True(t, !counters.counters[3].includeTotal) - require.True(t, counters.counters[4].computer == "localhost") - require.True(t, counters.counters[4].objectName == "O1") - require.True(t, counters.counters[4].instance == "I2") - require.True(t, counters.counters[4].counter == "C2") - require.True(t, counters.counters[4].measurement == "m") - require.True(t, !counters.counters[4].includeTotal) - require.True(t, 
counters.counters[5].computer == "localhost") - require.True(t, counters.counters[5].objectName == "O2") - require.True(t, counters.counters[5].instance == "I") - require.True(t, counters.counters[5].counter == "C1") - require.True(t, counters.counters[5].measurement == "m") - require.True(t, !counters.counters[5].includeTotal) - require.True(t, counters.counters[6].computer == "localhost") - require.True(t, counters.counters[6].objectName == "O2") - require.True(t, counters.counters[6].instance == "I") - require.True(t, counters.counters[6].counter == "C2") - require.True(t, counters.counters[6].measurement == "m") - require.True(t, !counters.counters[6].includeTotal) - require.True(t, counters.counters[7].computer == "localhost") - require.True(t, counters.counters[7].objectName == "O2") - require.True(t, counters.counters[7].instance == "I") - require.True(t, counters.counters[7].counter == "C3") - require.True(t, counters.counters[7].measurement == "m") - require.True(t, !counters.counters[7].includeTotal) + require.Equal(t, counters.tag, hostname()) + require.Equal(t, "localhost", counters.counters[0].computer) + require.Equal(t, "O", counters.counters[0].objectName) + require.Equal(t, "I", counters.counters[0].instance) + require.Equal(t, "C", counters.counters[0].counter) + require.Equal(t, "m", counters.counters[0].measurement) + require.False(t, counters.counters[0].includeTotal) + require.Equal(t, "localhost", counters.counters[1].computer) + require.Equal(t, "O1", counters.counters[1].objectName) + require.Equal(t, "I1", counters.counters[1].instance) + require.Equal(t, "C1", counters.counters[1].counter) + require.Equal(t, "m", counters.counters[1].measurement) + require.False(t, counters.counters[1].includeTotal) + require.Equal(t, "localhost", counters.counters[2].computer) + require.Equal(t, "O1", counters.counters[2].objectName) + require.Equal(t, "I2", counters.counters[2].instance) + require.Equal(t, "C1", counters.counters[2].counter) + 
require.Equal(t, "m", counters.counters[2].measurement) + require.False(t, counters.counters[2].includeTotal) + require.Equal(t, "localhost", counters.counters[3].computer) + require.Equal(t, "O1", counters.counters[3].objectName) + require.Equal(t, "I1", counters.counters[3].instance) + require.Equal(t, "C2", counters.counters[3].counter) + require.Equal(t, "m", counters.counters[3].measurement) + require.False(t, counters.counters[3].includeTotal) + require.Equal(t, "localhost", counters.counters[4].computer) + require.Equal(t, "O1", counters.counters[4].objectName) + require.Equal(t, "I2", counters.counters[4].instance) + require.Equal(t, "C2", counters.counters[4].counter) + require.Equal(t, "m", counters.counters[4].measurement) + require.False(t, counters.counters[4].includeTotal) + require.Equal(t, "localhost", counters.counters[5].computer) + require.Equal(t, "O2", counters.counters[5].objectName) + require.Equal(t, "I", counters.counters[5].instance) + require.Equal(t, "C1", counters.counters[5].counter) + require.Equal(t, "m", counters.counters[5].measurement) + require.False(t, counters.counters[5].includeTotal) + require.Equal(t, "localhost", counters.counters[6].computer) + require.Equal(t, "O2", counters.counters[6].objectName) + require.Equal(t, "I", counters.counters[6].instance) + require.Equal(t, "C2", counters.counters[6].counter) + require.Equal(t, "m", counters.counters[6].measurement) + require.False(t, counters.counters[6].includeTotal) + require.Equal(t, "localhost", counters.counters[7].computer) + require.Equal(t, "O2", counters.counters[7].objectName) + require.Equal(t, "I", counters.counters[7].instance) + require.Equal(t, "C3", counters.counters[7].counter) + require.Equal(t, "m", counters.counters[7].measurement) + require.False(t, counters.counters[7].includeTotal) counters, ok = m.hostCounters["cmp1"] require.True(t, ok) require.Len(t, counters.counters, 8) - require.True(t, counters.tag == "cmp1") - require.True(t, 
counters.counters[0].computer == "cmp1") - require.True(t, counters.counters[0].objectName == "O") - require.True(t, counters.counters[0].instance == "I") - require.True(t, counters.counters[0].counter == "C") - require.True(t, counters.counters[0].measurement == "m") - require.True(t, !counters.counters[0].includeTotal) - require.True(t, counters.counters[1].computer == "cmp1") - require.True(t, counters.counters[1].objectName == "O1") - require.True(t, counters.counters[1].instance == "I1") - require.True(t, counters.counters[1].counter == "C1") - require.True(t, counters.counters[1].measurement == "m") - require.True(t, !counters.counters[1].includeTotal) - require.True(t, counters.counters[2].computer == "cmp1") - require.True(t, counters.counters[2].objectName == "O1") - require.True(t, counters.counters[2].instance == "I2") - require.True(t, counters.counters[2].counter == "C1") - require.True(t, counters.counters[2].measurement == "m") - require.True(t, !counters.counters[2].includeTotal) - require.True(t, counters.counters[3].computer == "cmp1") - require.True(t, counters.counters[3].objectName == "O1") - require.True(t, counters.counters[3].instance == "I1") - require.True(t, counters.counters[3].counter == "C2") - require.True(t, counters.counters[3].measurement == "m") - require.True(t, !counters.counters[3].includeTotal) - require.True(t, counters.counters[4].computer == "cmp1") - require.True(t, counters.counters[4].objectName == "O1") - require.True(t, counters.counters[4].instance == "I2") - require.True(t, counters.counters[4].counter == "C2") - require.True(t, counters.counters[4].measurement == "m") - require.True(t, !counters.counters[4].includeTotal) - require.True(t, counters.counters[5].computer == "cmp1") - require.True(t, counters.counters[5].objectName == "O2") - require.True(t, counters.counters[5].instance == "I") - require.True(t, counters.counters[5].counter == "C1") - require.True(t, counters.counters[5].measurement == "m") - 
require.True(t, !counters.counters[5].includeTotal) - require.True(t, counters.counters[6].computer == "cmp1") - require.True(t, counters.counters[6].objectName == "O2") - require.True(t, counters.counters[6].instance == "I") - require.True(t, counters.counters[6].counter == "C2") - require.True(t, counters.counters[6].measurement == "m") - require.True(t, !counters.counters[6].includeTotal) - require.True(t, counters.counters[7].computer == "cmp1") - require.True(t, counters.counters[7].objectName == "O2") - require.True(t, counters.counters[7].instance == "I") - require.True(t, counters.counters[7].counter == "C3") - require.True(t, counters.counters[7].measurement == "m") - require.True(t, !counters.counters[7].includeTotal) + require.Equal(t, "cmp1", counters.tag) + require.Equal(t, "cmp1", counters.counters[0].computer) + require.Equal(t, "O", counters.counters[0].objectName) + require.Equal(t, "I", counters.counters[0].instance) + require.Equal(t, "C", counters.counters[0].counter) + require.Equal(t, "m", counters.counters[0].measurement) + require.False(t, counters.counters[0].includeTotal) + require.Equal(t, "cmp1", counters.counters[1].computer) + require.Equal(t, "O1", counters.counters[1].objectName) + require.Equal(t, "I1", counters.counters[1].instance) + require.Equal(t, "C1", counters.counters[1].counter) + require.Equal(t, "m", counters.counters[1].measurement) + require.False(t, counters.counters[1].includeTotal) + require.Equal(t, "cmp1", counters.counters[2].computer) + require.Equal(t, "O1", counters.counters[2].objectName) + require.Equal(t, "I2", counters.counters[2].instance) + require.Equal(t, "C1", counters.counters[2].counter) + require.Equal(t, "m", counters.counters[2].measurement) + require.False(t, counters.counters[2].includeTotal) + require.Equal(t, "cmp1", counters.counters[3].computer) + require.Equal(t, "O1", counters.counters[3].objectName) + require.Equal(t, "I1", counters.counters[3].instance) + require.Equal(t, "C2", 
counters.counters[3].counter) + require.Equal(t, "m", counters.counters[3].measurement) + require.False(t, counters.counters[3].includeTotal) + require.Equal(t, "cmp1", counters.counters[4].computer) + require.Equal(t, "O1", counters.counters[4].objectName) + require.Equal(t, "I2", counters.counters[4].instance) + require.Equal(t, "C2", counters.counters[4].counter) + require.Equal(t, "m", counters.counters[4].measurement) + require.False(t, counters.counters[4].includeTotal) + require.Equal(t, "cmp1", counters.counters[5].computer) + require.Equal(t, "O2", counters.counters[5].objectName) + require.Equal(t, "I", counters.counters[5].instance) + require.Equal(t, "C1", counters.counters[5].counter) + require.Equal(t, "m", counters.counters[5].measurement) + require.False(t, counters.counters[5].includeTotal) + require.Equal(t, "cmp1", counters.counters[6].computer) + require.Equal(t, "O2", counters.counters[6].objectName) + require.Equal(t, "I", counters.counters[6].instance) + require.Equal(t, "C2", counters.counters[6].counter) + require.Equal(t, "m", counters.counters[6].measurement) + require.False(t, counters.counters[6].includeTotal) + require.Equal(t, "cmp1", counters.counters[7].computer) + require.Equal(t, "O2", counters.counters[7].objectName) + require.Equal(t, "I", counters.counters[7].instance) + require.Equal(t, "C3", counters.counters[7].counter) + require.Equal(t, "m", counters.counters[7].measurement) + require.False(t, counters.counters[7].includeTotal) counters, ok = m.hostCounters["cmp2"] require.True(t, ok) require.Len(t, counters.counters, 8) - require.True(t, counters.tag == "cmp2") - require.True(t, counters.counters[0].computer == "cmp2") - require.True(t, counters.counters[0].objectName == "O") - require.True(t, counters.counters[0].instance == "I") - require.True(t, counters.counters[0].counter == "C") - require.True(t, counters.counters[0].measurement == "m") - require.True(t, !counters.counters[0].includeTotal) - require.True(t, 
counters.counters[1].computer == "cmp2") - require.True(t, counters.counters[1].objectName == "O1") - require.True(t, counters.counters[1].instance == "I1") - require.True(t, counters.counters[1].counter == "C1") - require.True(t, counters.counters[1].measurement == "m") - require.True(t, !counters.counters[1].includeTotal) - require.True(t, counters.counters[2].computer == "cmp2") - require.True(t, counters.counters[2].objectName == "O1") - require.True(t, counters.counters[2].instance == "I2") - require.True(t, counters.counters[2].counter == "C1") - require.True(t, counters.counters[2].measurement == "m") - require.True(t, !counters.counters[2].includeTotal) - require.True(t, counters.counters[3].computer == "cmp2") - require.True(t, counters.counters[3].objectName == "O1") - require.True(t, counters.counters[3].instance == "I1") - require.True(t, counters.counters[3].counter == "C2") - require.True(t, counters.counters[3].measurement == "m") - require.True(t, !counters.counters[3].includeTotal) - require.True(t, counters.counters[4].computer == "cmp2") - require.True(t, counters.counters[4].objectName == "O1") - require.True(t, counters.counters[4].instance == "I2") - require.True(t, counters.counters[4].counter == "C2") - require.True(t, counters.counters[4].measurement == "m") - require.True(t, !counters.counters[4].includeTotal) - require.True(t, counters.counters[5].computer == "cmp2") - require.True(t, counters.counters[5].objectName == "O2") - require.True(t, counters.counters[5].instance == "I") - require.True(t, counters.counters[5].counter == "C1") - require.True(t, counters.counters[5].measurement == "m") - require.True(t, !counters.counters[5].includeTotal) - require.True(t, counters.counters[6].computer == "cmp2") - require.True(t, counters.counters[6].objectName == "O2") - require.True(t, counters.counters[6].instance == "I") - require.True(t, counters.counters[6].counter == "C2") - require.True(t, counters.counters[6].measurement == "m") - 
require.True(t, !counters.counters[6].includeTotal) - require.True(t, counters.counters[7].computer == "cmp2") - require.True(t, counters.counters[7].objectName == "O2") - require.True(t, counters.counters[7].instance == "I") - require.True(t, counters.counters[7].counter == "C3") - require.True(t, counters.counters[7].measurement == "m") - require.True(t, !counters.counters[7].includeTotal) + require.Equal(t, "cmp2", counters.tag) + require.Equal(t, "cmp2", counters.counters[0].computer) + require.Equal(t, "O", counters.counters[0].objectName) + require.Equal(t, "I", counters.counters[0].instance) + require.Equal(t, "C", counters.counters[0].counter) + require.Equal(t, "m", counters.counters[0].measurement) + require.False(t, counters.counters[0].includeTotal) + require.Equal(t, "cmp2", counters.counters[1].computer) + require.Equal(t, "O1", counters.counters[1].objectName) + require.Equal(t, "I1", counters.counters[1].instance) + require.Equal(t, "C1", counters.counters[1].counter) + require.Equal(t, "m", counters.counters[1].measurement) + require.False(t, counters.counters[1].includeTotal) + require.Equal(t, "cmp2", counters.counters[2].computer) + require.Equal(t, "O1", counters.counters[2].objectName) + require.Equal(t, "I2", counters.counters[2].instance) + require.Equal(t, "C1", counters.counters[2].counter) + require.Equal(t, "m", counters.counters[2].measurement) + require.False(t, counters.counters[2].includeTotal) + require.Equal(t, "cmp2", counters.counters[3].computer) + require.Equal(t, "O1", counters.counters[3].objectName) + require.Equal(t, "I1", counters.counters[3].instance) + require.Equal(t, "C2", counters.counters[3].counter) + require.Equal(t, "m", counters.counters[3].measurement) + require.False(t, counters.counters[3].includeTotal) + require.Equal(t, "cmp2", counters.counters[4].computer) + require.Equal(t, "O1", counters.counters[4].objectName) + require.Equal(t, "I2", counters.counters[4].instance) + require.Equal(t, "C2", 
counters.counters[4].counter) + require.Equal(t, "m", counters.counters[4].measurement) + require.False(t, counters.counters[4].includeTotal) + require.Equal(t, "cmp2", counters.counters[5].computer) + require.Equal(t, "O2", counters.counters[5].objectName) + require.Equal(t, "I", counters.counters[5].instance) + require.Equal(t, "C1", counters.counters[5].counter) + require.Equal(t, "m", counters.counters[5].measurement) + require.False(t, counters.counters[5].includeTotal) + require.Equal(t, "cmp2", counters.counters[6].computer) + require.Equal(t, "O2", counters.counters[6].objectName) + require.Equal(t, "I", counters.counters[6].instance) + require.Equal(t, "C2", counters.counters[6].counter) + require.Equal(t, "m", counters.counters[6].measurement) + require.False(t, counters.counters[6].includeTotal) + require.Equal(t, "cmp2", counters.counters[7].computer) + require.Equal(t, "O2", counters.counters[7].objectName) + require.Equal(t, "I", counters.counters[7].instance) + require.Equal(t, "C3", counters.counters[7].counter) + require.Equal(t, "m", counters.counters[7].measurement) + require.False(t, counters.counters[7].includeTotal) } func TestParseConfigMultiCompsOverrideMultiplePerfObjects(t *testing.T) { @@ -720,86 +720,86 @@ func TestParseConfigMultiCompsOverrideMultiplePerfObjects(t *testing.T) { counters, ok := m.hostCounters["localhost"] require.True(t, ok) require.Len(t, counters.counters, 4) - require.True(t, counters.counters[0].computer == "localhost") - require.True(t, counters.counters[0].objectName == "O") - require.True(t, counters.counters[0].instance == "I1") - require.True(t, counters.counters[0].counter == "C1") - require.True(t, counters.counters[0].measurement == "m") - require.True(t, !counters.counters[0].includeTotal) - require.True(t, counters.counters[1].computer == "localhost") - require.True(t, counters.counters[1].objectName == "O") - require.True(t, counters.counters[1].instance == "I2") - require.True(t, 
counters.counters[1].counter == "C1") - require.True(t, counters.counters[1].measurement == "m") - require.True(t, !counters.counters[1].includeTotal) - require.True(t, counters.counters[2].computer == "localhost") - require.True(t, counters.counters[2].objectName == "O") - require.True(t, counters.counters[2].instance == "I1") - require.True(t, counters.counters[2].counter == "C2") - require.True(t, counters.counters[2].measurement == "m") - require.True(t, !counters.counters[2].includeTotal) - require.True(t, counters.counters[3].computer == "localhost") - require.True(t, counters.counters[3].objectName == "O") - require.True(t, counters.counters[3].instance == "I2") - require.True(t, counters.counters[3].counter == "C2") - require.True(t, counters.counters[3].measurement == "m") - require.True(t, !counters.counters[3].includeTotal) + require.Equal(t, "localhost", counters.counters[0].computer) + require.Equal(t, "O", counters.counters[0].objectName) + require.Equal(t, "I1", counters.counters[0].instance) + require.Equal(t, "C1", counters.counters[0].counter) + require.Equal(t, "m", counters.counters[0].measurement) + require.False(t, counters.counters[0].includeTotal) + require.Equal(t, "localhost", counters.counters[1].computer) + require.Equal(t, "O", counters.counters[1].objectName) + require.Equal(t, "I2", counters.counters[1].instance) + require.Equal(t, "C1", counters.counters[1].counter) + require.Equal(t, "m", counters.counters[1].measurement) + require.False(t, counters.counters[1].includeTotal) + require.Equal(t, "localhost", counters.counters[2].computer) + require.Equal(t, "O", counters.counters[2].objectName) + require.Equal(t, "I1", counters.counters[2].instance) + require.Equal(t, "C2", counters.counters[2].counter) + require.Equal(t, "m", counters.counters[2].measurement) + require.False(t, counters.counters[2].includeTotal) + require.Equal(t, "localhost", counters.counters[3].computer) + require.Equal(t, "O", counters.counters[3].objectName) + 
require.Equal(t, "I2", counters.counters[3].instance) + require.Equal(t, "C2", counters.counters[3].counter) + require.Equal(t, "m", counters.counters[3].measurement) + require.False(t, counters.counters[3].includeTotal) counters, ok = m.hostCounters["cmp1"] require.True(t, ok) require.Len(t, counters.counters, 4) - require.True(t, counters.counters[0].computer == "cmp1") - require.True(t, counters.counters[0].objectName == "O1") - require.True(t, counters.counters[0].instance == "I1") - require.True(t, counters.counters[0].counter == "C1") - require.True(t, counters.counters[0].measurement == "m") - require.True(t, !counters.counters[0].includeTotal) - require.True(t, counters.counters[1].computer == "cmp1") - require.True(t, counters.counters[1].objectName == "O1") - require.True(t, counters.counters[1].instance == "I2") - require.True(t, counters.counters[1].counter == "C1") - require.True(t, counters.counters[1].measurement == "m") - require.True(t, !counters.counters[1].includeTotal) - require.True(t, counters.counters[2].computer == "cmp1") - require.True(t, counters.counters[2].objectName == "O1") - require.True(t, counters.counters[2].instance == "I1") - require.True(t, counters.counters[2].counter == "C2") - require.True(t, counters.counters[2].measurement == "m") - require.True(t, !counters.counters[2].includeTotal) - require.True(t, counters.counters[3].computer == "cmp1") - require.True(t, counters.counters[3].objectName == "O1") - require.True(t, counters.counters[3].instance == "I2") - require.True(t, counters.counters[3].counter == "C2") - require.True(t, counters.counters[3].measurement == "m") - require.True(t, !counters.counters[3].includeTotal) + require.Equal(t, "cmp1", counters.counters[0].computer) + require.Equal(t, "O1", counters.counters[0].objectName) + require.Equal(t, "I1", counters.counters[0].instance) + require.Equal(t, "C1", counters.counters[0].counter) + require.Equal(t, "m", counters.counters[0].measurement) + require.False(t, 
counters.counters[0].includeTotal) + require.Equal(t, "cmp1", counters.counters[1].computer) + require.Equal(t, "O1", counters.counters[1].objectName) + require.Equal(t, "I2", counters.counters[1].instance) + require.Equal(t, "C1", counters.counters[1].counter) + require.Equal(t, "m", counters.counters[1].measurement) + require.False(t, counters.counters[1].includeTotal) + require.Equal(t, "cmp1", counters.counters[2].computer) + require.Equal(t, "O1", counters.counters[2].objectName) + require.Equal(t, "I1", counters.counters[2].instance) + require.Equal(t, "C2", counters.counters[2].counter) + require.Equal(t, "m", counters.counters[2].measurement) + require.False(t, counters.counters[2].includeTotal) + require.Equal(t, "cmp1", counters.counters[3].computer) + require.Equal(t, "O1", counters.counters[3].objectName) + require.Equal(t, "I2", counters.counters[3].instance) + require.Equal(t, "C2", counters.counters[3].counter) + require.Equal(t, "m", counters.counters[3].measurement) + require.False(t, counters.counters[3].includeTotal) counters, ok = m.hostCounters["cmp2"] require.True(t, ok) require.Len(t, counters.counters, 4) - require.True(t, counters.counters[0].computer == "cmp2") - require.True(t, counters.counters[0].objectName == "O2") - require.True(t, counters.counters[0].instance == "I1") - require.True(t, counters.counters[0].counter == "C1") - require.True(t, counters.counters[0].measurement == "m") - require.True(t, !counters.counters[0].includeTotal) - require.True(t, counters.counters[1].computer == "cmp2") - require.True(t, counters.counters[1].objectName == "O2") - require.True(t, counters.counters[1].instance == "I2") - require.True(t, counters.counters[1].counter == "C1") - require.True(t, counters.counters[1].measurement == "m") - require.True(t, !counters.counters[1].includeTotal) - require.True(t, counters.counters[2].computer == "cmp2") - require.True(t, counters.counters[2].objectName == "O2") - require.True(t, 
counters.counters[2].instance == "I1") - require.True(t, counters.counters[2].counter == "C2") - require.True(t, counters.counters[2].measurement == "m") - require.True(t, !counters.counters[2].includeTotal) - require.True(t, counters.counters[3].computer == "cmp2") - require.True(t, counters.counters[3].objectName == "O2") - require.True(t, counters.counters[3].instance == "I2") - require.True(t, counters.counters[3].counter == "C2") - require.True(t, counters.counters[3].measurement == "m") - require.True(t, !counters.counters[3].includeTotal) + require.Equal(t, "cmp2", counters.counters[0].computer) + require.Equal(t, "O2", counters.counters[0].objectName) + require.Equal(t, "I1", counters.counters[0].instance) + require.Equal(t, "C1", counters.counters[0].counter) + require.Equal(t, "m", counters.counters[0].measurement) + require.False(t, counters.counters[0].includeTotal) + require.Equal(t, "cmp2", counters.counters[1].computer) + require.Equal(t, "O2", counters.counters[1].objectName) + require.Equal(t, "I2", counters.counters[1].instance) + require.Equal(t, "C1", counters.counters[1].counter) + require.Equal(t, "m", counters.counters[1].measurement) + require.False(t, counters.counters[1].includeTotal) + require.Equal(t, "cmp2", counters.counters[2].computer) + require.Equal(t, "O2", counters.counters[2].objectName) + require.Equal(t, "I1", counters.counters[2].instance) + require.Equal(t, "C2", counters.counters[2].counter) + require.Equal(t, "m", counters.counters[2].measurement) + require.False(t, counters.counters[2].includeTotal) + require.Equal(t, "cmp2", counters.counters[3].computer) + require.Equal(t, "O2", counters.counters[3].objectName) + require.Equal(t, "I2", counters.counters[3].instance) + require.Equal(t, "C2", counters.counters[3].counter) + require.Equal(t, "m", counters.counters[3].measurement) + require.False(t, counters.counters[3].includeTotal) } func TestParseConfigMultiCompsOverrideOnePerfObject(t *testing.T) { @@ -871,77 +871,77 @@ 
func TestParseConfigMultiCompsOverrideOnePerfObject(t *testing.T) { counters, ok := m.hostCounters["localhost"] require.True(t, ok) require.Len(t, counters.counters, 1) - require.True(t, counters.tag == hostname()) - require.True(t, counters.counters[0].computer == "localhost") - require.True(t, counters.counters[0].objectName == "O1") - require.True(t, counters.counters[0].instance == "I") - require.True(t, counters.counters[0].counter == "C") - require.True(t, counters.counters[0].measurement == "m") - require.True(t, !counters.counters[0].includeTotal) + require.Equal(t, hostname(), counters.tag) + require.Equal(t, "localhost", counters.counters[0].computer) + require.Equal(t, "O1", counters.counters[0].objectName) + require.Equal(t, "I", counters.counters[0].instance) + require.Equal(t, "C", counters.counters[0].counter) + require.Equal(t, "m", counters.counters[0].measurement) + require.False(t, counters.counters[0].includeTotal) counters, ok = m.hostCounters["cmp1"] require.True(t, ok) require.Len(t, counters.counters, 5) - require.True(t, counters.tag == "cmp1") - require.True(t, counters.counters[0].computer == "cmp1") - require.True(t, counters.counters[0].objectName == "O") - require.True(t, counters.counters[0].instance == "I1") - require.True(t, counters.counters[0].counter == "C1") - require.True(t, counters.counters[0].measurement == "m") - require.True(t, !counters.counters[0].includeTotal) - require.True(t, counters.counters[0].computer == "cmp1") - require.True(t, counters.counters[1].objectName == "O") - require.True(t, counters.counters[1].instance == "I2") - require.True(t, counters.counters[1].counter == "C1") - require.True(t, counters.counters[1].measurement == "m") - require.True(t, !counters.counters[1].includeTotal) - require.True(t, counters.counters[2].computer == "cmp1") - require.True(t, counters.counters[2].objectName == "O") - require.True(t, counters.counters[2].instance == "I1") - require.True(t, counters.counters[2].counter == 
"C2") - require.True(t, counters.counters[2].measurement == "m") - require.True(t, !counters.counters[2].includeTotal) - require.True(t, counters.counters[3].computer == "cmp1") - require.True(t, counters.counters[3].objectName == "O") - require.True(t, counters.counters[3].instance == "I2") - require.True(t, counters.counters[3].counter == "C2") - require.True(t, counters.counters[3].measurement == "m") - require.True(t, !counters.counters[3].includeTotal) - require.True(t, counters.counters[4].computer == "cmp1") - require.True(t, counters.counters[4].objectName == "O1") - require.True(t, counters.counters[4].instance == "I") - require.True(t, counters.counters[4].counter == "C") - require.True(t, counters.counters[4].measurement == "m") - require.True(t, !counters.counters[4].includeTotal) + require.Equal(t, "cmp1", counters.tag) + require.Equal(t, "cmp1", counters.counters[0].computer) + require.Equal(t, "O", counters.counters[0].objectName) + require.Equal(t, "I1", counters.counters[0].instance) + require.Equal(t, "C1", counters.counters[0].counter) + require.Equal(t, "m", counters.counters[0].measurement) + require.False(t, counters.counters[0].includeTotal) + require.Equal(t, "cmp1", counters.counters[1].computer) + require.Equal(t, "O", counters.counters[1].objectName) + require.Equal(t, "I2", counters.counters[1].instance) + require.Equal(t, "C1", counters.counters[1].counter) + require.Equal(t, "m", counters.counters[1].measurement) + require.False(t, counters.counters[1].includeTotal) + require.Equal(t, "cmp1", counters.counters[2].computer) + require.Equal(t, "O", counters.counters[2].objectName) + require.Equal(t, "I1", counters.counters[2].instance) + require.Equal(t, "C2", counters.counters[2].counter) + require.Equal(t, "m", counters.counters[2].measurement) + require.False(t, counters.counters[2].includeTotal) + require.Equal(t, "cmp1", counters.counters[3].computer) + require.Equal(t, "O", counters.counters[3].objectName) + require.Equal(t, "I2", 
counters.counters[3].instance) + require.Equal(t, "C2", counters.counters[3].counter) + require.Equal(t, "m", counters.counters[3].measurement) + require.False(t, counters.counters[3].includeTotal) + require.Equal(t, "cmp1", counters.counters[4].computer) + require.Equal(t, "O1", counters.counters[4].objectName) + require.Equal(t, "I", counters.counters[4].instance) + require.Equal(t, "C", counters.counters[4].counter) + require.Equal(t, "m", counters.counters[4].measurement) + require.False(t, counters.counters[4].includeTotal) counters, ok = m.hostCounters["cmp2"] require.True(t, ok) require.Len(t, counters.counters, 4) - require.True(t, counters.tag == "cmp2") - require.True(t, counters.counters[0].computer == "cmp2") - require.True(t, counters.counters[0].objectName == "O") - require.True(t, counters.counters[0].instance == "I1") - require.True(t, counters.counters[0].counter == "C1") - require.True(t, counters.counters[0].measurement == "m") - require.True(t, !counters.counters[0].includeTotal) - require.True(t, counters.counters[1].computer == "cmp2") - require.True(t, counters.counters[1].objectName == "O") - require.True(t, counters.counters[1].instance == "I2") - require.True(t, counters.counters[1].counter == "C1") - require.True(t, counters.counters[1].measurement == "m") - require.True(t, !counters.counters[1].includeTotal) - require.True(t, counters.counters[2].computer == "cmp2") - require.True(t, counters.counters[2].objectName == "O") - require.True(t, counters.counters[2].instance == "I1") - require.True(t, counters.counters[2].counter == "C2") - require.True(t, counters.counters[2].measurement == "m") - require.True(t, !counters.counters[2].includeTotal) - require.True(t, counters.counters[3].computer == "cmp2") - require.True(t, counters.counters[3].objectName == "O") - require.True(t, counters.counters[3].instance == "I2") - require.True(t, counters.counters[3].counter == "C2") - require.True(t, counters.counters[3].measurement == "m") - 
require.True(t, !counters.counters[3].includeTotal) + require.Equal(t, "cmp2", counters.tag) + require.Equal(t, "cmp2", counters.counters[0].computer) + require.Equal(t, "O", counters.counters[0].objectName) + require.Equal(t, "I1", counters.counters[0].instance) + require.Equal(t, "C1", counters.counters[0].counter) + require.Equal(t, "m", counters.counters[0].measurement) + require.False(t, counters.counters[0].includeTotal) + require.Equal(t, "cmp2", counters.counters[1].computer) + require.Equal(t, "O", counters.counters[1].objectName) + require.Equal(t, "I2", counters.counters[1].instance) + require.Equal(t, "C1", counters.counters[1].counter) + require.Equal(t, "m", counters.counters[1].measurement) + require.False(t, counters.counters[1].includeTotal) + require.Equal(t, "cmp2", counters.counters[2].computer) + require.Equal(t, "O", counters.counters[2].objectName) + require.Equal(t, "I1", counters.counters[2].instance) + require.Equal(t, "C2", counters.counters[2].counter) + require.Equal(t, "m", counters.counters[2].measurement) + require.False(t, counters.counters[2].includeTotal) + require.Equal(t, "cmp2", counters.counters[3].computer) + require.Equal(t, "O", counters.counters[3].objectName) + require.Equal(t, "I2", counters.counters[3].instance) + require.Equal(t, "C2", counters.counters[3].counter) + require.Equal(t, "m", counters.counters[3].measurement) + require.False(t, counters.counters[3].includeTotal) } func TestParseConfigLocalhost(t *testing.T) { @@ -2043,6 +2043,7 @@ func TestLocalizeWildcardsExpansion(t *testing.T) { []string{"_Total"}, []string{counter}, true, false, false), LocalizeWildcardsExpansion: false, UseWildcardsExpansion: true, + MaxBufferSize: defaultMaxBufferSize, Log: testutil.Logger{}, } diff --git a/plugins/inputs/wireless/wireless_test.go b/plugins/inputs/wireless/wireless_test.go index f16c80ea6f8ad..71904c56625c7 100644 --- a/plugins/inputs/wireless/wireless_test.go +++ b/plugins/inputs/wireless/wireless_test.go @@ -52,5 
+52,5 @@ func TestLoadWirelessTable(t *testing.T) { require.NoError(t, err) as := require.New(t) - as.Equal(metrics, expectedMetrics) + as.Equal(expectedMetrics, metrics) } diff --git a/plugins/inputs/x509_cert/x509_cert_test.go b/plugins/inputs/x509_cert/x509_cert_test.go index a2ebab51bd69a..46d2aeea25f42 100644 --- a/plugins/inputs/x509_cert/x509_cert_test.go +++ b/plugins/inputs/x509_cert/x509_cert_test.go @@ -260,7 +260,7 @@ func TestGatherExcludeRootCerts(t *testing.T) { require.NoError(t, sc.Gather(&acc)) require.True(t, acc.HasMeasurement("x509_cert")) - require.Equal(t, acc.NMetrics(), uint64(1)) + require.Equal(t, uint64(1), acc.NMetrics()) } func TestGatherChain(t *testing.T) { diff --git a/plugins/inputs/zfs/README.md b/plugins/inputs/zfs/README.md index 77448c93c7288..63e33985fc859 100644 --- a/plugins/inputs/zfs/README.md +++ b/plugins/inputs/zfs/README.md @@ -43,7 +43,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. By default this plugin collects metrics about ZFS internals pool and dataset. These metrics are either counters or measure sizes in bytes. These metrics will be in the `zfs` measurement with the field -names listed bellow. +names listed below. If `poolMetrics` is enabled then additional metrics will be gathered for each pool. @@ -52,7 +52,7 @@ If `datasetMetrics` is enabled then additional metrics will be gathered for each dataset. - zfs - With fields listed bellow. + With fields listed below. 
### ARC Stats (FreeBSD and Linux) @@ -223,6 +223,27 @@ For ZFS >= 2.1.x the format has changed significantly: - nunlinks (integer, count) - nunlinked (integer, count) +For ZFS >= 2.2.x the following additional fields are available: + +- additional fields for ZFS > 2.2.x + - zil_commit_count (integer, count) + - zil_commit_writer_count (integer, count) + - zil_itx_count (integer, count) + - zil_itx_indirect_count (integer, count) + - zil_itx_indirect_bytes (integer, bytes) + - zil_itx_copied_count (integer, count) + - zil_itx_copied_bytes (integer, bytes) + - zil_itx_needcopy_count (integer, count) + - zil_itx_needcopy_bytes (integer, bytes) + - zil_itx_metaslab_normal_count (integer, count) + - zil_itx_metaslab_normal_bytes (integer, bytes) + - zil_itx_metaslab_normal_write (integer, bytes) + - zil_itx_metaslab_normal_alloc (integer, bytes) + - zil_itx_metaslab_slog_count (integer, count) + - zil_itx_metaslab_slog_bytes (integer, bytes) + - zil_itx_metaslab_slog_write (integer, bytes) + - zil_itx_metaslab_slog_alloc (integer, bytes) + On FreeBSD: - zfs_pool @@ -391,6 +412,7 @@ memory is too low) ### ZIL (Linux Only) -note: ZIL measurements are system-wide, neither per-pool nor per-dataset +note: `zil` measurements in `kstatMetrics` are system-wide, in `poolMetrics` +they are pool-wide `zil_commit_count` counts when ZFS transactions are committed to a ZIL diff --git a/plugins/inputs/zfs/zfs_linux.go b/plugins/inputs/zfs/zfs_linux.go index 1a5ab8297b883..7409c91cceeee 100644 --- a/plugins/inputs/zfs/zfs_linux.go +++ b/plugins/inputs/zfs/zfs_linux.go @@ -84,7 +84,7 @@ func getTags(pools []poolInfo) map[string]string { } func gather(lines []string, fileLines int) ([]string, []string, error) { - if len(lines) != fileLines { + if len(lines) < fileLines { return nil, nil, errors.New("expected lines in kstat does not match") } @@ -172,7 +172,7 @@ func gatherPoolStats(pool poolInfo, acc telegraf.Accumulator) error { } if gatherErr != nil { - return err + return gatherErr } 
acc.AddFields("zfs_pool", fields, tags) diff --git a/plugins/inputs/zfs/zfs_linux_test.go b/plugins/inputs/zfs/zfs_linux_test.go index 20604df8dc9bc..ee45c45f4dd10 100644 --- a/plugins/inputs/zfs/zfs_linux_test.go +++ b/plugins/inputs/zfs/zfs_linux_test.go @@ -130,6 +130,33 @@ nread 4 1884160 nunlinks 4 14148 nunlinked 4 14147 ` +const objsetV22Contents = `36 1 0x01 7 2160 5214787391 74985931356512 +name type data +dataset_name 7 HOMEV22 +writes 4 978 +nwritten 4 6450688 +reads 4 22 +nread 4 1884160 +nunlinks 4 14148 +nunlinked 4 14147 +zil_commit_count 4 1 +zil_commit_writer_count 4 2 +zil_itx_count 4 3 +zil_itx_indirect_count 4 4 +zil_itx_indirect_bytes 4 5 +zil_itx_copied_count 4 6 +zil_itx_copied_bytes 4 7 +zil_itx_needcopy_count 4 8 +zil_itx_needcopy_bytes 4 9 +zil_itx_metaslab_normal_count 4 10 +zil_itx_metaslab_normal_bytes 4 11 +zil_itx_metaslab_normal_write 4 12 +zil_itx_metaslab_normal_alloc 4 13 +zil_itx_metaslab_slog_count 4 14 +zil_itx_metaslab_slog_bytes 4 15 +zil_itx_metaslab_slog_write 4 16 +zil_itx_metaslab_slog_alloc 4 17 +` const zilContents = `7 1 0x01 14 672 34118481334 437444452158445 name type data zil_commit_count 4 77 @@ -235,6 +262,8 @@ func TestZfsPoolMetrics(t *testing.T) { err = os.WriteFile(testKstatPath+"/HOME/objset-0x20a", []byte(objsetContents), 0640) require.NoError(t, err) + err = os.WriteFile(testKstatPath+"/HOME/objset-0x20b", []byte(objsetV22Contents), 0640) + require.NoError(t, err) acc.Metrics = nil @@ -242,9 +271,12 @@ func TestZfsPoolMetrics(t *testing.T) { require.NoError(t, err) tags["dataset"] = "HOME" - poolMetrics = getPoolMetricsNewFormat() acc.AssertContainsTaggedFields(t, "zfs_pool", poolMetrics, tags) + + tags["dataset"] = "HOMEV22" + poolMetrics = getPoolMetricsNewFormatV22() + acc.AssertContainsTaggedFields(t, "zfs_pool", poolMetrics, tags) } func TestZfsGeneratesMetrics(t *testing.T) { @@ -554,3 +586,31 @@ func getPoolMetricsNewFormat() map[string]interface{} { "writes": int64(978), } } + +func 
getPoolMetricsNewFormatV22() map[string]interface{} { + return map[string]interface{}{ + "nread": int64(1884160), + "nunlinked": int64(14147), + "nunlinks": int64(14148), + "nwritten": int64(6450688), + "reads": int64(22), + "writes": int64(978), + "zil_commit_count": int64(1), + "zil_commit_writer_count": int64(2), + "zil_itx_count": int64(3), + "zil_itx_indirect_count": int64(4), + "zil_itx_indirect_bytes": int64(5), + "zil_itx_copied_count": int64(6), + "zil_itx_copied_bytes": int64(7), + "zil_itx_needcopy_count": int64(8), + "zil_itx_needcopy_bytes": int64(9), + "zil_itx_metaslab_normal_count": int64(10), + "zil_itx_metaslab_normal_bytes": int64(11), + "zil_itx_metaslab_normal_write": int64(12), + "zil_itx_metaslab_normal_alloc": int64(13), + "zil_itx_metaslab_slog_count": int64(14), + "zil_itx_metaslab_slog_bytes": int64(15), + "zil_itx_metaslab_slog_write": int64(16), + "zil_itx_metaslab_slog_alloc": int64(17), + } +} diff --git a/plugins/outputs/cratedb/cratedb_test.go b/plugins/outputs/cratedb/cratedb_test.go index 20d84f3bd5f3e..d1fdf1688308d 100644 --- a/plugins/outputs/cratedb/cratedb_test.go +++ b/plugins/outputs/cratedb/cratedb_test.go @@ -168,14 +168,14 @@ func TestEscapeValue(t *testing.T) { for _, test := range tests { got, err := escapeValue(test.Value, "_") require.NoError(t, err, "value: %#v", test.Value) - require.Equal(t, got, test.Want) + require.Equal(t, test.Want, got) } } func TestCircumventingStringEscape(t *testing.T) { value, err := escapeObject(map[string]interface{}{"a.b": "c"}, `_"`) require.NoError(t, err) - require.Equal(t, value, `{"a_""b" = 'c'}`) + require.Equal(t, `{"a_""b" = 'c'}`, value) } func Test_hashID(t *testing.T) { diff --git a/plugins/outputs/graylog/graylog_test.go b/plugins/outputs/graylog/graylog_test.go index 802befa574f49..cdeb78a60c693 100644 --- a/plugins/outputs/graylog/graylog_test.go +++ b/plugins/outputs/graylog/graylog_test.go @@ -37,15 +37,15 @@ func TestSerializer(t *testing.T) { err = 
json.Unmarshal([]byte(r), &obj) require.NoError(t, err) - require.Equal(t, obj["version"], "1.1") - require.Equal(t, obj["_name"], "testing") - require.Equal(t, obj["_verb"], "GET") - require.Equal(t, obj["host"], "hostname") - require.Equal(t, obj["full_message"], "full") - require.Equal(t, obj["short_message"], "short") - require.Equal(t, obj["level"], "1") - require.Equal(t, obj["facility"], "demo") - require.Equal(t, obj["line"], "42") - require.Equal(t, obj["file"], "graylog.go") + require.Equal(t, "1.1", obj["version"]) + require.Equal(t, "testing", obj["_name"]) + require.Equal(t, "GET", obj["_verb"]) + require.Equal(t, "hostname", obj["host"]) + require.Equal(t, "full", obj["full_message"]) + require.Equal(t, "short", obj["short_message"]) + require.Equal(t, "1", obj["level"]) + require.Equal(t, "demo", obj["facility"]) + require.Equal(t, "42", obj["line"]) + require.Equal(t, "graylog.go", obj["file"]) } } diff --git a/plugins/outputs/graylog/graylog_test_linux.go b/plugins/outputs/graylog/graylog_test_linux.go index 563f81ac14e39..1a50086cffa6f 100644 --- a/plugins/outputs/graylog/graylog_test_linux.go +++ b/plugins/outputs/graylog/graylog_test_linux.go @@ -16,10 +16,11 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/config" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) func TestWriteUDP(t *testing.T) { @@ -170,14 +171,14 @@ func UDPServer(t *testing.T, wg *sync.WaitGroup, namefieldnoprefix bool) string if err != nil { return err } - require.Equal(t, obj["short_message"], "telegraf") + require.Equal(t, "telegraf", obj["short_message"]) if namefieldnoprefix { - require.Equal(t, obj["name"], "test1") + require.Equal(t, "test1", obj["name"]) } else { - require.Equal(t, obj["_name"], "test1") + require.Equal(t, "test1", obj["_name"]) } - require.Equal(t, obj["_tag1"], 
"value1") - require.Equal(t, obj["_value"], float64(1)) + require.Equal(t, "value1", obj["_tag1"]) + require.Equal(t, float64(1), obj["_value"]) return nil } @@ -247,10 +248,10 @@ func TCPServer(t *testing.T, wg *sync.WaitGroup, tlsConfig *tls.Config, errs cha var obj GelfObject err = json.Unmarshal(bufW.Bytes(), &obj) require.NoError(t, err) - require.Equal(t, obj["short_message"], "telegraf") - require.Equal(t, obj["_name"], "test1") - require.Equal(t, obj["_tag1"], "value1") - require.Equal(t, obj["_value"], float64(1)) + require.Equal(t, "telegraf", obj["short_message"]) + require.Equal(t, "test1", obj["_name"]) + require.Equal(t, "value1", obj["_tag1"]) + require.Equal(t, float64(1), obj["_value"]) return nil } diff --git a/plugins/outputs/http/http_test.go b/plugins/outputs/http/http_test.go index cd1a6e408a1b0..1231778b3f4f7 100644 --- a/plugins/outputs/http/http_test.go +++ b/plugins/outputs/http/http_test.go @@ -499,6 +499,34 @@ func TestOAuthClientCredentialsGrant(t *testing.T) { w.WriteHeader(http.StatusOK) }, }, + { + name: "audience", + plugin: &HTTP{ + URL: u.String() + "/write", + HTTPClientConfig: httpconfig.HTTPClientConfig{ + OAuth2Config: oauth.OAuth2Config{ + ClientID: "howdy", + ClientSecret: "secret", + TokenURL: u.String() + "/token", + Scopes: []string{"urn:opc:idm:__myscopes__"}, + Audience: "audience", + }, + }, + }, + tokenHandler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + values := url.Values{} + values.Add("access_token", token) + values.Add("token_type", "bearer") + values.Add("expires_in", "3600") + _, err = w.Write([]byte(values.Encode())) + require.NoError(t, err) + }, + handler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + require.Equal(t, []string{"Bearer " + token}, r.Header["Authorization"]) + w.WriteHeader(http.StatusOK) + }, + }, } for _, tt := range tests { diff --git a/plugins/outputs/influxdb/http_test.go b/plugins/outputs/influxdb/http_test.go index 
0b9cfd399a862..316b007270f01 100644 --- a/plugins/outputs/influxdb/http_test.go +++ b/plugins/outputs/influxdb/http_test.go @@ -119,8 +119,8 @@ func TestHTTP_CreateDatabase(t *testing.T) { }, database: `a " b`, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { - require.Equal(t, r.Header.Get("A"), "B") - require.Equal(t, r.Header.Get("C"), "D") + require.Equal(t, "B", r.Header.Get("A")) + require.Equal(t, "D", r.Header.Get("C")) w.WriteHeader(http.StatusOK) _, err = w.Write(successResponse) require.NoError(t, err) @@ -137,8 +137,8 @@ func TestHTTP_CreateDatabase(t *testing.T) { Database: "telegraf", }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { - require.Equal(t, r.Header.Get("A"), "B") - require.Equal(t, r.Header.Get("C"), "D") + require.Equal(t, "B", r.Header.Get("A")) + require.Equal(t, "D", r.Header.Get("C")) w.WriteHeader(http.StatusOK) _, err = w.Write(successResponse) require.NoError(t, err) @@ -291,7 +291,7 @@ func TestHTTP_Write(t *testing.T) { Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { - require.Equal(t, r.FormValue("db"), "telegraf") + require.Equal(t, "telegraf", r.FormValue("db")) body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Contains(t, string(body), "cpu value=42") @@ -324,7 +324,7 @@ func TestHTTP_Write(t *testing.T) { Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { - require.Equal(t, r.Header.Get("User-Agent"), "telegraf") + require.Equal(t, "telegraf", r.Header.Get("User-Agent")) w.WriteHeader(http.StatusNoContent) }, }, @@ -362,8 +362,8 @@ func TestHTTP_Write(t *testing.T) { Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { - require.Equal(t, r.Header.Get("A"), "B") - require.Equal(t, r.Header.Get("C"), "D") + require.Equal(t, "B", r.Header.Get("A")) + require.Equal(t, "D", 
r.Header.Get("C")) w.WriteHeader(http.StatusNoContent) }, }, @@ -581,7 +581,7 @@ func TestHTTP_WriteContentEncodingGzip(t *testing.T) { http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch r.URL.Path { case "/write": - require.Equal(t, r.Header.Get("Content-Encoding"), "gzip") + require.Equal(t, "gzip", r.Header.Get("Content-Encoding")) gr, err := gzip.NewReader(r.Body) require.NoError(t, err) @@ -709,7 +709,7 @@ func TestHTTP_WriteDatabaseTagWorksOnRetry(t *testing.T) { case "/write": err := r.ParseForm() require.NoError(t, err) - require.Equal(t, r.Form["db"], []string{"foo"}) + require.Equal(t, []string{"foo"}, r.Form["db"]) body, err := io.ReadAll(r.Body) require.NoError(t, err) @@ -794,8 +794,8 @@ func TestDBRPTags(t *testing.T) { ), }, handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { - require.Equal(t, r.FormValue("db"), "telegraf") - require.Equal(t, r.FormValue("rp"), "") + require.Equal(t, "telegraf", r.FormValue("db")) + require.Equal(t, "", r.FormValue("rp")) w.WriteHeader(http.StatusNoContent) }, }, @@ -817,8 +817,8 @@ func TestDBRPTags(t *testing.T) { ), }, handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { - require.Equal(t, r.FormValue("db"), "telegraf") - require.Equal(t, r.FormValue("rp"), "foo") + require.Equal(t, "telegraf", r.FormValue("db")) + require.Equal(t, "foo", r.FormValue("rp")) w.WriteHeader(http.StatusNoContent) }, }, @@ -844,8 +844,8 @@ func TestDBRPTags(t *testing.T) { ), }, handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { - require.Equal(t, r.FormValue("db"), "telegraf") - require.Equal(t, r.FormValue("rp"), "foo") + require.Equal(t, "telegraf", r.FormValue("db")) + require.Equal(t, "foo", r.FormValue("rp")) body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Contains(t, string(body), "cpu,rp=foo value=42") @@ -873,8 +873,8 @@ func TestDBRPTags(t *testing.T) { ), }, handlerFunc: func(t *testing.T, w http.ResponseWriter, r 
*http.Request) { - require.Equal(t, r.FormValue("db"), "telegraf") - require.Equal(t, r.FormValue("rp"), "foo") + require.Equal(t, "telegraf", r.FormValue("db")) + require.Equal(t, "foo", r.FormValue("rp")) w.WriteHeader(http.StatusNoContent) }, }, @@ -898,8 +898,8 @@ func TestDBRPTags(t *testing.T) { ), }, handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { - require.Equal(t, r.FormValue("db"), "telegraf") - require.Equal(t, r.FormValue("rp"), "") + require.Equal(t, "telegraf", r.FormValue("db")) + require.Equal(t, "", r.FormValue("rp")) w.WriteHeader(http.StatusNoContent) }, }, @@ -926,8 +926,8 @@ func TestDBRPTags(t *testing.T) { ), }, handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { - require.Equal(t, r.FormValue("db"), "telegraf") - require.Equal(t, r.FormValue("rp"), "foo") + require.Equal(t, "telegraf", r.FormValue("db")) + require.Equal(t, "foo", r.FormValue("rp")) body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Contains(t, string(body), "cpu value=42") @@ -957,8 +957,8 @@ func TestDBRPTags(t *testing.T) { ), }, handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { - require.Equal(t, r.FormValue("db"), "telegraf") - require.Equal(t, r.FormValue("rp"), "foo") + require.Equal(t, "telegraf", r.FormValue("db")) + require.Equal(t, "foo", r.FormValue("rp")) body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Contains(t, string(body), "cpu,rp=foo value=42") diff --git a/plugins/outputs/influxdb_v2/http_test.go b/plugins/outputs/influxdb_v2/http_test.go index 2985400e0d246..77dd23e80e652 100644 --- a/plugins/outputs/influxdb_v2/http_test.go +++ b/plugins/outputs/influxdb_v2/http_test.go @@ -71,7 +71,7 @@ func TestWrite(t *testing.T) { case "/api/v2/write": err := r.ParseForm() require.NoError(t, err) - require.Equal(t, r.Form["bucket"], []string{"foobar"}) + require.Equal(t, []string{"foobar"}, r.Form["bucket"]) body, err := io.ReadAll(r.Body) require.NoError(t, err) @@ 
-131,7 +131,7 @@ func TestWriteBucketTagWorksOnRetry(t *testing.T) { case "/api/v2/write": err := r.ParseForm() require.NoError(t, err) - require.Equal(t, r.Form["bucket"], []string{"foo"}) + require.Equal(t, []string{"foo"}, r.Form["bucket"]) body, err := io.ReadAll(r.Body) require.NoError(t, err) diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index e772ada98df17..184ed3b804ec9 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -8,7 +8,7 @@ import ( "strings" "time" - "github.com/Shopify/sarama" + "github.com/IBM/sarama" "github.com/gofrs/uuid/v5" "github.com/influxdata/telegraf" diff --git a/plugins/outputs/kafka/kafka_test.go b/plugins/outputs/kafka/kafka_test.go index 57608811f0dde..5e28256c40f66 100644 --- a/plugins/outputs/kafka/kafka_test.go +++ b/plugins/outputs/kafka/kafka_test.go @@ -5,11 +5,10 @@ import ( "testing" "time" - "github.com/Shopify/sarama" - "github.com/docker/go-connections/nat" + "github.com/IBM/sarama" "github.com/stretchr/testify/require" "github.com/testcontainers/testcontainers-go" - "github.com/testcontainers/testcontainers-go/wait" + kafkacontainer "github.com/testcontainers/testcontainers-go/modules/kafka" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" @@ -28,45 +27,19 @@ func TestConnectAndWriteIntegration(t *testing.T) { } ctx := context.Background() - t.Log("creating test network") - networkName := "telegraf-test-output-kafka-network" - network, err := testcontainers.GenericNetwork(ctx, testcontainers.GenericNetworkRequest{ - NetworkRequest: testcontainers.NetworkRequest{ - Name: networkName, - Attachable: true, - CheckDuplicate: true, - }, - }) + kafkaContainer, err := kafkacontainer.RunContainer(ctx, + kafkacontainer.WithClusterID("test-cluster"), + testcontainers.WithImage("confluentinc/confluent-local:7.5.0"), + ) require.NoError(t, err) 
- defer func() { - require.NoError(t, network.Remove(ctx), "terminating network failed") - }() + defer kafkaContainer.Terminate(ctx) //nolint:errcheck // ignored - // Start the container as broker AND controller - container := testutil.Container{ - Image: "bitnami/kafka", - Hostname: "localhost", // required to be able to resolve the name - Networks: []string{networkName}, - ExposedPorts: []string{"9092:9092", "9093:9093"}, - Env: map[string]string{ - "KAFKA_CFG_NODE_ID": "0", - "KAFKA_CFG_PROCESS_ROLES": "controller,broker", - "KAFKA_CFG_LISTENERS": "PLAINTEXT://:9092,CONTROLLER://:9093", - "KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP": "CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT", - "KAFKA_CFG_CONTROLLER_QUORUM_VOTERS": "0@localhost:9093", - "KAFKA_CFG_CONTROLLER_LISTENER_NAMES": "CONTROLLER", - }, - WaitingFor: wait.ForAll( - wait.ForListeningPort(nat.Port("9092")), - wait.ForLog("Kafka Server started"), - ), - } - require.NoError(t, container.Start(), "failed to start container") - defer container.Terminate() + brokers, err := kafkaContainer.Brokers(ctx) + require.NoError(t, err) // Setup the plugin plugin := &Kafka{ - Brokers: []string{container.Address + ":" + container.Ports["9092"]}, + Brokers: brokers, Topic: "Test", Log: testutil.Logger{}, producerFunc: sarama.NewSyncProducer, diff --git a/plugins/outputs/opensearch/README.md b/plugins/outputs/opensearch/README.md index 5572b3ae30dba..1aed64b1210f9 100644 --- a/plugins/outputs/opensearch/README.md +++ b/plugins/outputs/opensearch/README.md @@ -30,7 +30,7 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. ## Target index name for metrics (OpenSearch will create if it not exists). 
## This is a Golang template (see https://pkg.go.dev/text/template) ## You can also specify - ## metric name (`{{.Name}}`), tag value (`{{.Tag "tag_name"}}`), field value (`{{.Field "feild_name"}}`) + ## metric name (`{{.Name}}`), tag value (`{{.Tag "tag_name"}}`), field value (`{{.Field "field_name"}}`) ## If the tag does not exist, the default tag value will be empty string "". ## the timestamp (`{{.Time.Format "xxxxxxxxx"}}`). ## For example: "telegraf-{{.Time.Format "2006-01-02"}}-{{.Tag "host"}}" would set it to telegraf-2023-07-27-HostName @@ -63,9 +63,17 @@ See the [CONFIGURATION.md][CONFIGURATION.md] for more details. # auth_bearer_token = "" ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" + ## Set to true/false to enforce TLS being enabled/disabled. If not set, + ## enable TLS only if any of the other options are specified. + # tls_enable = + ## Trusted root certificates for server + # tls_ca = "/path/to/cafile" + ## Used for TLS client certificate authentication + # tls_cert = "/path/to/certfile" + ## Used for TLS client certificate authentication + # tls_key = "/path/to/keyfile" + ## Send the specified TLS server name via SNI + # tls_server_name = "kubernetes.example.com" ## Use TLS but skip chain & host verification # insecure_skip_verify = false diff --git a/plugins/outputs/opensearch/opensearch.go b/plugins/outputs/opensearch/opensearch.go index 1abc997485912..9d9047787c730 100644 --- a/plugins/outputs/opensearch/opensearch.go +++ b/plugins/outputs/opensearch/opensearch.go @@ -5,7 +5,6 @@ import ( "bytes" "context" "crypto/sha256" - "crypto/tls" _ "embed" "encoding/json" "fmt" @@ -23,7 +22,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/choice" - httpconfig "github.com/influxdata/telegraf/plugins/common/http" + 
"github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" ) @@ -50,14 +49,14 @@ type Opensearch struct { HealthCheckTimeout config.Duration `toml:"health_check_timeout"` URLs []string `toml:"urls"` Log telegraf.Logger `toml:"-"` - - pipelineName string - indexTmpl *template.Template - pipelineTmpl *template.Template - onSucc func(context.Context, opensearchutil.BulkIndexerItem, opensearchutil.BulkIndexerResponseItem) - onFail func(context.Context, opensearchutil.BulkIndexerItem, opensearchutil.BulkIndexerResponseItem, error) - configOptions httpconfig.HTTPClientConfig - osClient *opensearch.Client + tls.ClientConfig + + pipelineName string + indexTmpl *template.Template + pipelineTmpl *template.Template + onSucc func(context.Context, opensearchutil.BulkIndexerItem, opensearchutil.BulkIndexerResponseItem) + onFail func(context.Context, opensearchutil.BulkIndexerItem, opensearchutil.BulkIndexerResponseItem, error) + osClient *opensearch.Client } //go:embed template.json @@ -158,16 +157,17 @@ func (o *Opensearch) newClient() error { } defer password.Destroy() + tlsConfig, err := o.ClientConfig.TLSConfig() + if err != nil { + return fmt.Errorf("creating TLS config failed: %w", err) + } clientConfig := opensearch.Config{ Addresses: o.URLs, Username: username.String(), Password: password.String(), - } - - if o.configOptions.InsecureSkipVerify { - clientConfig.Transport = &http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, - } + Transport: &http.Transport{ + TLSClientConfig: tlsConfig, + }, } header := http.Header{} @@ -304,7 +304,7 @@ func (o *Opensearch) Write(metrics []telegraf.Metric) error { return nil } -// BulkIndexer supports pipeline at config level so seperate indexer instance for each unique pipeline +// BulkIndexer supports pipeline at config level so separate indexer instance for each unique pipeline func getTargetIndexers(metrics []telegraf.Metric, osInst *Opensearch) 
map[string]opensearchutil.BulkIndexer { var indexers = make(map[string]opensearchutil.BulkIndexer) diff --git a/plugins/outputs/opensearch/sample.conf b/plugins/outputs/opensearch/sample.conf index c4a5451c7f41d..ea8428026212e 100644 --- a/plugins/outputs/opensearch/sample.conf +++ b/plugins/outputs/opensearch/sample.conf @@ -10,7 +10,7 @@ ## Target index name for metrics (OpenSearch will create if it not exists). ## This is a Golang template (see https://pkg.go.dev/text/template) ## You can also specify - ## metric name (`{{.Name}}`), tag value (`{{.Tag "tag_name"}}`), field value (`{{.Field "feild_name"}}`) + ## metric name (`{{.Name}}`), tag value (`{{.Tag "tag_name"}}`), field value (`{{.Field "field_name"}}`) ## If the tag does not exist, the default tag value will be empty string "". ## the timestamp (`{{.Time.Format "xxxxxxxxx"}}`). ## For example: "telegraf-{{.Time.Format "2006-01-02"}}-{{.Tag "host"}}" would set it to telegraf-2023-07-27-HostName @@ -43,9 +43,17 @@ # auth_bearer_token = "" ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" + ## Set to true/false to enforce TLS being enabled/disabled. If not set, + ## enable TLS only if any of the other options are specified. + # tls_enable = + ## Trusted root certificates for server + # tls_ca = "/path/to/cafile" + ## Used for TLS client certificate authentication + # tls_cert = "/path/to/certfile" + ## Used for TLS client certificate authentication + # tls_key = "/path/to/keyfile" + ## Send the specified TLS server name via SNI + # tls_server_name = "kubernetes.example.com" ## Use TLS but skip chain & host verification # insecure_skip_verify = false diff --git a/plugins/outputs/prometheus_client/README.md b/plugins/outputs/prometheus_client/README.md index 74a72a020ce09..27380f82d93d5 100644 --- a/plugins/outputs/prometheus_client/README.md +++ b/plugins/outputs/prometheus_client/README.md @@ -26,6 +26,9 @@ to use them. 
# Configuration for the Prometheus client to spawn [[outputs.prometheus_client]] ## Address to listen on. + ## ex: + ## listen = ":9273" + ## listen = "vsock://:9273" listen = ":9273" ## Maximum duration before timing out read of the request diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index 74f088514ab3d..af3bdc30848bd 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -5,13 +5,17 @@ import ( "context" "crypto/tls" _ "embed" + "errors" "fmt" "net" "net/http" "net/url" + "strconv" + "strings" "sync" "time" + "github.com/mdlayher/vsock" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/collectors" "github.com/prometheus/client_golang/prometheus/promhttp" @@ -187,11 +191,38 @@ func (p *PrometheusClient) Init() error { return nil } -func (p *PrometheusClient) listen() (net.Listener, error) { +func (p *PrometheusClient) listenTCP(host string) (net.Listener, error) { if p.server.TLSConfig != nil { - return tls.Listen("tcp", p.Listen, p.server.TLSConfig) + return tls.Listen("tcp", host, p.server.TLSConfig) + } + return net.Listen("tcp", host) +} + +func (p *PrometheusClient) listenVsock(host string) (net.Listener, error) { + _, portStr, err := net.SplitHostPort(host) + if err != nil { + return nil, err + } + port, err := strconv.ParseUint(portStr, 10, 32) + if err != nil { + return nil, err + } + return vsock.Listen(uint32(port), nil) +} + +func (p *PrometheusClient) listen() (net.Listener, error) { + u, err := url.ParseRequestURI(p.Listen) + // fallback to legacy way + if err != nil { + return p.listenTCP(p.Listen) + } + switch strings.ToLower(u.Scheme) { + case "tcp", "http": + return p.listenTCP(u.Host) + case "vsock": + return p.listenVsock(u.Host) } - return net.Listen("tcp", p.Listen) + return nil, errors.New("Unknown scheme") } 
func (p *PrometheusClient) Connect() error { diff --git a/plugins/outputs/prometheus_client/sample.conf b/plugins/outputs/prometheus_client/sample.conf index 2e23d60caa20e..7055f1bacfc78 100644 --- a/plugins/outputs/prometheus_client/sample.conf +++ b/plugins/outputs/prometheus_client/sample.conf @@ -1,6 +1,9 @@ # Configuration for the Prometheus client to spawn [[outputs.prometheus_client]] ## Address to listen on. + ## ex: + ## listen = ":9273" + ## listen = "vsock://:9273" listen = ":9273" ## Maximum duration before timing out read of the request diff --git a/plugins/outputs/stackdriver/stackdriver_test.go b/plugins/outputs/stackdriver/stackdriver_test.go index 4b78d09c26df9..120d2338b35f0 100644 --- a/plugins/outputs/stackdriver/stackdriver_test.go +++ b/plugins/outputs/stackdriver/stackdriver_test.go @@ -118,8 +118,8 @@ func TestWrite(t *testing.T) { require.NoError(t, err) request := mockMetric.reqs[0].(*monitoringpb.CreateTimeSeriesRequest) - require.Equal(t, request.TimeSeries[0].Resource.Type, "global") - require.Equal(t, request.TimeSeries[0].Resource.Labels["project_id"], "projects/[PROJECT]") + require.Equal(t, "global", request.TimeSeries[0].Resource.Type) + require.Equal(t, "projects/[PROJECT]", request.TimeSeries[0].Resource.Labels["project_id"]) } func TestWriteResourceTypeAndLabels(t *testing.T) { @@ -150,9 +150,9 @@ func TestWriteResourceTypeAndLabels(t *testing.T) { require.NoError(t, err) request := mockMetric.reqs[0].(*monitoringpb.CreateTimeSeriesRequest) - require.Equal(t, request.TimeSeries[0].Resource.Type, "foo") - require.Equal(t, request.TimeSeries[0].Resource.Labels["project_id"], "projects/[PROJECT]") - require.Equal(t, request.TimeSeries[0].Resource.Labels["mylabel"], "myvalue") + require.Equal(t, "foo", request.TimeSeries[0].Resource.Type) + require.Equal(t, "projects/[PROJECT]", request.TimeSeries[0].Resource.Labels["project_id"]) + require.Equal(t, "myvalue", request.TimeSeries[0].Resource.Labels["mylabel"]) } func 
TestWriteTagsAsResourceLabels(t *testing.T) { diff --git a/plugins/outputs/sumologic/sumologic_test.go b/plugins/outputs/sumologic/sumologic_test.go index 52ff988cef39f..52c1c5a6495c9 100644 --- a/plugins/outputs/sumologic/sumologic_test.go +++ b/plugins/outputs/sumologic/sumologic_test.go @@ -321,7 +321,7 @@ func TestContentEncodingGzip(t *testing.T) { payload, err := io.ReadAll(body) require.NoError(t, err) - require.Equal(t, string(payload), "metric=cpu field=value 42 0\n") + require.Equal(t, "metric=cpu field=value 42 0\n", string(payload)) w.WriteHeader(http.StatusNoContent) }) diff --git a/plugins/outputs/timestream/timestream_test.go b/plugins/outputs/timestream/timestream_test.go index 12a7b34839718..5e6df4168107a 100644 --- a/plugins/outputs/timestream/timestream_test.go +++ b/plugins/outputs/timestream/timestream_test.go @@ -269,14 +269,14 @@ func TestWriteMultiMeasuresSingleTableMode(t *testing.T) { for _, r := range result { transformedRecords = append(transformedRecords, r.Records...) 
// Assert that we use measure name from input - require.Equal(t, *r.Records[0].MeasureName, "multi_measure_name") + require.Equal(t, "multi_measure_name", *r.Records[0].MeasureName) } // Expected 101 records require.Len(t, transformedRecords, recordCount+1, "Expected 101 records after transforming") // validate write to TS err := plugin.Write(inputs) require.NoError(t, err, "Write to Timestream failed") - require.Equal(t, mockClient.WriteRecordsRequestCount, 2, "Expected 2 WriteRecords calls") + require.Equal(t, 2, mockClient.WriteRecordsRequestCount, "Expected 2 WriteRecords calls") } func TestWriteMultiMeasuresMultiTableMode(t *testing.T) { @@ -324,7 +324,7 @@ func TestWriteMultiMeasuresMultiTableMode(t *testing.T) { require.Len(t, result, 1, "Expected 1 WriteRecordsInput requests") // Assert that we use measure name from config - require.Equal(t, *result[0].Records[0].MeasureName, "config-multi-measure-name") + require.Equal(t, "config-multi-measure-name", *result[0].Records[0].MeasureName) var transformedRecords []types.Record for _, r := range result { @@ -342,7 +342,7 @@ func TestWriteMultiMeasuresMultiTableMode(t *testing.T) { // validate successful write to TS err = plugin.Write(inputs) require.NoError(t, err, "Write to Timestream failed") - require.Equal(t, mockClient.WriteRecordsRequestCount, 1, "Expected 1 WriteRecords call") + require.Equal(t, 1, mockClient.WriteRecordsRequestCount, "Expected 1 WriteRecords call") } func TestBuildMultiMeasuresInSingleAndMultiTableMode(t *testing.T) { @@ -638,7 +638,7 @@ func TestWriteWhenRequestsGreaterThanMaxWriteGoRoutinesCount(t *testing.T) { err := plugin.Write(inputs) require.NoError(t, err, "Expected to write without any errors ") - require.Equal(t, mockClient.WriteRecordsRequestCount, maxWriteRecordsCalls, "Expected 5 calls to WriteRecords") + require.Equal(t, maxWriteRecordsCalls, mockClient.WriteRecordsRequestCount, "Expected 5 calls to WriteRecords") } func 
TestWriteWhenRequestsLesserThanMaxWriteGoRoutinesCount(t *testing.T) { @@ -677,7 +677,7 @@ func TestWriteWhenRequestsLesserThanMaxWriteGoRoutinesCount(t *testing.T) { err := plugin.Write(inputs) require.NoError(t, err, "Expected to write without any errors ") - require.Equal(t, mockClient.WriteRecordsRequestCount, maxWriteRecordsCalls, "Expected 5 calls to WriteRecords") + require.Equal(t, maxWriteRecordsCalls, mockClient.WriteRecordsRequestCount, "Expected 5 calls to WriteRecords") } func TestTransformMetricsSkipEmptyMetric(t *testing.T) { diff --git a/plugins/parsers/avro/parser_test.go b/plugins/parsers/avro/parser_test.go index 0fc599bdc54ba..338e3844474a3 100644 --- a/plugins/parsers/avro/parser_test.go +++ b/plugins/parsers/avro/parser_test.go @@ -5,6 +5,7 @@ import ( "path/filepath" "testing" + "github.com/linkedin/goavro/v2" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" @@ -80,3 +81,101 @@ func TestCases(t *testing.T) { }) } } + +const benchmarkSchema = ` +{ + "namespace": "com.benchmark", + "name": "benchmark", + "type": "record", + "version": "1", + "fields": [ + {"name": "value", "type": "float", "doc": ""}, + {"name": "timestamp", "type": "long", "doc": ""}, + {"name": "tags_platform", "type": "string", "doc": ""}, + {"name": "tags_sdkver", "type": "string", "default": "", "doc": ""}, + {"name": "source", "type": "string", "default": "", "doc": ""} + ] +} +` + +func BenchmarkParsing(b *testing.B) { + plugin := &Parser{ + Format: "json", + Measurement: "benchmark", + Tags: []string{"tags_platform", "tags_sdkver", "source"}, + Fields: []string{"value"}, + Timestamp: "timestamp", + TimestampFormat: "unix", + Schema: benchmarkSchema, + } + require.NoError(b, plugin.Init()) + + benchmarkData, err := os.ReadFile(filepath.Join("testdata", "benchmark", "message.json")) + require.NoError(b, err) + + b.ResetTimer() + for n := 0; n < b.N; n++ { + _, _ = plugin.Parse(benchmarkData) + } +} + +func 
TestBenchmarkDataBinary(t *testing.T) { + plugin := &Parser{ + Measurement: "benchmark", + Tags: []string{"tags_platform", "tags_sdkver", "source"}, + Fields: []string{"value"}, + Timestamp: "timestamp", + TimestampFormat: "unix", + Schema: benchmarkSchema, + } + require.NoError(t, plugin.Init()) + + benchmarkDir := filepath.Join("testdata", "benchmark") + + // Read the expected valued from file + parser := &influx.Parser{} + require.NoError(t, parser.Init()) + expected, err := testutil.ParseMetricsFromFile(filepath.Join(benchmarkDir, "expected.out"), parser) + require.NoError(t, err) + + // Re-encode the benchmark data from JSON to binary format + jsonData, err := os.ReadFile(filepath.Join(benchmarkDir, "message.json")) + require.NoError(t, err) + codec, err := goavro.NewCodec(benchmarkSchema) + require.NoError(t, err) + native, _, err := codec.NativeFromTextual(jsonData) + require.NoError(t, err) + benchmarkData, err := codec.BinaryFromNative(nil, native) + require.NoError(t, err) + + // Do the actual testing + actual, err := plugin.Parse(benchmarkData) + require.NoError(t, err) + testutil.RequireMetricsEqual(t, expected, actual, testutil.SortMetrics()) +} + +func BenchmarkParsingBinary(b *testing.B) { + plugin := &Parser{ + Measurement: "benchmark", + Tags: []string{"tags_platform", "tags_sdkver", "source"}, + Fields: []string{"value"}, + Timestamp: "timestamp", + TimestampFormat: "unix", + Schema: benchmarkSchema, + } + require.NoError(b, plugin.Init()) + + // Re-encode the benchmark data from JSON to binary format + jsonData, err := os.ReadFile(filepath.Join("testdata", "benchmark", "message.json")) + require.NoError(b, err) + codec, err := goavro.NewCodec(benchmarkSchema) + require.NoError(b, err) + native, _, err := codec.NativeFromTextual(jsonData) + require.NoError(b, err) + benchmarkData, err := codec.BinaryFromNative(nil, native) + require.NoError(b, err) + + for n := 0; n < b.N; n++ { + _, _ = plugin.Parse(benchmarkData) + } +} diff --git 
a/plugins/parsers/avro/schema_registry.go b/plugins/parsers/avro/schema_registry.go index bbb467dd7f81e..5ca53987df2b4 100644 --- a/plugins/parsers/avro/schema_registry.go +++ b/plugins/parsers/avro/schema_registry.go @@ -99,12 +99,12 @@ func (sr *schemaRegistry) getSchemaAndCodec(id int) (*schemaAndCodec, error) { schema, ok := jsonResponse["schema"] if !ok { - return nil, fmt.Errorf("malformed respose from schema registry: no 'schema' key") + return nil, fmt.Errorf("malformed response from schema registry: no 'schema' key") } schemaValue, ok := schema.(string) if !ok { - return nil, fmt.Errorf("malformed respose from schema registry: %v cannot be cast to string", schema) + return nil, fmt.Errorf("malformed response from schema registry: %v cannot be cast to string", schema) } codec, err := goavro.NewCodec(schemaValue) if err != nil { diff --git a/plugins/parsers/avro/testdata/benchmark/expected.out b/plugins/parsers/avro/testdata/benchmark/expected.out new file mode 100644 index 0000000000000..adc11dbaa2ee8 --- /dev/null +++ b/plugins/parsers/avro/testdata/benchmark/expected.out @@ -0,0 +1 @@ +benchmark,source=myhost,tags_platform=python,tags_sdkver=3.11.5 value=5.0 1653643421000000000 diff --git a/plugins/parsers/avro/testdata/benchmark/message.json b/plugins/parsers/avro/testdata/benchmark/message.json new file mode 100644 index 0000000000000..3dcef7e686e8b --- /dev/null +++ b/plugins/parsers/avro/testdata/benchmark/message.json @@ -0,0 +1,7 @@ +{ + "timestamp": 1653643421, + "value": 5, + "source": "myhost", + "tags_platform": "python", + "tags_sdkver": "3.11.5" +} diff --git a/plugins/parsers/avro/testdata/benchmark/telegraf.conf b/plugins/parsers/avro/testdata/benchmark/telegraf.conf new file mode 100644 index 0000000000000..c20f7ccc753c9 --- /dev/null +++ b/plugins/parsers/avro/testdata/benchmark/telegraf.conf @@ -0,0 +1,25 @@ +[[ inputs.file ]] + files = ["./testdata/benchmark/message.json"] + data_format = "avro" + + avro_format = "json" + 
avro_measurement = "benchmark" + avro_tags = ["tags_platform", "tags_sdkver", "source"] + avro_fields = ["value"] + avro_timestamp = "timestamp" + avro_timestamp_format = "unix" + avro_schema = ''' + { + "namespace": "com.benchmark", + "name": "benchmark", + "type": "record", + "version": "1", + "fields": [ + {"name": "value", "type": "float", "doc": ""}, + {"name": "timestamp", "type": "long", "doc": ""}, + {"name": "tags_platform", "type": "string", "doc": ""}, + {"name": "tags_sdkver", "type": "string", "default": "", "doc": ""}, + {"name": "source", "type": "string", "default": "", "doc": ""} + ] + } + ''' diff --git a/plugins/parsers/binary/parser_test.go b/plugins/parsers/binary/parser_test.go index a872364e3ca48..3db7b4af3237f 100644 --- a/plugins/parsers/binary/parser_test.go +++ b/plugins/parsers/binary/parser_test.go @@ -1499,3 +1499,131 @@ func TestHexEncoding(t *testing.T) { require.NoError(t, err) require.NotEmpty(t, metrics) } + +var benchmarkData = [][]byte{ + { + 0x6d, 0x79, 0x68, 0x6f, 0x73, 0x74, 0x00, 0x33, + 0x2e, 0x31, 0x31, 0x2e, 0x35, 0x00, 0x70, 0x79, + 0x74, 0x68, 0x6f, 0x6e, 0x00, 0x40, 0x14, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, + }, + { + 0x6d, 0x79, 0x68, 0x6f, 0x73, 0x74, 0x00, 0x33, + 0x2e, 0x31, 0x31, 0x2e, 0x34, 0x00, 0x70, 0x79, + 0x74, 0x68, 0x6f, 0x6e, 0x00, 0x40, 0x10, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, + }, +} + +func TestBenchmarkData(t *testing.T) { + plugin := &Parser{ + Endianness: "be", + Configs: []Config{ + { + MetricName: "benchmark", + Entries: []Entry{ + { + Name: "source", + Type: "string", + Assignment: "tag", + Terminator: "null", + }, + { + Name: "tags_sdkver", + Type: "string", + Assignment: "tag", + Terminator: "null", + }, + { + Name: "tags_platform", + Type: "string", + Assignment: "tag", + Terminator: "null", + }, + { + Name: "value", + Type: "float64", + Assignment: "field", + }, + }, + }, + }, + } + require.NoError(t, plugin.Init()) + + expected := []telegraf.Metric{ + metric.New( + "benchmark", + 
map[string]string{ + "source": "myhost", + "tags_platform": "python", + "tags_sdkver": "3.11.5", + }, + map[string]interface{}{ + "value": 5.0, + }, + time.Unix(0, 0), + ), + metric.New( + "benchmark", + map[string]string{ + "source": "myhost", + "tags_platform": "python", + "tags_sdkver": "3.11.4", + }, + map[string]interface{}{ + "value": 4.0, + }, + time.Unix(0, 0), + ), + } + + actual := make([]telegraf.Metric, 0, 2) + for _, buf := range benchmarkData { + m, err := plugin.Parse(buf) + require.NoError(t, err) + actual = append(actual, m...) + } + testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics()) +} + +func BenchmarkParsing(b *testing.B) { + plugin := &Parser{ + Endianness: "be", + Configs: []Config{ + { + MetricName: "benchmark", + Entries: []Entry{ + { + Name: "source", + Type: "string", + Assignment: "tag", + Terminator: "null", + }, + { + Name: "tags_sdkver", + Type: "string", + Assignment: "tag", + Terminator: "null", + }, + { + Name: "tags_platform", + Type: "string", + Assignment: "tag", + Terminator: "null", + }, + { + Name: "value", + Type: "float64", + Assignment: "field", + }, + }, + }, + }, + } + require.NoError(b, plugin.Init()) + + for n := 0; n < b.N; n++ { + _, _ = plugin.Parse(benchmarkData[n%2]) + } +} diff --git a/plugins/parsers/collectd/parser_test.go b/plugins/parsers/collectd/parser_test.go index e9108bac76acd..fbf9eced8a0e9 100644 --- a/plugins/parsers/collectd/parser_test.go +++ b/plugins/parsers/collectd/parser_test.go @@ -3,12 +3,15 @@ package collectd import ( "context" "testing" + "time" "collectd.org/api" "collectd.org/network" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" ) type AuthMap struct { @@ -112,7 +115,7 @@ func TestNewCollectdParser(t *testing.T) { ParseMultiValue: "join", } require.NoError(t, parser.Init()) - require.Equal(t, 
parser.popts.SecurityLevel, network.None) + require.Equal(t, network.None, parser.popts.SecurityLevel) require.NotNil(t, parser.popts.PasswordLookup) require.Nil(t, parser.popts.TypesDB) } @@ -290,10 +293,10 @@ func TestParseLine(t *testing.T) { ParseMultiValue: "split", } require.NoError(t, parser.Init()) - metric, err := parser.ParseLine(string(bytes)) + m, err := parser.ParseLine(string(bytes)) require.NoError(t, err) - assertEqualMetrics(t, singleMetric.expected, []telegraf.Metric{metric}) + assertEqualMetrics(t, singleMetric.expected, []telegraf.Metric{m}) } func writeValueList(valueLists []api.ValueList) (*network.Buffer, error) { @@ -318,3 +321,90 @@ func assertEqualMetrics(t *testing.T, expected []metricData, received []telegraf require.Equal(t, expected[i].fields, m.Fields()) } } + +var benchmarkData = []api.ValueList{ + { + Identifier: api.Identifier{ + Host: "xyzzy", + Plugin: "cpu", + PluginInstance: "1", + Type: "cpu", + TypeInstance: "user", + }, + Values: []api.Value{ + api.Counter(4), + }, + DSNames: []string(nil), + }, + { + Identifier: api.Identifier{ + Host: "xyzzy", + Plugin: "cpu", + PluginInstance: "2", + Type: "cpu", + TypeInstance: "user", + }, + Values: []api.Value{ + api.Counter(5), + }, + DSNames: []string(nil), + }, +} + +func TestBenchmarkData(t *testing.T) { + expected := []telegraf.Metric{ + metric.New( + "cpu_value", + map[string]string{ + "host": "xyzzy", + "instance": "1", + "type": "cpu", + "type_instance": "user", + }, + map[string]interface{}{ + "value": 4.0, + }, + time.Unix(0, 0), + ), + metric.New( + "cpu_value", + map[string]string{ + "host": "xyzzy", + "instance": "2", + "type": "cpu", + "type_instance": "user", + }, + map[string]interface{}{ + "value": 5.0, + }, + time.Unix(0, 0), + ), + } + + buf, err := writeValueList(benchmarkData) + require.NoError(t, err) + bytes, err := buf.Bytes() + require.NoError(t, err) + + parser := &Parser{} + require.NoError(t, parser.Init()) + actual, err := parser.Parse(bytes) + 
require.NoError(t, err) + + testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics()) +} + +func BenchmarkParsing(b *testing.B) { + buf, err := writeValueList(benchmarkData) + require.NoError(b, err) + bytes, err := buf.Bytes() + require.NoError(b, err) + + parser := &Parser{} + require.NoError(b, parser.Init()) + + b.ResetTimer() + for n := 0; n < b.N; n++ { + _, _ = parser.Parse(bytes) + } +} diff --git a/plugins/parsers/csv/README.md b/plugins/parsers/csv/README.md index b6936010b3572..a9520fb0c2f4d 100644 --- a/plugins/parsers/csv/README.md +++ b/plugins/parsers/csv/README.md @@ -45,7 +45,7 @@ values. ## A list of metadata separators. If csv_metadata_rows is set, ## csv_metadata_separators must contain at least one separator. - ## Please note that separators are case sensitive and the sequence of the seperators are respected. + ## Please note that separators are case sensitive and the sequence of the separators are respected. csv_metadata_separators = [":", "="] ## A set of metadata trim characters. 
diff --git a/plugins/parsers/csv/parser_test.go b/plugins/parsers/csv/parser_test.go index b26d46470950e..1022bf2328715 100644 --- a/plugins/parsers/csv/parser_test.go +++ b/plugins/parsers/csv/parser_test.go @@ -105,8 +105,8 @@ func TestTimestamp(t *testing.T) { metrics, err := p.Parse([]byte(testCSV)) require.NoError(t, err) - require.Equal(t, metrics[0].Time().UnixNano(), int64(1243094706000000000)) - require.Equal(t, metrics[1].Time().UnixNano(), int64(1257609906000000000)) + require.Equal(t, int64(1243094706000000000), metrics[0].Time().UnixNano()) + require.Equal(t, int64(1257609906000000000), metrics[1].Time().UnixNano()) } func TestTimestampYYYYMMDDHHmm(t *testing.T) { @@ -127,8 +127,8 @@ func TestTimestampYYYYMMDDHHmm(t *testing.T) { metrics, err := p.Parse([]byte(testCSV)) require.NoError(t, err) - require.Equal(t, metrics[0].Time().UnixNano(), int64(1243094700000000000)) - require.Equal(t, metrics[1].Time().UnixNano(), int64(1247328300000000000)) + require.Equal(t, int64(1243094700000000000), metrics[0].Time().UnixNano()) + require.Equal(t, int64(1247328300000000000), metrics[1].Time().UnixNano()) } func TestTimestampError(t *testing.T) { p := &Parser{ @@ -163,8 +163,8 @@ func TestTimestampUnixFormat(t *testing.T) { 1257609906,80,test_name2` metrics, err := p.Parse([]byte(testCSV)) require.NoError(t, err) - require.Equal(t, metrics[0].Time().UnixNano(), int64(1243094706000000000)) - require.Equal(t, metrics[1].Time().UnixNano(), int64(1257609906000000000)) + require.Equal(t, int64(1243094706000000000), metrics[0].Time().UnixNano()) + require.Equal(t, int64(1257609906000000000), metrics[1].Time().UnixNano()) } func TestTimestampUnixMSFormat(t *testing.T) { @@ -183,8 +183,8 @@ func TestTimestampUnixMSFormat(t *testing.T) { 1257609906123,80,test_name2` metrics, err := p.Parse([]byte(testCSV)) require.NoError(t, err) - require.Equal(t, metrics[0].Time().UnixNano(), int64(1243094706123000000)) - require.Equal(t, metrics[1].Time().UnixNano(), 
int64(1257609906123000000)) + require.Equal(t, int64(1243094706123000000), metrics[0].Time().UnixNano()) + require.Equal(t, int64(1257609906123000000), metrics[1].Time().UnixNano()) } func TestQuotedCharacter(t *testing.T) { @@ -673,8 +673,8 @@ func TestTimestampTimezone(t *testing.T) { metrics, err := p.Parse([]byte(testCSV)) require.NoError(t, err) - require.Equal(t, metrics[0].Time().UnixNano(), int64(1243094706000000000)) - require.Equal(t, metrics[1].Time().UnixNano(), int64(1257609906000000000)) + require.Equal(t, int64(1243094706000000000), metrics[0].Time().UnixNano()) + require.Equal(t, int64(1257609906000000000), metrics[1].Time().UnixNano()) } func TestEmptyMeasurementName(t *testing.T) { @@ -860,8 +860,8 @@ func TestParseMetadataSeparators(t *testing.T) { } err = p.Init() require.Error(t, err) - require.Equal(t, err.Error(), "initializing separators failed: "+ - "csv_metadata_separators required when specifying csv_metadata_rows") + require.Equal(t, "initializing separators failed: "+ + "csv_metadata_separators required when specifying csv_metadata_rows", err.Error()) p = &Parser{ ColumnNames: []string{"a", "b"}, MetadataRows: 1, @@ -871,7 +871,7 @@ func TestParseMetadataSeparators(t *testing.T) { require.NoError(t, err) require.Len(t, p.metadataSeparatorList, 4) require.Empty(t, p.MetadataTrimSet) - require.Equal(t, p.metadataSeparatorList, metadataPattern{":=", ",", "=", ":"}) + require.Equal(t, metadataPattern{":=", ",", "=", ":"}, p.metadataSeparatorList) p = &Parser{ ColumnNames: []string{"a", "b"}, MetadataRows: 1, @@ -882,7 +882,7 @@ func TestParseMetadataSeparators(t *testing.T) { require.NoError(t, err) require.Len(t, p.metadataSeparatorList, 4) require.Len(t, p.MetadataTrimSet, 3) - require.Equal(t, p.metadataSeparatorList, metadataPattern{":=", ",", ":", "="}) + require.Equal(t, metadataPattern{":=", ",", ":", "="}, p.metadataSeparatorList) } func TestParseMetadataRow(t *testing.T) { @@ -897,13 +897,13 @@ func TestParseMetadataRow(t 
*testing.T) { m := p.parseMetadataRow("# this is a not matching string") require.Nil(t, m) m = p.parseMetadataRow("# key1 : value1 \r\n") - require.Equal(t, m, map[string]string{"# key1 ": " value1 "}) + require.Equal(t, map[string]string{"# key1 ": " value1 "}, m) m = p.parseMetadataRow("key2=1234\n") - require.Equal(t, m, map[string]string{"key2": "1234"}) + require.Equal(t, map[string]string{"key2": "1234"}, m) m = p.parseMetadataRow(" file created : 2021-10-08T12:34:18+10:00 \r\n") - require.Equal(t, m, map[string]string{" file created ": " 2021-10-08T12:34:18+10:00 "}) + require.Equal(t, map[string]string{" file created ": " 2021-10-08T12:34:18+10:00 "}, m) m = p.parseMetadataRow("file created: 2021-10-08T12:34:18\t\r\r\n") - require.Equal(t, m, map[string]string{"file created": " 2021-10-08T12:34:18\t"}) + require.Equal(t, map[string]string{"file created": " 2021-10-08T12:34:18\t"}, m) p = &Parser{ ColumnNames: []string{"a", "b"}, MetadataRows: 5, @@ -916,13 +916,13 @@ func TestParseMetadataRow(t *testing.T) { m = p.parseMetadataRow("# this is a not matching string") require.Nil(t, m) m = p.parseMetadataRow("# key1 : value1 \r\n") - require.Equal(t, m, map[string]string{"key1": "value1"}) + require.Equal(t, map[string]string{"key1": "value1"}, m) m = p.parseMetadataRow("key2=1234\n") - require.Equal(t, m, map[string]string{"key2": "1234"}) + require.Equal(t, map[string]string{"key2": "1234"}, m) m = p.parseMetadataRow(" file created : 2021-10-08T12:34:18+10:00 \r\n") - require.Equal(t, m, map[string]string{"file created": "2021-10-08T12:34:18+10:00"}) + require.Equal(t, map[string]string{"file created": "2021-10-08T12:34:18+10:00"}, m) m = p.parseMetadataRow("file created: '2021-10-08T12:34:18'\r\n") - require.Equal(t, m, map[string]string{"file created": "2021-10-08T12:34:18"}) + require.Equal(t, map[string]string{"file created": "2021-10-08T12:34:18"}, m) } func TestParseCSVFileWithMetadata(t *testing.T) { @@ -1515,3 +1515,65 @@ func 
TestParseCSVLinewiseResetModeAlways(t *testing.T) { `parsing time "garbage nonsense that needs be skipped" as "2006-01-02T15:04:05Z07:00": cannot parse "garbage nonsense that needs be skipped" as "2006"`, ) } + +const benchmarkData = `tags_host,tags_platform,tags_sdkver,value,timestamp +myhost,python,3.11.5,5,1653643420 +myhost,python,3.11.4,4,1653643420 +` + +func TestBenchmarkData(t *testing.T) { + plugin := &Parser{ + MetricName: "benchmark", + HeaderRowCount: 1, + TimestampColumn: "timestamp", + TimestampFormat: "unix", + TagColumns: []string{"tags_host", "tags_platform", "tags_sdkver"}, + } + require.NoError(t, plugin.Init()) + + expected := []telegraf.Metric{ + metric.New( + "benchmark", + map[string]string{ + "tags_host": "myhost", + "tags_platform": "python", + "tags_sdkver": "3.11.5", + }, + map[string]interface{}{ + "value": 5, + }, + time.Unix(1653643420, 0), + ), + metric.New( + "benchmark", + map[string]string{ + "tags_host": "myhost", + "tags_platform": "python", + "tags_sdkver": "3.11.4", + }, + map[string]interface{}{ + "value": 4, + }, + time.Unix(1653643420, 0), + ), + } + + actual, err := plugin.Parse([]byte(benchmarkData)) + require.NoError(t, err) + testutil.RequireMetricsEqual(t, expected, actual, testutil.SortMetrics()) +} + +func BenchmarkParsing(b *testing.B) { + plugin := &Parser{ + MetricName: "benchmark", + HeaderRowCount: 1, + TimestampColumn: "timestamp", + TimestampFormat: "unix", + TagColumns: []string{"tags_host", "tags_platform", "tags_sdkver"}, + } + require.NoError(b, plugin.Init()) + + for n := 0; n < b.N; n++ { + _, _ = plugin.Parse([]byte(benchmarkData)) + } +} diff --git a/plugins/parsers/dropwizard/parser_test.go b/plugins/parsers/dropwizard/parser_test.go index e8eb56c87b28a..f388b31fd2ca8 100644 --- a/plugins/parsers/dropwizard/parser_test.go +++ b/plugins/parsers/dropwizard/parser_test.go @@ -589,3 +589,63 @@ func TestDropWizard(t *testing.T) { }) } } + +const benchmarkData = `{ + "version": "3.0.0", + "gauges" : { + 
"benchmark,tags_host=myhost,tags_sdkver=3.11.5,tags_platform=python": { + "value": 5.0 + }, + "benchmark,tags_host=myhost,tags_sdkver=3.11.4,tags_platform=python": { + "value": 4.0 + } + } +} +` + +func TestBenchmarkData(t *testing.T) { + plugin := &Parser{} + require.NoError(t, plugin.Init()) + + expected := []telegraf.Metric{ + metric.New( + "benchmark", + map[string]string{ + "metric_type": "gauge", + "tags_host": "myhost", + "tags_platform": "python", + "tags_sdkver": "3.11.5", + }, + map[string]interface{}{ + "value": 5.0, + }, + time.Unix(0, 0), + ), + metric.New( + "benchmark", + map[string]string{ + "metric_type": "gauge", + "tags_host": "myhost", + "tags_platform": "python", + "tags_sdkver": "3.11.4", + }, + map[string]interface{}{ + "value": 4.0, + }, + time.Unix(0, 0), + ), + } + + actual, err := plugin.Parse([]byte(benchmarkData)) + require.NoError(t, err) + testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics()) +} + +func BenchmarkParsing(b *testing.B) { + plugin := &Parser{} + require.NoError(b, plugin.Init()) + + for n := 0; n < b.N; n++ { + _, _ = plugin.Parse([]byte(benchmarkData)) + } +} diff --git a/plugins/parsers/form_urlencoded/parser_test.go b/plugins/parsers/form_urlencoded/parser_test.go index 45daadad078ee..8b30204e831c0 100644 --- a/plugins/parsers/form_urlencoded/parser_test.go +++ b/plugins/parsers/form_urlencoded/parser_test.go @@ -2,7 +2,11 @@ package form_urlencoded import ( "testing" + "time" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -35,14 +39,14 @@ func TestParseLineValidFormData(t *testing.T) { MetricName: "form_urlencoded_test", } - metric, err := parser.ParseLine(validFormData) + metrics, err := parser.ParseLine(validFormData) require.NoError(t, err) - require.Equal(t, "form_urlencoded_test", metric.Name()) - require.Equal(t, 
map[string]string{}, metric.Tags()) + require.Equal(t, "form_urlencoded_test", metrics.Name()) + require.Equal(t, map[string]string{}, metrics.Tags()) require.Equal(t, map[string]interface{}{ "field1": float64(42), "field2": float64(69), - }, metric.Fields()) + }, metrics.Fields()) } func TestParseValidFormDataWithTags(t *testing.T) { @@ -170,3 +174,42 @@ func TestParseInvalidFormDataEmptyString(t *testing.T) { require.NoError(t, err) require.Empty(t, metrics) } + +const benchmarkData = `tags_host=myhost&tags_platform=python&tags_sdkver=3.11.5&value=5` + +func TestBenchmarkData(t *testing.T) { + plugin := &Parser{ + MetricName: "benchmark", + TagKeys: []string{"tags_host", "tags_platform", "tags_sdkver"}, + } + + expected := []telegraf.Metric{ + metric.New( + "benchmark", + map[string]string{ + "tags_host": "myhost", + "tags_platform": "python", + "tags_sdkver": "3.11.5", + }, + map[string]interface{}{ + "value": 5.0, + }, + time.Unix(0, 0), + ), + } + + actual, err := plugin.Parse([]byte(benchmarkData)) + require.NoError(t, err) + testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics()) +} + +func BenchmarkParsing(b *testing.B) { + plugin := &Parser{ + MetricName: "benchmark", + TagKeys: []string{"tags_host", "tags_platform", "tags_sdkver"}, + } + + for n := 0; n < b.N; n++ { + _, _ = plugin.Parse([]byte(benchmarkData)) + } +} diff --git a/plugins/parsers/grok/parser_test.go b/plugins/parsers/grok/parser_test.go index 06bdf2bcc18cd..0ba1573dbbf9b 100644 --- a/plugins/parsers/grok/parser_test.go +++ b/plugins/parsers/grok/parser_test.go @@ -1089,7 +1089,7 @@ func TestDynamicMeasurementModifier(t *testing.T) { require.NoError(t, p.Compile()) m, err := p.ParseLine("4 5 hello") require.NoError(t, err) - require.Equal(t, m.Name(), "hello") + require.Equal(t, "hello", m.Name()) } func TestStaticMeasurementModifier(t *testing.T) { @@ -1114,7 +1114,7 @@ func TestTwoMeasurementModifier(t *testing.T) { require.NoError(t, p.Compile()) m,
err := p.ParseLine("4 5 hello") require.NoError(t, err) - require.Equal(t, m.Name(), "4 5 hello") + require.Equal(t, "4 5 hello", m.Name()) } func TestMeasurementModifierNoName(t *testing.T) { @@ -1126,7 +1126,7 @@ func TestMeasurementModifierNoName(t *testing.T) { require.NoError(t, p.Compile()) m, err := p.ParseLine("4 5 hello") require.NoError(t, err) - require.Equal(t, m.Name(), "hello") + require.Equal(t, "hello", m.Name()) } func TestEmptyYearInTimestamp(t *testing.T) { @@ -1184,3 +1184,58 @@ func TestMultilineNilMetric(t *testing.T) { require.NoError(t, err) require.Empty(t, actual) } + +const benchmarkData = `benchmark 5 1653643421 source=myhost tags_platform=python tags_sdkver=3.11.5 +benchmark 4 1653643422 source=myhost tags_platform=python tags_sdkver=3.11.4 +` + +func TestBenchmarkData(t *testing.T) { + plugin := &Parser{ + //nolint:lll // conditionally long lines allowed + Patterns: []string{"%{WORD:measurement:measurement} %{NUMBER:value:float} %{NUMBER:timestamp:ts-epoch} source=%{WORD:source:tag} tags_platform=%{WORD:tags_platform:tag} tags_sdkver=%{GREEDYDATA:tags_sdkver:tag}"}, + } + require.NoError(t, plugin.Init()) + + expected := []telegraf.Metric{ + metric.New( + "benchmark", + map[string]string{ + "source": "myhost", + "tags_platform": "python", + "tags_sdkver": "3.11.5", + }, + map[string]interface{}{ + "value": 5.0, + }, + time.Unix(1653643421, 0), + ), + metric.New( + "benchmark", + map[string]string{ + "source": "myhost", + "tags_platform": "python", + "tags_sdkver": "3.11.4", + }, + map[string]interface{}{ + "value": 4.0, + }, + time.Unix(1653643422, 0), + ), + } + + actual, err := plugin.Parse([]byte(benchmarkData)) + require.NoError(t, err) + testutil.RequireMetricsEqual(t, expected, actual, testutil.SortMetrics()) +} + +func BenchmarkParsing(b *testing.B) { + plugin := &Parser{ + //nolint:lll // conditionally long lines allowed + Patterns: []string{"%{WORD:measurement:measurement} %{NUMBER:value:float} %{NUMBER:timestamp:ts-epoch} 
source=%{WORD:source:tag} tags_platform=%{WORD:tags_platform:tag} tags_sdkver=%{GREEDYDATA:tags_sdkver:tag}"}, + } + require.NoError(b, plugin.Init()) + + for n := 0; n < b.N; n++ { + _, _ = plugin.Parse([]byte(benchmarkData)) + } +} diff --git a/plugins/parsers/influx/influx_upstream/parser_test.go b/plugins/parsers/influx/influx_upstream/parser_test.go index 9af4ed9005e58..05146234e8728 100644 --- a/plugins/parsers/influx/influx_upstream/parser_test.go +++ b/plugins/parsers/influx/influx_upstream/parser_test.go @@ -1018,3 +1018,53 @@ func TestStreamParserProducesAllAvailableMetrics(t *testing.T) { _, err = parser.Next() require.NoError(t, err) } + +const benchmarkData = `benchmark,tags_host=myhost,tags_platform=python,tags_sdkver=3.11.5 value=5 1653643421 +benchmark,tags_host=myhost,tags_platform=python,tags_sdkver=3.11.4 value=4 1653643422 +` + +func TestBenchmarkData(t *testing.T) { + plugin := &Parser{} + require.NoError(t, plugin.Init()) + + expected := []telegraf.Metric{ + metric.New( + "benchmark", + map[string]string{ + "tags_host": "myhost", + "tags_platform": "python", + "tags_sdkver": "3.11.5", + }, + map[string]interface{}{ + "value": float64(5), + }, + time.Unix(1653643421, 0), + ), + metric.New( + "benchmark", + map[string]string{ + "tags_host": "myhost", + "tags_platform": "python", + "tags_sdkver": "3.11.4", + }, + map[string]interface{}{ + "value": float64(4), + }, + time.Unix(1653643422, 0), + ), + } + + // Do the parsing + actual, err := plugin.Parse([]byte(benchmarkData)) + require.NoError(t, err) + testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics()) +} + +func BenchmarkParsing(b *testing.B) { + plugin := &Parser{} + require.NoError(b, plugin.Init()) + + for n := 0; n < b.N; n++ { + _, _ = plugin.Parse([]byte(benchmarkData)) + } +} diff --git a/plugins/parsers/influx/parser_test.go b/plugins/parsers/influx/parser_test.go index a76fe6f6454e8..e1f3d6693325b 100644 ---
a/plugins/parsers/influx/parser_test.go +++ b/plugins/parsers/influx/parser_test.go @@ -985,3 +985,53 @@ func TestStreamParserProducesAllAvailableMetrics(t *testing.T) { _, err = parser.Next() require.NoError(t, err) } + +const benchmarkData = `benchmark,tags_host=myhost,tags_platform=python,tags_sdkver=3.11.5 value=5 1653643421 +benchmark,tags_host=myhost,tags_platform=python,tags_sdkver=3.11.4 value=4 1653643422 +` + +func TestBenchmarkData(t *testing.T) { + plugin := &Parser{} + require.NoError(t, plugin.Init()) + + expected := []telegraf.Metric{ + metric.New( + "benchmark", + map[string]string{ + "tags_host": "myhost", + "tags_platform": "python", + "tags_sdkver": "3.11.5", + }, + map[string]interface{}{ + "value": float64(5), + }, + time.Unix(1653643421, 0), + ), + metric.New( + "benchmark", + map[string]string{ + "tags_host": "myhost", + "tags_platform": "python", + "tags_sdkver": "3.11.4", + }, + map[string]interface{}{ + "value": float64(4), + }, + time.Unix(1653643422, 0), + ), + } + + // Do the parsing + actual, err := plugin.Parse([]byte(benchmarkData)) + require.NoError(t, err) + testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics()) +} + +func BenchmarkParsing(b *testing.B) { + plugin := &Parser{} + require.NoError(b, plugin.Init()) + + for n := 0; n < b.N; n++ { + _, _ = plugin.Parse([]byte(benchmarkData)) + } +} diff --git a/plugins/parsers/json/parser_test.go b/plugins/parsers/json/parser_test.go index 5ea82fa2ac906..5e3a64f57b410 100644 --- a/plugins/parsers/json/parser_test.go +++ b/plugins/parsers/json/parser_test.go @@ -672,7 +672,7 @@ func TestUseCaseJSONQuery(t *testing.T) { actual, err := parser.Parse([]byte(testString)) require.NoError(t, err) require.Len(t, actual, 3) - require.Equal(t, actual[0].Fields()["last"], "Murphy") + require.Equal(t, "Murphy", actual[0].Fields()["last"]) } func TestTimeParser(t *testing.T) { @@ -1440,7 +1440,7 @@ func TestBenchmarkData(t *testing.T) { // Do the parsing
actual, err := plugin.Parse([]byte(benchmarkData)) require.NoError(t, err) - testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime()) + testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics()) } func BenchmarkParsingSequential(b *testing.B) { diff --git a/plugins/parsers/json_v2/README.md b/plugins/parsers/json_v2/README.md index 992410c592f9f..8fefdc7885b00 100644 --- a/plugins/parsers/json_v2/README.md +++ b/plugins/parsers/json_v2/README.md @@ -5,7 +5,7 @@ syntax supported is [GJSON Path Syntax](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md), you can go to this playground to test out your GJSON path here: [gjson.dev/](https://gjson.dev). You can find multiple examples under the -`testdata` folder. +[`testdata`][] folder. ## Configuration @@ -77,10 +77,10 @@ sections that follow these configuration keys are defined in more detail. * **measurement_name (OPTIONAL)**: Will set the measurement name to the provided string. * **measurement_name_path (OPTIONAL)**: You can define a query with [GJSON Path Syntax](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md) to set a measurement name from the JSON input. The query must return a single data value or it will use the default measurement name. This takes precedence over `measurement_name`. * **timestamp_path (OPTIONAL)**: You can define a query with [GJSON Path Syntax](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md) to set a timestamp from the JSON input. The query must return a single data value or it will default to the current time. 
-* **timestamp_format (OPTIONAL, but REQUIRED when timestamp_query is defined**: Must be set to `unix`, `unix_ms`, `unix_us`, `unix_ns`, or +* **timestamp_format (OPTIONAL, but REQUIRED when timestamp_path is defined**: Must be set to `unix`, `unix_ms`, `unix_us`, `unix_ns`, or the Go "reference time" which is defined to be the specific time: `Mon Jan 2 15:04:05 MST 2006` -* **timestamp_timezone (OPTIONAL, but REQUIRES timestamp_query**: This option should be set to a +* **timestamp_timezone (OPTIONAL, but REQUIRES timestamp_path**: This option should be set to a [Unix TZ value](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones), such as `America/New_York`, to `Local` to utilize the system timezone, or to `UTC`. Defaults to `UTC` @@ -162,10 +162,10 @@ TOML as an array table using double brackets. *Keys to define what JSON keys should be used as timestamps:* * **timestamp_key(OPTIONAL)**: You can define a json key (for a nested key, prepend the parent keys with underscores) for the value to be set as the timestamp from the JSON input. -* **timestamp_format (OPTIONAL, but REQUIRED when timestamp_query is defined**: Must be set to `unix`, `unix_ms`, `unix_us`, `unix_ns`, or +* **timestamp_format (OPTIONAL, but REQUIRED when timestamp_key is defined**: Must be set to `unix`, `unix_ms`, `unix_us`, `unix_ns`, or the Go "reference time" which is defined to be the specific time: `Mon Jan 2 15:04:05 MST 2006` -* **timestamp_timezone (OPTIONAL, but REQUIRES timestamp_query**: This option should be set to a +* **timestamp_timezone (OPTIONAL, but REQUIRES timestamp_key**: This option should be set to a [Unix TZ value](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones), such as `America/New_York`, to `Local` to utilize the system timezone, or to `UTC`. 
Defaults to `UTC` @@ -180,7 +180,7 @@ such as `America/New_York`, to `Local` to utilize the system timezone, or to `UT *Configuration to modify the resutling line protocol:* * **disable_prepend_keys (OPTIONAL)**: Set to true to prevent resulting nested data to contain the parent key prepended to its key **NOTE**: duplicate names can overwrite each other when this is enabled -* **renames (OPTIONAL, defined in TOML as a table using single bracket)**: A table matching the json key with the desired name (oppossed to defaulting to using the key), use names that include the prepended keys of its parent keys for nested results +* **renames (OPTIONAL, defined in TOML as a table using single bracket)**: A table matching the json key with the desired name (opposed to defaulting to using the key), use names that include the prepended keys of its parent keys for nested results * **fields (OPTIONAL, defined in TOML as a table using single bracket)**: A table matching the json key with the desired type (int,string,bool,float), if you define a key that is an array or object then all nested values will become that type ## Arrays and Objects @@ -194,7 +194,7 @@ The following describes the high-level approach when parsing arrays and objects: When handling nested arrays and objects, these above rules continue to apply as the parser creates line protocol. When an object has multiple array's as values, the array's will become separate line protocol containing only non-array values -from the obejct. Below you can see an example of this behavior, with an input +from the object. Below you can see an example of this behavior, with an input json containing an array of book objects that has a nested array of characters. Example JSON: @@ -252,7 +252,9 @@ file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",random=2 ``` -You can find more complicated examples under the folder `testdata`. +You can find more complicated examples under the folder [`testdata`][]. 
+ +[`testdata`]: https://github.com/influxdata/telegraf/tree/master/plugins/parsers/json_v2/testdata ## Types diff --git a/plugins/parsers/json_v2/parser.go b/plugins/parsers/json_v2/parser.go index 9c3b2c033a36a..1b339a7a08b71 100644 --- a/plugins/parsers/json_v2/parser.go +++ b/plugins/parsers/json_v2/parser.go @@ -24,7 +24,7 @@ type Parser struct { DefaultTags map[string]string `toml:"-"` Log telegraf.Logger `toml:"-"` - // **** The struct fields bellow this comment are used for processing indvidual configs **** + // **** The struct fields below this comment are used for processing individual configs **** // measurementName is the name of the current config used in each line protocol measurementName string @@ -165,7 +165,7 @@ func (p *Parser) parseCriticalPath(input []byte) ([]telegraf.Metric, error) { } if !result.IsArray() && !result.IsObject() { if c.TimestampFormat == "" { - err := fmt.Errorf("use of 'timestamp_query' requires 'timestamp_format'") + err := fmt.Errorf("use of 'timestamp_path' requires 'timestamp_format'") return nil, err } @@ -346,6 +346,7 @@ func (p *Parser) expandArray(result MetricNode, timestamp time.Time) ([]telegraf n.ParentIndex = n.Index + result.ParentIndex r, err := p.combineObject(n, timestamp) if err != nil { + p.Log.Error(err) return false } @@ -366,6 +367,7 @@ func (p *Parser) expandArray(result MetricNode, timestamp time.Time) ([]telegraf n.ParentIndex = n.Index + result.ParentIndex r, err := p.expandArray(n, timestamp) if err != nil { + p.Log.Error(err) return false } results = append(results, r...) 
@@ -377,7 +379,7 @@ func (p *Parser) expandArray(result MetricNode, timestamp time.Time) ([]telegraf } else { if p.objectConfig.TimestampKey != "" && result.SetName == p.objectConfig.TimestampKey { if p.objectConfig.TimestampFormat == "" { - err := fmt.Errorf("use of 'timestamp_query' requires 'timestamp_format'") + err := fmt.Errorf("use of 'timestamp_key' requires 'timestamp_format'") return nil, err } var loc *time.Location @@ -586,6 +588,7 @@ func (p *Parser) combineObject(result MetricNode, timestamp time.Time) ([]telegr if val.IsObject() { results, err = p.combineObject(arrayNode, timestamp) if err != nil { + p.Log.Error(err) return false } } else { @@ -593,6 +596,7 @@ func (p *Parser) combineObject(result MetricNode, timestamp time.Time) ([]telegr arrayNode.ParentIndex -= result.Index r, err := p.expandArray(arrayNode, timestamp) if err != nil { + p.Log.Error(err) return false } results = cartesianProduct(r, results) diff --git a/plugins/parsers/logfmt/parser_test.go b/plugins/parsers/logfmt/parser_test.go index 8b70291519ce5..9d7f9d97b8465 100644 --- a/plugins/parsers/logfmt/parser_test.go +++ b/plugins/parsers/logfmt/parser_test.go @@ -7,6 +7,7 @@ import ( "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" ) @@ -282,3 +283,56 @@ func TestTags(t *testing.T) { }) } } + +const benchmarkData = `tags_host=myhost tags_platform=python tags_sdkver=3.11.5 value=5 +tags_host=myhost tags_platform=python tags_sdkver=3.11.4 value=4 +` + +func TestBenchmarkData(t *testing.T) { + plugin := &Parser{ + TagKeys: []string{"tags_host", "tags_platform", "tags_sdkver"}, + } + require.NoError(t, plugin.Init()) + + expected := []telegraf.Metric{ + metric.New( + "", + map[string]string{ + "tags_host": "myhost", + "tags_platform": "python", + "tags_sdkver": "3.11.5", + }, + map[string]interface{}{ + "value": 5, + }, + time.Unix(0, 0), + ), + 
metric.New( + "", + map[string]string{ + "tags_host": "myhost", + "tags_platform": "python", + "tags_sdkver": "3.11.4", + }, + map[string]interface{}{ + "value": 4, + }, + time.Unix(0, 0), + ), + } + + actual, err := plugin.Parse([]byte(benchmarkData)) + require.NoError(t, err) + testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics()) +} + +func BenchmarkParsing(b *testing.B) { + plugin := &Parser{ + TagKeys: []string{"tags_host", "tags_platform", "tags_sdkver"}, + } + require.NoError(b, plugin.Init()) + + for n := 0; n < b.N; n++ { + _, _ = plugin.Parse([]byte(benchmarkData)) + } +} diff --git a/plugins/parsers/nagios/parser_test.go b/plugins/parsers/nagios/parser_test.go index dd3dd98d51537..464f56af7eac7 100644 --- a/plugins/parsers/nagios/parser_test.go +++ b/plugins/parsers/nagios/parser_test.go @@ -524,3 +524,53 @@ func TestParseThreshold(t *testing.T) { require.Equal(t, tests[i].eErr, err) } } + +const benchmarkData = `DISK OK - free space: / 3326 MB (56%); | /=2643MB;5948;5958;0;5968 +/ 15272 MB (77%); +/boot 68 MB (69%); +` + +func TestBenchmarkData(t *testing.T) { + plugin := &Parser{} + + expected := []telegraf.Metric{ + metric.New( + "nagios", + map[string]string{ + "perfdata": "/", + "unit": "MB", + }, + map[string]interface{}{ + "critical_gt": 5958.0, + "critical_lt": 0.0, + "min": 0.0, + "max": 5968.0, + "value": 2643.0, + "warning_gt": 5948.0, + "warning_lt": 0.0, + }, + time.Unix(0, 0), + ), + metric.New( + "nagios_state", + map[string]string{}, + map[string]interface{}{ + "long_service_output": "/ 15272 MB (77%);\n/boot 68 MB (69%);", + "service_output": "DISK OK - free space: / 3326 MB (56%);", + }, + time.Unix(0, 0), + ), + } + + actual, err := plugin.Parse([]byte(benchmarkData)) + require.NoError(t, err) + testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics()) +} + +func BenchmarkParsing(b *testing.B) { + plugin := &Parser{} + + for n := 0; n < b.N; n++ { + _, _ 
= plugin.Parse([]byte(benchmarkData)) + } +} diff --git a/plugins/parsers/opentsdb/parser_test.go b/plugins/parsers/opentsdb/parser_test.go index 6c3fea7255d31..5f186b8fa4f17 100644 --- a/plugins/parsers/opentsdb/parser_test.go +++ b/plugins/parsers/opentsdb/parser_test.go @@ -6,6 +6,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -306,3 +307,50 @@ func TestParse_DefaultTags(t *testing.T) { }) } } + +const benchmarkData = `put benchmark_a 1653643420 4 tags_host=myhost tags_platform=python tags_sdkver=3.11.4 +put benchmark_b 1653643420 5 tags_host=myhost tags_platform=python tags_sdkver=3.11.5 +` + +func TestBenchmarkData(t *testing.T) { + plugin := &Parser{} + + expected := []telegraf.Metric{ + metric.New( + "benchmark_a", + map[string]string{ + "tags_host": "myhost", + "tags_platform": "python", + "tags_sdkver": "3.11.4", + }, + map[string]interface{}{ + "value": 4.0, + }, + time.Unix(1653643420, 0), + ), + metric.New( + "benchmark_b", + map[string]string{ + "tags_host": "myhost", + "tags_platform": "python", + "tags_sdkver": "3.11.5", + }, + map[string]interface{}{ + "value": 5.0, + }, + time.Unix(1653643420, 0), + ), + } + + actual, err := plugin.Parse([]byte(benchmarkData)) + require.NoError(t, err) + testutil.RequireMetricsEqual(t, expected, actual, testutil.SortMetrics()) +} + +func BenchmarkParsing(b *testing.B) { + plugin := &Parser{} + + for n := 0; n < b.N; n++ { + _, _ = plugin.Parse([]byte(benchmarkData)) + } +} diff --git a/plugins/parsers/prometheus/parser_test.go b/plugins/parsers/prometheus/parser_test.go index af9c626d5d220..6d66be4c2b666 100644 --- a/plugins/parsers/prometheus/parser_test.go +++ b/plugins/parsers/prometheus/parser_test.go @@ -13,6 +13,7 @@ import ( "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" + 
"github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/parsers/prometheus/common" "github.com/influxdata/telegraf/testutil" ) @@ -334,7 +335,7 @@ func TestDefautTags(t *testing.T) { parser := Parser{ DefaultTags: map[string]string{ "defaultTag": "defaultTagValue", - "dockerVersion": "to_be_overriden", + "dockerVersion": "to_be_overridden", }, } metrics, err := parser.Parse([]byte(validUniqueGauge)) @@ -390,10 +391,10 @@ test_counter{label="test"} 1 %d ) parser := Parser{IgnoreTimestamp: true} - metric, _ := parser.ParseLine(metricsWithTimestamps) + m, _ := parser.ParseLine(metricsWithTimestamps) - testutil.RequireMetricEqual(t, expected, metric, testutil.IgnoreTime(), testutil.SortMetrics()) - require.WithinDuration(t, time.Now(), metric.Time(), 5*time.Second) + testutil.RequireMetricEqual(t, expected, m, testutil.IgnoreTime(), testutil.SortMetrics()) + require.WithinDuration(t, time.Now(), m.Time(), 5*time.Second) } func parse(buf []byte) ([]telegraf.Metric, error) { @@ -647,3 +648,60 @@ func TestHistogramInfBucketPresence(t *testing.T) { testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics()) } + +const benchmarkData = ` +# HELP benchmark_a Test metric for benchmarking +# TYPE benchmark_a gauge +benchmark_a{source="myhost",tags_platform="python",tags_sdkver="3.11.5"} 5 1653643420000 + +# HELP benchmark_b Test metric for benchmarking +# TYPE benchmark_b gauge +benchmark_b{source="myhost",tags_platform="python",tags_sdkver="3.11.4"} 4 1653643420000 +` + +func TestBenchmarkData(t *testing.T) { + plugin := &Parser{ + IgnoreTimestamp: false, + } + + expected := []telegraf.Metric{ + metric.New( + "prometheus", + map[string]string{ + "source": "myhost", + "tags_platform": "python", + "tags_sdkver": "3.11.5", + }, + map[string]interface{}{ + "benchmark_a": 5.0, + }, + time.Unix(1653643420, 0), + telegraf.Gauge, + ), + metric.New( + "prometheus", + map[string]string{ + "source": 
"myhost", + "tags_platform": "python", + "tags_sdkver": "3.11.4", + }, + map[string]interface{}{ + "benchmark_b": 4.0, + }, + time.Unix(1653643420, 0), + telegraf.Gauge, + ), + } + + actual, err := plugin.Parse([]byte(benchmarkData)) + require.NoError(t, err) + testutil.RequireMetricsEqual(t, expected, actual, testutil.SortMetrics()) +} + +func BenchmarkParsing(b *testing.B) { + plugin := &Parser{} + + for n := 0; n < b.N; n++ { + _, _ = plugin.Parse([]byte(benchmarkData)) + } +} diff --git a/plugins/parsers/prometheusremotewrite/parser_test.go b/plugins/parsers/prometheusremotewrite/parser_test.go index 602fe1db92301..fcdf2e360c339 100644 --- a/plugins/parsers/prometheusremotewrite/parser_test.go +++ b/plugins/parsers/prometheusremotewrite/parser_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" ) @@ -156,3 +157,79 @@ func TestMetricsWithTimestamp(t *testing.T) { require.Len(t, metrics, 1) testutil.RequireMetricsEqual(t, expected, metrics, testutil.SortMetrics()) } + +var benchmarkData = prompb.WriteRequest{ + Timeseries: []prompb.TimeSeries{ + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "benchmark_a"}, + {Name: "source", Value: "myhost"}, + {Name: "tags_platform", Value: "python"}, + {Name: "tags_sdkver", Value: "3.11.5"}, + }, + Samples: []prompb.Sample{ + {Value: 5.0, Timestamp: time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC).UnixMilli()}, + }, + }, + { + Labels: []prompb.Label{ + {Name: "__name__", Value: "benchmark_b"}, + {Name: "source", Value: "myhost"}, + {Name: "tags_platform", Value: "python"}, + {Name: "tags_sdkver", Value: "3.11.4"}, + }, + Samples: []prompb.Sample{ + {Value: 4.0, Timestamp: time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC).UnixMilli()}, + }, + }, + }, +} + +func TestBenchmarkData(t *testing.T) { + expected := []telegraf.Metric{ + metric.New( + 
"prometheus_remote_write", + map[string]string{ + "source": "myhost", + "tags_platform": "python", + "tags_sdkver": "3.11.5", + }, + map[string]interface{}{ + "benchmark_a": 5.0, + }, + time.Unix(1585699200, 0), + ), + metric.New( + "prometheus_remote_write", + map[string]string{ + "source": "myhost", + "tags_platform": "python", + "tags_sdkver": "3.11.4", + }, + map[string]interface{}{ + "benchmark_b": 4.0, + }, + time.Unix(1585699200, 0), + ), + } + + benchmarkData, err := benchmarkData.Marshal() + require.NoError(t, err) + + plugin := &Parser{} + actual, err := plugin.Parse(benchmarkData) + require.NoError(t, err) + testutil.RequireMetricsEqual(t, expected, actual, testutil.SortMetrics()) +} + +func BenchmarkParsing(b *testing.B) { + benchmarkData, err := benchmarkData.Marshal() + require.NoError(b, err) + + plugin := &Parser{} + + b.ResetTimer() + for n := 0; n < b.N; n++ { + _, _ = plugin.Parse(benchmarkData) + } +} diff --git a/plugins/parsers/value/parser_test.go b/plugins/parsers/value/parser_test.go index 13d45e5b85160..0213bae5252b2 100644 --- a/plugins/parsers/value/parser_test.go +++ b/plugins/parsers/value/parser_test.go @@ -4,6 +4,7 @@ import ( "testing" "time" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" @@ -298,3 +299,34 @@ func TestInvalidDatatype(t *testing.T) { } require.ErrorContains(t, parser.Init(), "unknown datatype") } + +const benchmarkData = `5` + +func TestBenchmarkData(t *testing.T) { + plugin := &Parser{} + require.NoError(t, plugin.Init()) + + expected := []telegraf.Metric{ + metric.New( + "", + map[string]string{}, + map[string]interface{}{ + "value": 5, + }, + time.Unix(0, 0), + ), + } + + actual, err := plugin.Parse([]byte(benchmarkData)) + require.NoError(t, err) + testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime(), testutil.SortMetrics()) +} + +func BenchmarkParsing(b 
*testing.B) { + plugin := &Parser{} + require.NoError(b, plugin.Init()) + + for n := 0; n < b.N; n++ { + _, _ = plugin.Parse([]byte(benchmarkData)) + } +} diff --git a/plugins/parsers/wavefront/parser_test.go b/plugins/parsers/wavefront/parser_test.go index 503cc1779d1bb..1b0c4a279172d 100644 --- a/plugins/parsers/wavefront/parser_test.go +++ b/plugins/parsers/wavefront/parser_test.go @@ -8,6 +8,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" ) func TestParse(t *testing.T) { @@ -289,3 +290,52 @@ func TestParseDefaultTags(t *testing.T) { ) require.EqualValues(t, parsedMetrics[0], testMetric) } + +const benchmarkData = `benchmark 5 1653643420 source="myhost" tags_platform="python" tags_sdkver="3.11.5" +benchmark 4 1653643420 source="myhost" tags_platform="python" tags_sdkver="3.11.4" +` + +func TestBenchmarkData(t *testing.T) { + plugin := &Parser{} + require.NoError(t, plugin.Init()) + + expected := []telegraf.Metric{ + metric.New( + "benchmark", + map[string]string{ + "source": "myhost", + "tags_platform": "python", + "tags_sdkver": "3.11.5", + }, + map[string]interface{}{ + "value": 5.0, + }, + time.Unix(1653643420, 0), + ), + metric.New( + "benchmark", + map[string]string{ + "source": "myhost", + "tags_platform": "python", + "tags_sdkver": "3.11.4", + }, + map[string]interface{}{ + "value": 4.0, + }, + time.Unix(1653643420, 0), + ), + } + + actual, err := plugin.Parse([]byte(benchmarkData)) + require.NoError(t, err) + testutil.RequireMetricsEqual(t, expected, actual, testutil.SortMetrics()) +} + +func BenchmarkParsing(b *testing.B) { + plugin := &Parser{} + require.NoError(b, plugin.Init()) + + for n := 0; n < b.N; n++ { + _, _ = plugin.Parse([]byte(benchmarkData)) + } +} diff --git a/plugins/parsers/xpath/parser_test.go b/plugins/parsers/xpath/parser_test.go index e8ba234ae3a39..e2fba76bc60e9 100644 --- a/plugins/parsers/xpath/parser_test.go +++ 
b/plugins/parsers/xpath/parser_test.go @@ -14,6 +14,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs/file" "github.com/influxdata/telegraf/plugins/parsers/influx" @@ -1184,7 +1185,7 @@ func TestEmptySelection(t *testing.T) { _, err := parser.Parse([]byte(tt.input)) require.Error(t, err) - require.Equal(t, err.Error(), "cannot parse with empty selection node") + require.Equal(t, "cannot parse with empty selection node", err.Error()) }) } } @@ -1494,3 +1495,321 @@ func loadTestConfiguration(filename string) (*Config, []string, error) { err = toml.Unmarshal(buf, &cfg) return &cfg, header, err } + +var benchmarkExpectedMetrics = []telegraf.Metric{ + metric.New( + "benchmark", + map[string]string{ + "tags_host": "myhost", + "tags_platform": "python", + "tags_sdkver": "3.11.5", + }, + map[string]interface{}{ + "value": 5.0, + }, + time.Unix(1577923199, 0), + ), + metric.New( + "benchmark", + map[string]string{ + "tags_host": "myhost", + "tags_platform": "python", + "tags_sdkver": "3.11.4", + }, + map[string]interface{}{ + "value": 4.0, + }, + time.Unix(1577923199, 0), + ), +} + +const benchmarkDataXML = ` + + + + myhost + 3.11.5 + python + 5 + + + myhost + 3.11.4 + python + 4 + +` + +var benchmarkConfigXML = Config{ + Selection: "/Benchmark", + Tags: map[string]string{ + "tags_host": "tags_host", + "tags_sdkver": "tags_sdkver", + "tags_platform": "tags_platform", + }, + Fields: map[string]string{ + "value": "number(value)", + }, + Timestamp: "/Timestamp/@value", + TimestampFmt: "unix", +} + +func TestBenchmarkDataXML(t *testing.T) { + plugin := &Parser{ + DefaultMetricName: "benchmark", + Format: "xml", + Configs: []Config{benchmarkConfigXML}, + Log: testutil.Logger{Name: "parsers.xpath"}, + } + require.NoError(t, plugin.Init()) + + actual, err := 
plugin.Parse([]byte(benchmarkDataXML)) + require.NoError(t, err) + testutil.RequireMetricsEqual(t, benchmarkExpectedMetrics, actual) +} + +func BenchmarkParsingXML(b *testing.B) { + plugin := &Parser{ + DefaultMetricName: "benchmark", + Format: "xml", + Configs: []Config{benchmarkConfigXML}, + Log: testutil.Logger{Name: "parsers.xpath", Quiet: true}, + } + require.NoError(b, plugin.Init()) + + for n := 0; n < b.N; n++ { + _, _ = plugin.Parse([]byte(benchmarkDataXML)) + } +} + +const benchmarkDataJSON = ` +{ + "timestamp": 1577923199, + "data": [ + { + "tags_host": "myhost", + "tags_sdkver": "3.11.5", + "tags_platform": "python", + "value": 5.0 + }, + { + "tags_host": "myhost", + "tags_sdkver": "3.11.4", + "tags_platform": "python", + "value": 4.0 + } + ] +} +` + +var benchmarkConfigJSON = Config{ + Selection: "data/*", + Tags: map[string]string{ + "tags_host": "tags_host", + "tags_sdkver": "tags_sdkver", + "tags_platform": "tags_platform", + }, + Fields: map[string]string{ + "value": "number(value)", + }, + Timestamp: "//timestamp", + TimestampFmt: "unix", +} + +func TestBenchmarkDataJSON(t *testing.T) { + plugin := &Parser{ + DefaultMetricName: "benchmark", + Format: "xpath_json", + Configs: []Config{benchmarkConfigJSON}, + Log: testutil.Logger{Name: "parsers.xpath"}, + } + require.NoError(t, plugin.Init()) + + actual, err := plugin.Parse([]byte(benchmarkDataJSON)) + require.NoError(t, err) + testutil.RequireMetricsEqual(t, benchmarkExpectedMetrics, actual) +} + +func BenchmarkParsingJSON(b *testing.B) { + plugin := &Parser{ + DefaultMetricName: "benchmark", + Format: "xpath_json", + Configs: []Config{benchmarkConfigJSON}, + Log: testutil.Logger{Name: "parsers.xpath", Quiet: true}, + } + require.NoError(b, plugin.Init()) + + for n := 0; n < b.N; n++ { + _, _ = plugin.Parse([]byte(benchmarkDataJSON)) + } +} + +func BenchmarkParsingProtobuf(b *testing.B) { + plugin := &Parser{ + DefaultMetricName: "benchmark", + Format: "xpath_protobuf", + ProtobufMessageDef: 
"benchmark.proto", + ProtobufMessageType: "benchmark.BenchmarkData", + ProtobufImportPaths: []string{".", "./testcases/protobuf_benchmark"}, + NativeTypes: true, + Configs: []Config{ + { + Selection: "//data", + Timestamp: "timestamp", + TimestampFmt: "unix_ns", + Tags: map[string]string{ + "source": "source", + "tags_sdkver": "tags_sdkver", + "tags_platform": "tags_platform", + }, + Fields: map[string]string{ + "value": "value", + }, + }, + }, + Log: testutil.Logger{Name: "parsers.xpath", Quiet: true}, + } + require.NoError(b, plugin.Init()) + + benchmarkData, err := os.ReadFile(filepath.Join("testcases", "protobuf_benchmark", "message.bin")) + require.NoError(b, err) + + for n := 0; n < b.N; n++ { + _, _ = plugin.Parse(benchmarkData) + } +} + +var benchmarkDataMsgPack = [][]byte{ + { + 0xdf, 0x00, 0x00, 0x00, 0x05, 0xa9, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0xce, + 0x62, 0x90, 0x98, 0x9d, 0xa5, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x05, 0xa6, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0xa6, 0x6d, 0x79, 0x68, 0x6f, 0x73, 0x74, 0xad, 0x74, 0x61, 0x67, 0x73, 0x5f, 0x70, + 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0xa6, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0xab, 0x74, + 0x61, 0x67, 0x73, 0x5f, 0x73, 0x64, 0x6b, 0x76, 0x65, 0x72, 0xa6, 0x33, 0x2e, 0x31, 0x31, 0x2e, + 0x35, + }, + { + 0x85, 0xA6, 0x73, 0x6F, 0x75, 0x72, 0x63, 0x65, 0xA6, 0x6D, 0x79, 0x68, 0x6F, 0x73, 0x74, 0xAB, + 0x74, 0x61, 0x67, 0x73, 0x5F, 0x73, 0x64, 0x6B, 0x76, 0x65, 0x72, 0xA6, 0x33, 0x2E, 0x31, 0x31, + 0x2E, 0x34, 0xAD, 0x74, 0x61, 0x67, 0x73, 0x5F, 0x70, 0x6C, 0x61, 0x74, 0x66, 0x6F, 0x72, 0x6D, + 0xA6, 0x70, 0x79, 0x74, 0x68, 0x6F, 0x6E, 0xA5, 0x76, 0x61, 0x6C, 0x75, 0x65, 0x04, 0xA9, 0x74, + 0x69, 0x6D, 0x65, 0x73, 0x74, 0x61, 0x6D, 0x70, 0xCE, 0x62, 0x90, 0x98, 0x9D, + }, +} + +func TestBenchmarkDataMsgPack(t *testing.T) { + plugin := &Parser{ + DefaultMetricName: "benchmark", + Format: "xpath_msgpack", + Configs: []Config{ + { + Tags: map[string]string{ + "source": "source", + 
"tags_sdkver": "tags_sdkver", + "tags_platform": "tags_platform", + }, + Fields: map[string]string{ + "value": "number(value)", + }, + Timestamp: "timestamp", + TimestampFmt: "unix", + }, + }, + Log: testutil.Logger{Name: "parsers.xpath", Quiet: true}, + } + require.NoError(t, plugin.Init()) + + expected := []telegraf.Metric{ + metric.New( + "benchmark", + map[string]string{ + "source": "myhost", + "tags_platform": "python", + "tags_sdkver": "3.11.5", + }, + map[string]interface{}{ + "value": 5.0, + }, + time.Unix(1653643421, 0), + ), + metric.New( + "benchmark", + map[string]string{ + "source": "myhost", + "tags_platform": "python", + "tags_sdkver": "3.11.4", + }, + map[string]interface{}{ + "value": 4.0, + }, + time.Unix(1653643421, 0), + ), + } + + actual := make([]telegraf.Metric, 0, 2) + for _, msg := range benchmarkDataMsgPack { + m, err := plugin.Parse(msg) + require.NoError(t, err) + actual = append(actual, m...) + } + testutil.RequireMetricsEqual(t, expected, actual, testutil.SortMetrics()) +} + +func BenchmarkParsingMsgPack(b *testing.B) { + plugin := &Parser{ + DefaultMetricName: "benchmark", + Format: "xpath_msgpack", + Configs: []Config{ + { + Tags: map[string]string{ + "source": "source", + "tags_sdkver": "tags_sdkver", + "tags_platform": "tags_platform", + }, + Fields: map[string]string{ + "value": "number(value)", + }, + Timestamp: "timestamp", + TimestampFmt: "unix", + }, + }, + Log: testutil.Logger{Name: "parsers.xpath", Quiet: true}, + } + require.NoError(b, plugin.Init()) + + for n := 0; n < b.N; n++ { + _, _ = plugin.Parse(benchmarkDataMsgPack[n%2]) + } +} + +func BenchmarkParsingCBOR(b *testing.B) { + plugin := &Parser{ + DefaultMetricName: "benchmark", + Format: "xpath_cbor", + NativeTypes: true, + Configs: []Config{ + { + Selection: "//data", + Timestamp: "timestamp", + TimestampFmt: "unix_ns", + Tags: map[string]string{ + "source": "source", + "tags_sdkver": "tags_sdkver", + "tags_platform": "tags_platform", + }, + Fields: 
map[string]string{ + "value": "value", + }, + }, + }, + Log: testutil.Logger{Name: "parsers.xpath", Quiet: true}, + } + require.NoError(b, plugin.Init()) + + benchmarkData, err := os.ReadFile(filepath.Join("testcases", "cbor_benchmark", "message.bin")) + require.NoError(b, err) + + for n := 0; n < b.N; n++ { + _, _ = plugin.Parse(benchmarkData) + } +} diff --git a/plugins/parsers/xpath/testcases/cbor_benchmark/expected.out b/plugins/parsers/xpath/testcases/cbor_benchmark/expected.out new file mode 100644 index 0000000000000..8263f7cebf36a --- /dev/null +++ b/plugins/parsers/xpath/testcases/cbor_benchmark/expected.out @@ -0,0 +1,2 @@ +benchmark,source=myhost,tags_platform=python,tags_sdkver=3.11.5 value=5.0 1653643421000000000 +benchmark,source=myhost,tags_platform=python,tags_sdkver=3.11.4 value=4.0 1653643421000000000 diff --git a/plugins/parsers/xpath/testcases/cbor_benchmark/message.bin b/plugins/parsers/xpath/testcases/cbor_benchmark/message.bin new file mode 100644 index 0000000000000..a447911aed2d7 Binary files /dev/null and b/plugins/parsers/xpath/testcases/cbor_benchmark/message.bin differ diff --git a/plugins/parsers/xpath/testcases/cbor_benchmark/telegraf.conf b/plugins/parsers/xpath/testcases/cbor_benchmark/telegraf.conf new file mode 100644 index 0000000000000..4ceaf0eecf736 --- /dev/null +++ b/plugins/parsers/xpath/testcases/cbor_benchmark/telegraf.conf @@ -0,0 +1,20 @@ +[[inputs.file]] + files = ["./testcases/cbor_benchmark/message.bin"] + data_format = "xpath_cbor" + + xpath_native_types = true + + [[inputs.file.xpath]] + metric_name = "'benchmark'" + metric_selection = "//data" + + timestamp = "timestamp" + timestamp_format = "unix_ns" + + [inputs.file.xpath.tags] + source = "source" + tags_sdkver = "tags_sdkver" + tags_platform = "tags_platform" + + [inputs.file.xpath.fields] + value = "value" diff --git a/plugins/parsers/xpath/testcases/protobuf_benchmark/benchmark.proto b/plugins/parsers/xpath/testcases/protobuf_benchmark/benchmark.proto new file 
mode 100644 index 0000000000000..74e0f307a5c72 --- /dev/null +++ b/plugins/parsers/xpath/testcases/protobuf_benchmark/benchmark.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; + +package benchmark; + +message Entry { + string source = 1; + string tags_sdkver = 2; + string tags_platform = 3; + double value = 4; + uint64 timestamp = 5; +} + +message BenchmarkData { + repeated Entry data = 1; +} diff --git a/plugins/parsers/xpath/testcases/protobuf_benchmark/expected.out b/plugins/parsers/xpath/testcases/protobuf_benchmark/expected.out new file mode 100644 index 0000000000000..8263f7cebf36a --- /dev/null +++ b/plugins/parsers/xpath/testcases/protobuf_benchmark/expected.out @@ -0,0 +1,2 @@ +benchmark,source=myhost,tags_platform=python,tags_sdkver=3.11.5 value=5.0 1653643421000000000 +benchmark,source=myhost,tags_platform=python,tags_sdkver=3.11.4 value=4.0 1653643421000000000 diff --git a/plugins/parsers/xpath/testcases/protobuf_benchmark/message.bin b/plugins/parsers/xpath/testcases/protobuf_benchmark/message.bin new file mode 100644 index 0000000000000..432e9e3607b30 Binary files /dev/null and b/plugins/parsers/xpath/testcases/protobuf_benchmark/message.bin differ diff --git a/plugins/parsers/xpath/testcases/protobuf_benchmark/telegraf.conf b/plugins/parsers/xpath/testcases/protobuf_benchmark/telegraf.conf new file mode 100644 index 0000000000000..405975deace89 --- /dev/null +++ b/plugins/parsers/xpath/testcases/protobuf_benchmark/telegraf.conf @@ -0,0 +1,24 @@ +[[inputs.file]] + files = ["./testcases/protobuf_benchmark/message.bin"] + data_format = "xpath_protobuf" + + xpath_protobuf_file = "benchmark.proto" + xpath_protobuf_type = "benchmark.BenchmarkData" + xpath_protobuf_import_paths = [".", "./testcases/protobuf_benchmark"] + + xpath_native_types = true + + [[inputs.file.xpath]] + metric_name = "'benchmark'" + metric_selection = "//data" + + timestamp = "timestamp" + timestamp_format = "unix_ns" + + [inputs.file.xpath.tags] + source = "source" + tags_sdkver = 
"tags_sdkver" + tags_platform = "tags_platform" + + [inputs.file.xpath.fields] + value = "value" diff --git a/plugins/processors/all/filter.go b/plugins/processors/all/filter.go new file mode 100644 index 0000000000000..44f4080fe4b6f --- /dev/null +++ b/plugins/processors/all/filter.go @@ -0,0 +1,5 @@ +//go:build !custom || processors || processors.filter + +package all + +import _ "github.com/influxdata/telegraf/plugins/processors/filter" // register plugin diff --git a/plugins/processors/filter/README.md b/plugins/processors/filter/README.md new file mode 100644 index 0000000000000..52b52b9ec6085 --- /dev/null +++ b/plugins/processors/filter/README.md @@ -0,0 +1,83 @@ +# Filter Processor Plugin + +The filter processor plugin allows to specify a set of rules for metrics +with the ability to _keep_ or _drop_ those metrics. It does _not_ change the +metric. As such a user might want to apply this processor to remove metrics +from the processing/output stream. +__NOTE:__ The filtering is _not_ output specific, but will apply to the metrics +processed by this processor. + +## Global configuration options + +In addition to the plugin-specific configuration settings, plugins support +additional global and plugin configuration settings. These settings are used to +modify metrics, tags, and field or create aliases and configure ordering, etc. +See the [CONFIGURATION.md][CONFIGURATION.md] for more details. + +[CONFIGURATION.md]: ../../../docs/CONFIGURATION.md#plugins + +## Configuration + +```toml @sample.conf +# Filter metrics by the given criteria +[[processors.filter]] + ## Default action if no rule applies + # default = "pass" + + ## Rules to apply on the incoming metrics (multiple rules are possible) + ## The rules are evaluated in order and the first matching rule is applied. + ## In case no rule matches the "default" is applied. + ## All filter criteria in a rule must apply for the rule to match the metric + ## i.e. 
the criteria are combined by a logical AND. If a criterion is + ## omitted it is NOT applied at all and ignored. + [[processors.filter.rule]] + ## List of metric names to match including glob expressions + # name = [] + + ## List of tag key/values pairs to match including glob expressions + ## ALL given tags keys must exist and at least one value must match + ## for the metric to match the rule. + # tags = {} + + ## List of field keys to match including glob expressions + ## At least one field must exist for the metric to match the rule. + # fields = [] + + ## Action to apply for this rule + ## "pass" will keep the metric and pass it on, while "drop" will remove + ## the metric + # action = "drop" +``` + +## Examples + +Consider a use-case where you collected a bunch of metrics + +```text +machine,source="machine1",status="OK" operating_hours=37i,temperature=23.1 +machine,source="machine2",status="warning" operating_hours=1433i,temperature=48.9,message="too hot" +machine,source="machine3",status="OK" operating_hours=811i,temperature=29.5 +machine,source="machine4",status="failure" operating_hours=1009i,temperature=67.3,message="temperature alert" +``` + +but only want to keep the ones indicating a `status` of `failure` or `warning`: + +```toml +[[processors.filter]] + namepass = ["machine"] + default = "drop" + + [[processors.filter.rule]] + tags = {"status" = ["warning", "failure"]} + action = "pass" +``` + +Alternatively, you can "black-list" the `OK` value via + +```toml +[[processors.filter]] + namepass = ["machine"] + + [[processors.filter.rule]] + tags = {"status" = "OK"} +``` diff --git a/plugins/processors/filter/filter.go b/plugins/processors/filter/filter.go new file mode 100644 index 0000000000000..0c17c36b11357 --- /dev/null +++ b/plugins/processors/filter/filter.go @@ -0,0 +1,75 @@ +//go:generate ../../../tools/readme_config_includer/generator +package filter + +import ( + _ "embed" + "fmt" + + "github.com/influxdata/telegraf" + 
"github.com/influxdata/telegraf/plugins/processors" +) + +//go:embed sample.conf +var sampleConfig string + +type Filter struct { + Rules []rule `toml:"rule"` + DefaultAction string `toml:"default"` + Log telegraf.Logger `toml:"-"` + defaultPass bool +} + +func (*Filter) SampleConfig() string { + return sampleConfig +} + +func (f *Filter) Init() error { + // Check the default-action setting + switch f.DefaultAction { + case "", "pass": + f.defaultPass = true + case "drop": + // Do nothing, those options are valid + if len(f.Rules) == 0 { + f.Log.Warn("dropping all metrics as no rule is provided") + } + default: + return fmt.Errorf("invalid default action %q", f.DefaultAction) + } + + // Check and initialize rules + for i := range f.Rules { + if err := f.Rules[i].init(); err != nil { + return fmt.Errorf("initialization of rule %d failed: %w", i+1, err) + } + } + + return nil +} + +func (f *Filter) Apply(in ...telegraf.Metric) []telegraf.Metric { + out := make([]telegraf.Metric, 0, len(in)) + for _, m := range in { + if f.applyRules(m) { + out = append(out, m) + } else { + m.Drop() + } + } + return out +} + +func (f *Filter) applyRules(m telegraf.Metric) bool { + for _, r := range f.Rules { + if pass, applies := r.apply(m); applies { + return pass + } + } + return f.defaultPass +} + +func init() { + processors.Add("Filter", func() telegraf.Processor { + return &Filter{} + }) +} diff --git a/plugins/processors/filter/filter_test.go b/plugins/processors/filter/filter_test.go new file mode 100644 index 0000000000000..76274cbf268bd --- /dev/null +++ b/plugins/processors/filter/filter_test.go @@ -0,0 +1,661 @@ +package filter + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +var testmetrics = []telegraf.Metric{ + metric.New( + "packing", + map[string]string{ + "source": "machine A", 
+ "location": "main building", + "status": "OK", + }, + map[string]interface{}{ + "operating_hours": 37, + "temperature": 23.1, + }, + time.Unix(0, 0), + ), + metric.New( + "foundry", + map[string]string{ + "source": "machine B", + "location": "factory X", + "status": "OK", + }, + map[string]interface{}{ + "operating_hours": 1337, + "temperature": 19.9, + "pieces": 96878, + }, + time.Unix(0, 0), + ), + metric.New( + "welding", + map[string]string{ + "source": "machine C", + "location": "factory X", + "status": "failure", + }, + map[string]interface{}{ + "operating_hours": 1009, + "temperature": 67.3, + "message": "temperature alert", + }, + time.Unix(0, 0), + ), + metric.New( + "welding", + map[string]string{ + "source": "machine D", + "location": "factory Y", + "status": "OK", + }, + map[string]interface{}{ + "operating_hours": 825, + "temperature": 31.2, + }, + time.Unix(0, 0), + ), +} + +func TestNoRules(t *testing.T) { + logger := &testutil.CaptureLogger{} + plugin := &Filter{ + DefaultAction: "drop", + Log: logger, + } + require.NoError(t, plugin.Init()) + + warnings := logger.Warnings() + require.Len(t, warnings, 1) + require.Contains(t, warnings[0], "dropping all metrics") +} + +func TestInvalidDefaultAction(t *testing.T) { + plugin := &Filter{ + Rules: []rule{{Name: []string{"foo"}}}, + DefaultAction: "foo", + } + require.ErrorContains(t, plugin.Init(), "invalid default action") +} + +func TestNoMetric(t *testing.T) { + plugin := &Filter{ + Rules: []rule{{Name: []string{"*"}}}, + } + require.NoError(t, plugin.Init()) + + input := []telegraf.Metric{} + require.Empty(t, plugin.Apply(input...)) +} + +func TestDropAll(t *testing.T) { + plugin := &Filter{ + Rules: []rule{{Name: []string{"*"}}}, + } + require.NoError(t, plugin.Init()) + require.Empty(t, plugin.Apply(testmetrics...)) +} + +func TestDropDefault(t *testing.T) { + plugin := &Filter{ + Rules: []rule{{Name: []string{"foo"}, Action: "pass"}}, + DefaultAction: "drop", + } + require.NoError(t, 
plugin.Init()) + require.Empty(t, plugin.Apply(testmetrics...)) +} + +func TestPassAll(t *testing.T) { + plugin := &Filter{ + Rules: []rule{{Name: []string{"*"}, Action: "pass"}}, + DefaultAction: "drop", + } + require.NoError(t, plugin.Init()) + + expected := testmetrics + actual := plugin.Apply(testmetrics...) + testutil.RequireMetricsEqual(t, expected, actual) +} + +func TestPassDefault(t *testing.T) { + plugin := &Filter{ + Rules: []rule{{Name: []string{"foo"}, Action: "drop"}}, + } + require.NoError(t, plugin.Init()) + + expected := testmetrics + actual := plugin.Apply(testmetrics...) + testutil.RequireMetricsEqual(t, expected, actual) +} + +func TestNamePass(t *testing.T) { + plugin := &Filter{ + Rules: []rule{ + { + Name: []string{"welding"}, + Action: "pass", + }, + }, + DefaultAction: "drop", + } + require.NoError(t, plugin.Init()) + + expected := []telegraf.Metric{ + metric.New( + "welding", + map[string]string{ + "source": "machine C", + "location": "factory X", + "status": "failure", + }, + map[string]interface{}{ + "operating_hours": 1009, + "temperature": 67.3, + "message": "temperature alert", + }, + time.Unix(0, 0), + ), + metric.New( + "welding", + map[string]string{ + "source": "machine D", + "location": "factory Y", + "status": "OK", + }, + map[string]interface{}{ + "operating_hours": 825, + "temperature": 31.2, + }, + time.Unix(0, 0), + ), + } + actual := plugin.Apply(testmetrics...) 
+ testutil.RequireMetricsEqual(t, expected, actual) +} + +func TestNameDrop(t *testing.T) { + plugin := &Filter{ + Rules: []rule{ + { + Name: []string{"welding"}, + Action: "drop", + }, + }, + } + require.NoError(t, plugin.Init()) + + expected := []telegraf.Metric{ + metric.New( + "packing", + map[string]string{ + "source": "machine A", + "location": "main building", + "status": "OK", + }, + map[string]interface{}{ + "operating_hours": 37, + "temperature": 23.1, + }, + time.Unix(0, 0), + ), + metric.New( + "foundry", + map[string]string{ + "source": "machine B", + "location": "factory X", + "status": "OK", + }, + map[string]interface{}{ + "operating_hours": 1337, + "temperature": 19.9, + "pieces": 96878, + }, + time.Unix(0, 0), + ), + } + actual := plugin.Apply(testmetrics...) + testutil.RequireMetricsEqual(t, expected, actual) +} + +func TestNameGlob(t *testing.T) { + plugin := &Filter{ + Rules: []rule{ + { + Name: []string{"*ing"}, + Action: "drop", + }, + }, + } + require.NoError(t, plugin.Init()) + + expected := []telegraf.Metric{ + metric.New( + "foundry", + map[string]string{ + "source": "machine B", + "location": "factory X", + "status": "OK", + }, + map[string]interface{}{ + "operating_hours": 1337, + "temperature": 19.9, + "pieces": 96878, + }, + time.Unix(0, 0), + ), + } + actual := plugin.Apply(testmetrics...) 
+ testutil.RequireMetricsEqual(t, expected, actual) +} + +func TestTagPass(t *testing.T) { + plugin := &Filter{ + Rules: []rule{ + { + Tags: map[string][]string{"status": {"OK"}}, + Action: "pass", + }, + }, + DefaultAction: "drop", + } + require.NoError(t, plugin.Init()) + + expected := []telegraf.Metric{ + metric.New( + "packing", + map[string]string{ + "source": "machine A", + "location": "main building", + "status": "OK", + }, + map[string]interface{}{ + "operating_hours": 37, + "temperature": 23.1, + }, + time.Unix(0, 0), + ), + metric.New( + "foundry", + map[string]string{ + "source": "machine B", + "location": "factory X", + "status": "OK", + }, + map[string]interface{}{ + "operating_hours": 1337, + "temperature": 19.9, + "pieces": 96878, + }, + time.Unix(0, 0), + ), + metric.New( + "welding", + map[string]string{ + "source": "machine D", + "location": "factory Y", + "status": "OK", + }, + map[string]interface{}{ + "operating_hours": 825, + "temperature": 31.2, + }, + time.Unix(0, 0), + ), + } + actual := plugin.Apply(testmetrics...) + testutil.RequireMetricsEqual(t, expected, actual) +} + +func TestTagDrop(t *testing.T) { + plugin := &Filter{ + Rules: []rule{ + { + Tags: map[string][]string{"status": {"OK"}}, + Action: "drop", + }, + }, + } + require.NoError(t, plugin.Init()) + + expected := []telegraf.Metric{ + metric.New( + "welding", + map[string]string{ + "source": "machine C", + "location": "factory X", + "status": "failure", + }, + map[string]interface{}{ + "operating_hours": 1009, + "temperature": 67.3, + "message": "temperature alert", + }, + time.Unix(0, 0), + ), + } + actual := plugin.Apply(testmetrics...) 
+ testutil.RequireMetricsEqual(t, expected, actual) +} + +func TestTagMultiple(t *testing.T) { + plugin := &Filter{ + Rules: []rule{ + { + Tags: map[string][]string{ + "location": {"factory X", "factory Y"}, + "status": {"OK"}, + }, + Action: "pass", + }, + }, + DefaultAction: "drop", + } + require.NoError(t, plugin.Init()) + + expected := []telegraf.Metric{ + metric.New( + "foundry", + map[string]string{ + "source": "machine B", + "location": "factory X", + "status": "OK", + }, + map[string]interface{}{ + "operating_hours": 1337, + "temperature": 19.9, + "pieces": 96878, + }, + time.Unix(0, 0), + ), + metric.New( + "welding", + map[string]string{ + "source": "machine D", + "location": "factory Y", + "status": "OK", + }, + map[string]interface{}{ + "operating_hours": 825, + "temperature": 31.2, + }, + time.Unix(0, 0), + ), + } + actual := plugin.Apply(testmetrics...) + testutil.RequireMetricsEqual(t, expected, actual) +} + +func TestTagGlob(t *testing.T) { + plugin := &Filter{ + Rules: []rule{ + { + Tags: map[string][]string{"location": {"factory *"}}, + Action: "pass", + }, + }, + DefaultAction: "drop", + } + require.NoError(t, plugin.Init()) + + expected := []telegraf.Metric{ + metric.New( + "foundry", + map[string]string{ + "source": "machine B", + "location": "factory X", + "status": "OK", + }, + map[string]interface{}{ + "operating_hours": 1337, + "temperature": 19.9, + "pieces": 96878, + }, + time.Unix(0, 0), + ), + metric.New( + "welding", + map[string]string{ + "source": "machine C", + "location": "factory X", + "status": "failure", + }, + map[string]interface{}{ + "operating_hours": 1009, + "temperature": 67.3, + "message": "temperature alert", + }, + time.Unix(0, 0), + ), + metric.New( + "welding", + map[string]string{ + "source": "machine D", + "location": "factory Y", + "status": "OK", + }, + map[string]interface{}{ + "operating_hours": 825, + "temperature": 31.2, + }, + time.Unix(0, 0), + ), + } + actual := plugin.Apply(testmetrics...) 
+ testutil.RequireMetricsEqual(t, expected, actual) +} + +func TestTagDoesNotExist(t *testing.T) { + plugin := &Filter{ + Rules: []rule{ + { + Tags: map[string][]string{ + "operator": {"peter"}, + "status": {"OK"}, + }, + Action: "pass", + }, + }, + DefaultAction: "drop", + } + require.NoError(t, plugin.Init()) + + require.Empty(t, plugin.Apply(testmetrics...)) +} + +func TestFieldPass(t *testing.T) { + plugin := &Filter{ + Rules: []rule{ + { + Fields: []string{"message", "pieces"}, + Action: "pass", + }, + }, + DefaultAction: "drop", + } + require.NoError(t, plugin.Init()) + + expected := []telegraf.Metric{ + metric.New( + "foundry", + map[string]string{ + "source": "machine B", + "location": "factory X", + "status": "OK", + }, + map[string]interface{}{ + "operating_hours": 1337, + "temperature": 19.9, + "pieces": 96878, + }, + time.Unix(0, 0), + ), + metric.New( + "welding", + map[string]string{ + "source": "machine C", + "location": "factory X", + "status": "failure", + }, + map[string]interface{}{ + "operating_hours": 1009, + "temperature": 67.3, + "message": "temperature alert", + }, + time.Unix(0, 0), + ), + } + actual := plugin.Apply(testmetrics...) + testutil.RequireMetricsEqual(t, expected, actual) +} + +func TestFieldDrop(t *testing.T) { + plugin := &Filter{ + Rules: []rule{ + { + Fields: []string{"message", "pieces"}, + Action: "drop", + }, + }, + } + require.NoError(t, plugin.Init()) + + expected := []telegraf.Metric{ + metric.New( + "packing", + map[string]string{ + "source": "machine A", + "location": "main building", + "status": "OK", + }, + map[string]interface{}{ + "operating_hours": 37, + "temperature": 23.1, + }, + time.Unix(0, 0), + ), + metric.New( + "welding", + map[string]string{ + "source": "machine D", + "location": "factory Y", + "status": "OK", + }, + map[string]interface{}{ + "operating_hours": 825, + "temperature": 31.2, + }, + time.Unix(0, 0), + ), + } + actual := plugin.Apply(testmetrics...) 
+ testutil.RequireMetricsEqual(t, expected, actual) +} + +func TestFieldGlob(t *testing.T) { + plugin := &Filter{ + Rules: []rule{ + { + Fields: []string{"{message,piece*}"}, + Action: "pass", + }, + }, + DefaultAction: "drop", + } + require.NoError(t, plugin.Init()) + + expected := []telegraf.Metric{ + metric.New( + "foundry", + map[string]string{ + "source": "machine B", + "location": "factory X", + "status": "OK", + }, + map[string]interface{}{ + "operating_hours": 1337, + "temperature": 19.9, + "pieces": 96878, + }, + time.Unix(0, 0), + ), + metric.New( + "welding", + map[string]string{ + "source": "machine C", + "location": "factory X", + "status": "failure", + }, + map[string]interface{}{ + "operating_hours": 1009, + "temperature": 67.3, + "message": "temperature alert", + }, + time.Unix(0, 0), + ), + } + actual := plugin.Apply(testmetrics...) + testutil.RequireMetricsEqual(t, expected, actual) +} + +func TestRuleOrder(t *testing.T) { + plugin := &Filter{ + Rules: []rule{ + { + Name: []string{"welding"}, + Action: "drop", + }, + { + Name: []string{"welding"}, + Action: "pass", + }, + }, + DefaultAction: "drop", + } + require.NoError(t, plugin.Init()) + require.Empty(t, plugin.Apply(testmetrics...)) +} + +func TestRuleMultiple(t *testing.T) { + plugin := &Filter{ + Rules: []rule{ + { + Name: []string{"welding"}, + Action: "drop", + }, + { + Name: []string{"foundry"}, + Action: "drop", + }, + }, + DefaultAction: "pass", + } + require.NoError(t, plugin.Init()) + + expected := []telegraf.Metric{ + metric.New( + "packing", + map[string]string{ + "source": "machine A", + "location": "main building", + "status": "OK", + }, + map[string]interface{}{ + "operating_hours": 37, + "temperature": 23.1, + }, + time.Unix(0, 0), + ), + } + actual := plugin.Apply(testmetrics...) 
+ testutil.RequireMetricsEqual(t, expected, actual) +} diff --git a/plugins/processors/filter/rule.go b/plugins/processors/filter/rule.go new file mode 100644 index 0000000000000..6a7cccb646c7c --- /dev/null +++ b/plugins/processors/filter/rule.go @@ -0,0 +1,87 @@ +package filter + +import ( + "fmt" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/filter" +) + +type rule struct { + Name []string `toml:"name"` + Tags map[string][]string `toml:"tags"` + Fields []string `toml:"fields"` + Action string `toml:"action"` + + nameFilter filter.Filter + fieldFilter filter.Filter + tagFilters map[string]filter.Filter + pass bool +} + +func (r *rule) init() error { + // Check the action setting + switch r.Action { + case "pass": + r.pass = true + case "", "drop": + // Do nothing, those options are valid + default: + return fmt.Errorf("invalid action %q", r.Action) + } + + // Compile the filters + var err error + r.nameFilter, err = filter.Compile(r.Name) + if err != nil { + return fmt.Errorf("creating name filter failed: %w", err) + } + + r.fieldFilter, err = filter.Compile(r.Fields) + if err != nil { + return fmt.Errorf("creating fields filter failed: %w", err) + } + + r.tagFilters = make(map[string]filter.Filter, len(r.Tags)) + for k, values := range r.Tags { + r.tagFilters[k], err = filter.Compile(values) + if err != nil { + return fmt.Errorf("creating tag filter for tag %q failed: %w", k, err) + } + } + + return nil +} + +func (r *rule) apply(m telegraf.Metric) (pass, applies bool) { + // Check the metric name + if r.nameFilter != nil { + if !r.nameFilter.Match(m.Name()) { + return true, false + } + } + + // Check the tags if given + tags := m.Tags() + for k, f := range r.tagFilters { + if value, found := tags[k]; !found || !f.Match(value) { + return true, false + } + } + + // Check the field names + if r.fieldFilter != nil { + var matches bool + for _, field := range m.FieldList() { + if r.fieldFilter.Match(field.Key) { + matches = 
true + break + } + } + if !matches { + return true, false + } + } + + return r.pass, true +} diff --git a/plugins/processors/filter/sample.conf b/plugins/processors/filter/sample.conf new file mode 100644 index 0000000000000..50231c0896813 --- /dev/null +++ b/plugins/processors/filter/sample.conf @@ -0,0 +1,28 @@ +# Filter metrics by the given criteria +[[processors.filter]] + ## Default action if no rule applies + # default = "pass" + + ## Rules to apply on the incoming metrics (multiple rules are possible) + ## The rules are evaluated in order and the first matching rule is applied. + ## In case no rule matches the "default" is applied. + ## All filter criteria in a rule must apply for the rule to match the metric + ## i.e. the criteria are combined by a logical AND. If a criterion is + ## omitted it is NOT applied at all and ignored. + [[processors.filter.rule]] + ## List of metric names to match including glob expressions + # name = [] + + ## List of tag key/values pairs to match including glob expressions + ## ALL given tags keys must exist and at least one value must match + ## for the metric to match the rule. + # tags = {} + + ## List of field keys to match including glob expressions + ## At least one field must exist for the metric to match the rule. 
+ # fields = [] + + ## Action to apply for this rule + ## "pass" will keep the metric and pass it on, while "drop" will remove + ## the metric + # action = "drop" diff --git a/plugins/processors/ifname/ttl_cache_test.go b/plugins/processors/ifname/ttl_cache_test.go index 5e6cae32e0d3f..6f16756d8d4cb 100644 --- a/plugins/processors/ifname/ttl_cache_test.go +++ b/plugins/processors/ifname/ttl_cache_test.go @@ -24,7 +24,7 @@ func TestTTLCacheExpire(t *testing.T) { _, ok, _ := c.Get("ones") require.False(t, ok) require.Empty(t, c.lru.m) - require.Equal(t, c.lru.l.Len(), 0) + require.Equal(t, 0, c.lru.l.Len()) } func TestTTLCache(t *testing.T) { diff --git a/plugins/processors/scale/scale_test.go b/plugins/processors/scale/scale_test.go index a003c39dd4e9d..4683591bbd5b4 100644 --- a/plugins/processors/scale/scale_test.go +++ b/plugins/processors/scale/scale_test.go @@ -149,7 +149,7 @@ func TestMinMax(t *testing.T) { }, }, { - name: "Missing field Fileds", + name: "Missing field Fields", scale: []scalingValuesMinMax{ { InMin: -1, diff --git a/plugins/processors/starlark/README.md b/plugins/processors/starlark/README.md index c7c3614249b4f..dcd04b476f15e 100644 --- a/plugins/processors/starlark/README.md +++ b/plugins/processors/starlark/README.md @@ -193,7 +193,7 @@ with an error. In case you need to call some code that may return an error, you can delegate the call to the built-in function `catch` which takes as argument a `Callable` -and returns the error that occured if any, `None` otherwise. +and returns the error that occurred if any, `None` otherwise. 
So for example: diff --git a/plugins/processors/starlark/starlark.go b/plugins/processors/starlark/starlark.go index 2d7e9e65568d9..3e3cb1e63410b 100644 --- a/plugins/processors/starlark/starlark.go +++ b/plugins/processors/starlark/starlark.go @@ -47,12 +47,12 @@ func (s *Starlark) Start(_ telegraf.Accumulator) error { return nil } -func (s *Starlark) Add(metric telegraf.Metric, acc telegraf.Accumulator) error { +func (s *Starlark) Add(origMetric telegraf.Metric, acc telegraf.Accumulator) error { parameters, found := s.GetParameters("apply") if !found { return fmt.Errorf("the parameters of the apply function could not be found") } - parameters[0].(*common.Metric).Wrap(metric) + parameters[0].(*common.Metric).Wrap(origMetric) rv, err := s.Call("apply") if err != nil { @@ -65,6 +65,7 @@ func (s *Starlark) Add(metric telegraf.Metric, acc telegraf.Accumulator) error { iter := rv.Iterate() defer iter.Done() var v starlark.Value + var origFound bool for iter.Next(&v) { switch v := v.(type) { case *common.Metric: @@ -73,6 +74,17 @@ func (s *Starlark) Add(metric telegraf.Metric, acc telegraf.Accumulator) error { s.Log.Errorf("Duplicate metric reference detected") continue } + + // Previous metric was found, accept the starlark metric, add + // the original metric to the accumulator + if v.ID == origMetric.HashID() { + origFound = true + m.Accept() + s.results = append(s.results, origMetric) + acc.AddMetric(origMetric) + continue + } + s.results = append(s.results, m) acc.AddMetric(m) default: @@ -82,8 +94,8 @@ func (s *Starlark) Add(metric telegraf.Metric, acc telegraf.Accumulator) error { // If the script didn't return the original metrics, mark it as // successfully handled. 
- if !containsMetric(s.results, metric) { - metric.Accept() + if !origFound { + origMetric.Drop() } // clear results @@ -93,15 +105,17 @@ func (s *Starlark) Add(metric telegraf.Metric, acc telegraf.Accumulator) error { s.results = s.results[:0] case *common.Metric: m := rv.Unwrap() - - // If the script returned a different metric, mark this metric as - // successfully handled. - if m != metric { - metric.Accept() + // If we got the original metric back, use that and drop the new one. + // Otherwise mark the original as accepted and use the new metric. + if origMetric.HashID() == rv.ID { + m.Accept() + acc.AddMetric(origMetric) + } else { + origMetric.Accept() + acc.AddMetric(m) } - acc.AddMetric(m) case starlark.NoneType: - metric.Drop() + origMetric.Drop() default: return fmt.Errorf("invalid type returned: %T", rv) } @@ -111,9 +125,9 @@ func (s *Starlark) Add(metric telegraf.Metric, acc telegraf.Accumulator) error { func (s *Starlark) Stop() { } -func containsMetric(metrics []telegraf.Metric, metric telegraf.Metric) bool { +func containsMetric(metrics []telegraf.Metric, target telegraf.Metric) bool { for _, m := range metrics { - if m == metric { + if m == target { return true } } diff --git a/plugins/processors/starlark/starlark_test.go b/plugins/processors/starlark/starlark_test.go index 5b30daea52986..049519e2a1413 100644 --- a/plugins/processors/starlark/starlark_test.go +++ b/plugins/processors/starlark/starlark_test.go @@ -6,6 +6,7 @@ import ( "os" "path/filepath" "strings" + "sync" "testing" "time" @@ -16,6 +17,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/metric" common "github.com/influxdata/telegraf/plugins/common/starlark" "github.com/influxdata/telegraf/plugins/parsers/influx" "github.com/influxdata/telegraf/testutil" @@ -3332,6 +3334,113 @@ func TestAllScriptTestData(t *testing.T) { } } +func TestTracking(t *testing.T) { + var 
testCases = []struct { + name string + source string + numMetrics int + }{ + { + name: "return none", + numMetrics: 0, + source: ` +def apply(metric): + return None +`, + }, + { + name: "return empty list of metrics", + numMetrics: 0, + source: ` +def apply(metric): + return [] +`, + }, + { + name: "return original metric", + numMetrics: 1, + source: ` +def apply(metric): + return metric +`, + }, + { + name: "return original metric in a list", + numMetrics: 1, + source: ` +def apply(metric): + return [metric] +`, + }, + { + name: "return new metric", + numMetrics: 1, + source: ` +def apply(metric): + newmetric = Metric("new_metric") + newmetric.fields["vaue"] = 42 + return newmetric +`, + }, + { + name: "return new metric in a list", + numMetrics: 1, + source: ` +def apply(metric): + newmetric = Metric("new_metric") + newmetric.fields["vaue"] = 42 + return [newmetric] +`, + }, + { + name: "return original and new metric in a list", + numMetrics: 2, + source: ` +def apply(metric): + newmetric = Metric("new_metric") + newmetric.fields["vaue"] = 42 + return [metric, newmetric] +`, + }, + } + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + // Create a tracking metric and tap the delivery information + var mu sync.Mutex + delivered := make([]telegraf.DeliveryInfo, 0, 1) + notify := func(di telegraf.DeliveryInfo) { + mu.Lock() + defer mu.Unlock() + delivered = append(delivered, di) + } + + // Configure the plugin + plugin := newStarlarkFromSource(tt.source) + require.NoError(t, plugin.Init()) + acc := &testutil.Accumulator{} + require.NoError(t, plugin.Start(acc)) + + // Process expected metrics and compare with resulting metrics + input, _ := metric.WithTracking(testutil.TestMetric(1.23), notify) + require.NoError(t, plugin.Add(input, acc)) + plugin.Stop() + + // Ensure we get back the correct number of metrics + require.Len(t, acc.GetTelegrafMetrics(), tt.numMetrics) + for _, m := range acc.GetTelegrafMetrics() { + m.Accept() + } + + // Simulate 
output acknowledging delivery of metrics and check delivery + require.Eventuallyf(t, func() bool { + mu.Lock() + defer mu.Unlock() + return len(delivered) == 1 + }, 1*time.Second, 100*time.Millisecond, "original metric not delivered") + }) + } +} + // parses metric lines out of line protocol following a header, with a trailing blank line func parseMetricsFrom(t *testing.T, lines []string, header string) (metrics []telegraf.Metric) { parser := &influx.Parser{} diff --git a/plugins/secretstores/README.md b/plugins/secretstores/README.md index 32ddb09687a59..891d7a30fa8f7 100644 --- a/plugins/secretstores/README.md +++ b/plugins/secretstores/README.md @@ -6,5 +6,6 @@ This folder contains the plugins for the secret-store functionality: * http: Query secrets from an HTTP endpoint * jose: Javascript Object Signing and Encryption * os: Native tooling provided on Linux, MacOS, or Windows. +* systemd: Secret-store to access systemd secrets See each plugin's README for additional details. diff --git a/plugins/secretstores/all/systemd.go b/plugins/secretstores/all/systemd.go new file mode 100644 index 0000000000000..a8fcf63857fae --- /dev/null +++ b/plugins/secretstores/all/systemd.go @@ -0,0 +1,5 @@ +//go:build !custom || secretstores || secretstores.systemd + +package all + +import _ "github.com/influxdata/telegraf/plugins/secretstores/systemd" // register plugin diff --git a/plugins/secretstores/docker/README.md b/plugins/secretstores/docker/README.md index 8afce0cb69c0b..7ec236dce1623 100644 --- a/plugins/secretstores/docker/README.md +++ b/plugins/secretstores/docker/README.md @@ -82,7 +82,7 @@ Referencing the secret within a plugin occurs by: password = "@{docker_secretstore:secret_for_plugin}" ``` -## Additonal Information +## Additional Information [Docker Secrets in Swarm][2] diff --git a/plugins/secretstores/systemd/README.md b/plugins/secretstores/systemd/README.md new file mode 100644 index 0000000000000..eaf14f4e1a905 --- /dev/null +++ 
b/plugins/secretstores/systemd/README.md @@ -0,0 +1,246 @@ + +# Systemd Secret-Store Plugin + +The `systemd` plugin allows utilizing credentials and secrets provided by +[systemd][] to the Telegraf service. Systemd ensures that only the intended +service can access the credentials for the lifetime of this service. The +credentials appear as plaintext files to the consuming service but are stored +encrypted on the host system. This encryption can also use TPM2 protection if +available (see [this article][systemd-descr] for details). + +This plugin does not support setting the credentials. See the +[credentials management section](#credential-management) below for how to +setup systemd credentials and how to add credentials + +**Note**: Secrets of this plugin are static and are not updated after startup. + +## Requirements and caveats + +This plugin requires **systemd version 250+** with correctly set-up credentials +via [systemd-creds][] (see [setup section](#credential-management)). +However, to use `ImportCredential`, as done in the default service file, you +need **systemd version 254+** otherwise you need to specify the credentials +using `LoadCredentialEncrypted` in a service-override. + +In the default setup, Telegraf expects credential files to be prefixed with +`telegraf.` and without a custom name setting (i.e. no `--name`). + +It is important to note that when TPM2 sealing is available on the host, +credentials can only be created and used on the **same machine** as decrypting +the secrets requires the encryption key *and* a key stored in TPM2. Therefore, +creating credentials and then copying it to another machine will fail! + +Please be aware that, due to its nature, this plugin is **ONLY** available +when started as a service. It does **NOT** find any credentials when started +manually via the command line! Therefore, `secrets` commands should **not** +be used with this plugin. 
+ +## Usage + +Secrets defined by a store are referenced with `@{<secretstore-id>:<secret-key>}` in +the Telegraf configuration. Only certain Telegraf plugins and options of +those plugins support secret stores. To see which plugins and options support +secrets, see their respective documentation (e.g. +`plugins/outputs/influxdb/README.md`). If the plugin's README has the +`Secret-store support` section, it will detail which options support secret +store usage. + +## Configuration + +```toml @sample.conf +# Secret-store to access systemd secrets +[[secretstores.systemd]] + ## Unique identifier for the secretstore. + ## This id can later be used in plugins to reference the secrets + ## in this secret-store via @{<id>:<secret_key>} (mandatory) + id = "systemd" + + ## Path to systemd credentials directory + ## This should not be required as systemd indicates this directory + ## via the CREDENTIALS_DIRECTORY environment variable. + # path = "${CREDENTIALS_DIRECTORY}" + + ## Prefix to remove from systemd credential-filenames to derive secret names + # prefix = "telegraf." + +``` + +Each Secret provided by systemd will be available as a file under +`${CREDENTIALS_DIRECTORY}/<filename>` for the service. You will **not** be +able to see them as a regular, non-telegraf user. Credential visibility from +other systemd services is mediated by the `User=` and `PrivateMounts=` +service-unit directives for those services. See the +[systemd.exec man-page][systemd-exec] for details. + +## Credential management + +Most steps here are condensed from the [systemd-creds man-page][systemd-creds] +and are using this command. Please also check that man-page as the options +or verbs used here might be outdated for the systemd version you are using. + +**Please note**: We are using `/etc/credstore.encrypted` as our storage +location for encrypted credentials throughout the examples below and assuming +a Telegraf install via package manager. If you are using some other means to +install Telegraf you might need to create that directory.
+Furthermore, we assume the secret-store ID to be set to `systemd` in the +examples. + +Setting up systemd-credentials might vary on your distribution or version so +please also check the documentation there. You might also need to install +supporting packages such as `tpm2-tools`. + +### Setup + +If you have not done it already, systemd requires a first-time setup of the +credential system. If you are planning to use the TPM2 chip of your system +for protecting the credentials, you should first make sure that it is +available using + +```shell +sudo systemd-creds has-tpm2 +``` + +The output should look similar to + +```text +partial +-firmware ++driver ++system ++subsystem +``` + +If TPM2 is available on your system, credentials can also be tied to the device +by utilizing TPM2 sealing. See the [systemd-creds man-page][systemd-creds] for +details. + +Now set up the credentials by creating the root key. + +```shell +sudo systemd-creds setup +``` + +A warning may appear if you are storing the generated key on an unencrypted +disk which is not recommended. With this, we are all set to create credentials. + +### Creating credentials + +After setting up the encryption key we can create a new credential using + +```shell +echo -n "john-doe-jr" | sudo systemd-creds encrypt - /etc/credstore.encrypted/telegraf.http_user +``` + +You should now have a file named `telegraf.http_user` containing the encrypted +username. The secret-store later provides the secret using this filename as the +secret's key. +**Please note**: By default Telegraf strips the `telegraf.` prefix. If you use +a different prefix or no prefix at all you need to adapt the `prefix` setting! + +We can now add more secrets. To avoid potentially leaking the plain-text +credentials through command-history or showing it on the screen we use + +```shell +systemd-ask-password -n | sudo systemd-creds encrypt - /etc/credstore.encrypted/telegraf.http_password +``` + +to interactively enter the password.
+ +### Using credentials as secrets + +To use the credentials as secrets you need to first instantiate a `systemd` +secret-store by adding + +```toml +[[secretstores.systemd]] + id = "systemd" +``` + +to your Telegraf configuration. Assuming the two example credentials +`http_user` and `http_password` you can now use those as secrets via + +```toml +[[inputs.http]] + urls = ["http://localhost/metrics"] + username = "@{systemd:http_user}" + password = "@{systemd:http_password}" + +``` + +in your plugins. + +### Chaining for unattended start + +When using many secrets or when secrets need to be shared among hosts, listing +all of them in the service file might be cumbersome. Additionally, it is hard +to manually test Telegraf configurations with the `systemd` secret-store as +those secrets are only available when started as a service. + +Here, secret-store chaining comes into play, denoting a setup where one +secret-store, in our case `secretstores.systemd`, is used to unlock another +secret-store (`secretstores.jose` in this example). + +```toml +[[secretstores.systemd]] + id = "systemd" + +[[secretstores.jose]] + id = "mysecrets" + path = "/etc/telegraf/secrets" + password = "@{systemd:initial}" +``` + +Here we assume that an `initial` credential was injected through the service +file. This `initial` secret is then used to unlock the `jose` secret-store +which might provide many different secrets backed by encrypted files. + +Input and output plugins can the use the `jose` secrets (via `@{mysecrets:...}`) +to fill sensitive data such as usernames, passwords or tokens. + +### Troubleshooting + +Please always make sure your systemd version matches Telegraf's requirements, +i.e. you do have version 254 or later. + +When not being able to start the service please check the logs. A common issue +is using the `--name` option which does not work with systemd's +`ImportCredential` setting. 
+a mismatch between the name stored in the credential (given during +`systemd-creds encrypt`) and the one used in the +`LoadCredentialEncrypted` statement. + +In case you are having trouble referencing credentials in Telegraf, you should +check what is available via + +```shell +CREDENTIALS_DIRECTORY=/etc/credstore.encrypted sudo systemd-creds list +``` + +for the example above you should see + +```text +NAME SECURE SIZE PATH +------------------------------------------------------------------- +telegraf.http_password insecure 146B /etc/credstore.encrypted/telegraf.http_password +telegraf.http_user insecure 142B /etc/credstore.encrypted/telegraf.http_user +``` + +**Please note**: Telegraf's secret management functionality is not helpful here +as credentials are *only* available to the systemd service, not via commandline. + +Remember to remove the `prefix` configured in your secret-store from the `NAME` +column to get the secrets' `key`. + +To get the actual value of a credential use + +```shell +sudo systemd-creds decrypt /etc/credstore.encrypted/telegraf.http_password - +``` + +Please use the above command(s) with care as they do reveal the secret value +of the credential! + +[systemd]: https://www.freedesktop.org/wiki/Software/systemd/ +[systemd-descr]: https://systemd.io/CREDENTIALS +[systemd-creds]: https://www.freedesktop.org/software/systemd/man/systemd-creds.html +[systemd-exec]: https://www.freedesktop.org/software/systemd/man/systemd.exec.html diff --git a/plugins/secretstores/systemd/sample.conf b/plugins/secretstores/systemd/sample.conf new file mode 100644 index 0000000000000..d2b2c7b199e70 --- /dev/null +++ b/plugins/secretstores/systemd/sample.conf @@ -0,0 +1,15 @@ +# Secret-store to access systemd secrets +[[secretstores.systemd]] + ## Unique identifier for the secretstore. 
+ ## This id can later be used in plugins to reference the secrets + ## in this secret-store via @{:} (mandatory) + id = "systemd" + + ## Path to systemd credentials directory + ## This should not be required as systemd indicates this directory + ## via the CREDENTIALS_DIRECTORY environment variable. + # path = "${CREDENTIALS_DIRECTORY}" + + ## Prefix to remove from systemd credential-filenames to derive secret names + # prefix = "telegraf." + diff --git a/plugins/secretstores/systemd/systemd.go b/plugins/secretstores/systemd/systemd.go new file mode 100644 index 0000000000000..005edb039286e --- /dev/null +++ b/plugins/secretstores/systemd/systemd.go @@ -0,0 +1,138 @@ +//go:build linux + +//go:generate ../../../tools/readme_config_includer/generator +package systemd + +import ( + "context" + _ "embed" + "errors" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/coreos/go-systemd/v22/dbus" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/secretstores" +) + +const systemdMinimumVersion = 250 + +// Required to be a variable to mock the version in tests +var getSystemdVersion = getSystemdMajorVersion + +//go:embed sample.conf +var sampleConfig string + +type Systemd struct { + Path string `toml:"path"` + Prefix string `toml:"prefix"` + Log telegraf.Logger `toml:"-"` +} + +func (*Systemd) SampleConfig() string { + return sampleConfig +} + +// Init initializes all internals of the secret-store +func (s *Systemd) Init() error { + version, err := getSystemdVersion() + if err != nil { + return fmt.Errorf("unable to detect systemd version: %w", err) + } + s.Log.Debugf("Found systemd version %d...", version) + if version < systemdMinimumVersion { + return fmt.Errorf("systemd version %d below minimum version %d", version, systemdMinimumVersion) + } + + // By default the credentials directory is passed in by systemd + // via the "CREDENTIALS_DIRECTORY" environment variable. 
+ defaultPath := os.Getenv("CREDENTIALS_DIRECTORY") + if defaultPath == "" { + s.Log.Warn("CREDENTIALS_DIRECTORY environment variable undefined. Make sure credentials are setup correctly!") + if s.Path == "" { + return errors.New("'path' required without CREDENTIALS_DIRECTORY") + } + } + + // Use default path if no explicit was specified. This should be the common case. + if s.Path == "" { + s.Path = defaultPath + } + s.Path, err = filepath.Abs(s.Path) + if err != nil { + return fmt.Errorf("cannot determine absolute path of %q: %w", s.Path, err) + } + + // Check if we can access the target directory + if _, err := os.Stat(s.Path); err != nil { + return fmt.Errorf("accessing credentials directory %q failed: %w", s.Path, err) + } + return nil +} + +func (s *Systemd) Get(key string) ([]byte, error) { + secretFile, err := filepath.Abs(filepath.Join(s.Path, s.Prefix+key)) + if err != nil { + return nil, err + } + if filepath.Dir(secretFile) != s.Path { + return nil, fmt.Errorf("invalid directory detected for key %q", key) + } + value, err := os.ReadFile(secretFile) + if err != nil { + return nil, fmt.Errorf("cannot read the secret's value: %w", err) + } + return value, nil +} + +func (s *Systemd) List() ([]string, error) { + secretFiles, err := os.ReadDir(s.Path) + if err != nil { + return nil, fmt.Errorf("cannot read files: %w", err) + } + secrets := make([]string, 0, len(secretFiles)) + for _, entry := range secretFiles { + key := strings.TrimPrefix(entry.Name(), s.Prefix) + secrets = append(secrets, key) + } + return secrets, nil +} + +func (s *Systemd) Set(_, _ string) error { + return errors.New("secret-store does not support creating secrets") +} + +// GetResolver returns a function to resolve the given key. 
+func (s *Systemd) GetResolver(key string) (telegraf.ResolveFunc, error) { + resolver := func() ([]byte, bool, error) { + s, err := s.Get(key) + return s, false, err + } + return resolver, nil +} + +func getSystemdMajorVersion() (int, error) { + ctx := context.Background() + conn, err := dbus.NewWithContext(ctx) + if err != nil { + return 0, err + } + defer conn.Close() + + fullVersion, err := conn.GetManagerProperty("Version") + if err != nil { + return 0, err + } + fullVersion = strings.Trim(fullVersion, "\"") + return strconv.Atoi(strings.SplitN(fullVersion, ".", 2)[0]) +} + +// Register the secret-store on load. +func init() { + secretstores.Add("systemd", func(_ string) telegraf.SecretStore { + return &Systemd{Prefix: "telegraf."} + }) +} diff --git a/plugins/secretstores/systemd/systemd_nonlinux.go b/plugins/secretstores/systemd/systemd_nonlinux.go new file mode 100644 index 0000000000000..728b1235959c2 --- /dev/null +++ b/plugins/secretstores/systemd/systemd_nonlinux.go @@ -0,0 +1 @@ +package systemd diff --git a/plugins/secretstores/systemd/systemd_test.go b/plugins/secretstores/systemd/systemd_test.go new file mode 100644 index 0000000000000..88f9a6bad3e9e --- /dev/null +++ b/plugins/secretstores/systemd/systemd_test.go @@ -0,0 +1,174 @@ +//go:build linux + +package systemd + +import ( + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func getSystemdVersionMin() (int, error) { + return systemdMinimumVersion, nil +} + +func TestSampleConfig(t *testing.T) { + plugin := &Systemd{} + require.NotEmpty(t, plugin.SampleConfig()) +} + +func TestMinimumVersion(t *testing.T) { + getSystemdVersion = func() (int, error) { return 123, nil } + + plugin := &Systemd{Log: testutil.Logger{}} + require.ErrorContains(t, plugin.Init(), "below minimum version") +} + +func TestEmptyPath(t *testing.T) { + getSystemdVersion = getSystemdVersionMin + + plugin := &Systemd{Log: testutil.Logger{}} + 
require.ErrorContains(t, plugin.Init(), "'path' required without CREDENTIALS_DIRECTORY") +} + +func TestEmptyCredentialsDirectoryWarning(t *testing.T) { + getSystemdVersion = getSystemdVersionMin + + logger := &testutil.CaptureLogger{} + plugin := &Systemd{ + Path: "testdata", + Log: logger} + require.NoError(t, plugin.Init()) + + actual := logger.Warnings() + require.Len(t, actual, 1) + require.Contains(t, actual[0], "CREDENTIALS_DIRECTORY environment variable undefined") +} + +func TestPathNonExistentExplicit(t *testing.T) { + getSystemdVersion = getSystemdVersionMin + t.Setenv("CREDENTIALS_DIRECTORY", "testdata") + + plugin := &Systemd{ + Path: "non/existent/path", + Log: testutil.Logger{}, + } + require.ErrorContains(t, plugin.Init(), "accessing credentials directory") +} + +func TestPathNonExistentImplicit(t *testing.T) { + getSystemdVersion = getSystemdVersionMin + t.Setenv("CREDENTIALS_DIRECTORY", "non/existent/path") + + plugin := &Systemd{ + Log: testutil.Logger{}, + } + require.ErrorContains(t, plugin.Init(), "accessing credentials directory") +} + +func TestInit(t *testing.T) { + getSystemdVersion = getSystemdVersionMin + t.Setenv("CREDENTIALS_DIRECTORY", "testdata") + + plugin := &Systemd{Log: testutil.Logger{}} + require.NoError(t, plugin.Init()) +} + +func TestSetNotAvailable(t *testing.T) { + getSystemdVersion = getSystemdVersionMin + t.Setenv("CREDENTIALS_DIRECTORY", "testdata") + + plugin := &Systemd{Log: testutil.Logger{}} + require.NoError(t, plugin.Init()) + + // Try to Store the secrets, which this plugin should not let + require.ErrorContains(t, plugin.Set("foo", "bar"), "secret-store does not support creating secrets") +} + +func TestListGet(t *testing.T) { + getSystemdVersion = getSystemdVersionMin + t.Setenv("CREDENTIALS_DIRECTORY", "testdata") + + // secret files name and their content to compare under the `testdata` directory + secrets := map[string]string{ + "secret-file-1": "IWontTell", + "secret_file_2": "SuperDuperSecret!23", + 
"secretFile": "foobar", + } + + // Initialize the plugin + plugin := &Systemd{Log: testutil.Logger{}} + require.NoError(t, plugin.Init()) + + // List the Secrets + keys, err := plugin.List() + require.NoError(t, err) + require.Len(t, keys, len(secrets)) + // check if the returned array from List() is the same + // as the name of secret files + for secretFileName := range secrets { + require.Contains(t, keys, secretFileName) + } + + // Get the secrets + for _, k := range keys { + value, err := plugin.Get(k) + require.NoError(t, err) + v, found := secrets[k] + require.Truef(t, found, "unexpected secret requested that was not found: %q", k) + require.Equal(t, v, string(value)) + } +} + +func TestResolver(t *testing.T) { + getSystemdVersion = getSystemdVersionMin + t.Setenv("CREDENTIALS_DIRECTORY", "testdata") + + // Secret Value Name to Resolve + secretFileName := "secret-file-1" + // Secret Value to Resolve To + secretVal := "IWontTell" + + // Initialize the plugin + plugin := &Systemd{Log: testutil.Logger{}} + require.NoError(t, plugin.Init()) + + // Get the resolver + resolver, err := plugin.GetResolver(secretFileName) + require.NoError(t, err) + require.NotNil(t, resolver) + s, dynamic, err := resolver() + require.NoError(t, err) + require.False(t, dynamic) + require.Equal(t, secretVal, string(s)) +} + +func TestResolverInvalid(t *testing.T) { + getSystemdVersion = getSystemdVersionMin + t.Setenv("CREDENTIALS_DIRECTORY", "testdata") + + // Initialize the plugin + plugin := &Systemd{Log: testutil.Logger{}} + require.NoError(t, plugin.Init()) + + // Get the resolver + resolver, err := plugin.GetResolver("foo") + require.NoError(t, err) + require.NotNil(t, resolver) + _, _, err = resolver() + require.ErrorContains(t, err, "cannot read the secret's value:") +} + +func TestGetNonExistant(t *testing.T) { + getSystemdVersion = getSystemdVersionMin + t.Setenv("CREDENTIALS_DIRECTORY", "testdata") + + // Initialize the plugin + plugin := &Systemd{Log: testutil.Logger{}} + 
require.NoError(t, plugin.Init()) + + // Get the resolver + _, err := plugin.Get("foo") + require.ErrorContains(t, err, "cannot read the secret's value:") +} diff --git a/plugins/secretstores/systemd/testdata/secret-file-1 b/plugins/secretstores/systemd/testdata/secret-file-1 new file mode 100644 index 0000000000000..49bf3f9e9ea2f --- /dev/null +++ b/plugins/secretstores/systemd/testdata/secret-file-1 @@ -0,0 +1 @@ +IWontTell \ No newline at end of file diff --git a/plugins/secretstores/systemd/testdata/secretFile b/plugins/secretstores/systemd/testdata/secretFile new file mode 100644 index 0000000000000..f6ea049518760 --- /dev/null +++ b/plugins/secretstores/systemd/testdata/secretFile @@ -0,0 +1 @@ +foobar \ No newline at end of file diff --git a/plugins/secretstores/systemd/testdata/secret_file_2 b/plugins/secretstores/systemd/testdata/secret_file_2 new file mode 100644 index 0000000000000..4c4c24ef3fd2e --- /dev/null +++ b/plugins/secretstores/systemd/testdata/secret_file_2 @@ -0,0 +1 @@ +SuperDuperSecret!23 \ No newline at end of file diff --git a/plugins/serializers/carbon2/carbon2_test.go b/plugins/serializers/carbon2/carbon2_test.go index 43777e6f04c83..21c4082365472 100644 --- a/plugins/serializers/carbon2/carbon2_test.go +++ b/plugins/serializers/carbon2/carbon2_test.go @@ -9,6 +9,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/plugins/serializers" ) func TestSerializeMetricFloat(t *testing.T) { @@ -416,3 +417,26 @@ func TestSerializeMetricIsProperlySanitized(t *testing.T) { }) } } + +func BenchmarkSerialize(b *testing.B) { + s := &Serializer{} + require.NoError(b, s.Init()) + metrics := serializers.BenchmarkMetrics(b) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := s.Serialize(metrics[i%len(metrics)]) + require.NoError(b, err) + } +} + +func BenchmarkSerializeBatch(b *testing.B) { + s := &Serializer{} + require.NoError(b, s.Init()) + m := 
serializers.BenchmarkMetrics(b) + metrics := m[:] + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := s.SerializeBatch(metrics) + require.NoError(b, err) + } +} diff --git a/plugins/serializers/cloudevents/cloudevents_test.go b/plugins/serializers/cloudevents/cloudevents_test.go index a5b3e65c95a47..435ac1c8dc9eb 100644 --- a/plugins/serializers/cloudevents/cloudevents_test.go +++ b/plugins/serializers/cloudevents/cloudevents_test.go @@ -10,7 +10,6 @@ import ( "path/filepath" "strings" "testing" - "time" cloudevents "github.com/cloudevents/sdk-go/v2" "github.com/gofrs/uuid/v5" @@ -19,10 +18,10 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" - "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/parsers/influx" + "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/testutil" ) @@ -235,28 +234,25 @@ func (*dummygen) NewV7() (uuid.UUID, error) { return uuid.UUID([16]byte{}), errors.New("wrong type") } -/* Benchmarks */ -func BenchmarkSerializer(b *testing.B) { - m := metric.New( - "test", - map[string]string{ - "source": "somehost.company.com", - "host": "localhost", - "status": "healthy", - }, - map[string]interface{}{ - "temperature": 23.5, - "operating_hours": 4242, - "connections": 123, - "standby": true, - "SN": "DC5423DE4CE/2", - }, - time.Now(), - ) - - serializer := &Serializer{} - for n := 0; n < b.N; n++ { - _, err := serializer.Serialize(m) +func BenchmarkSerialize(b *testing.B) { + s := &Serializer{} + require.NoError(b, s.Init()) + metrics := serializers.BenchmarkMetrics(b) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := s.Serialize(metrics[i%len(metrics)]) + require.NoError(b, err) + } +} + +func BenchmarkSerializeBatch(b *testing.B) { + s := &Serializer{} + require.NoError(b, 
s.Init()) + m := serializers.BenchmarkMetrics(b) + metrics := m[:] + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := s.SerializeBatch(metrics) require.NoError(b, err) } } diff --git a/plugins/serializers/csv/csv_test.go b/plugins/serializers/csv/csv_test.go index ade0c10cf916e..62c21cc922168 100644 --- a/plugins/serializers/csv/csv_test.go +++ b/plugins/serializers/csv/csv_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/plugins/parsers/influx" + "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/testutil" ) @@ -201,3 +202,26 @@ func loadTestConfiguration(filename string) (*Config, []string, error) { func loadCSV(filename string) ([]byte, error) { return os.ReadFile(filename) } + +func BenchmarkSerialize(b *testing.B) { + s := &Serializer{} + require.NoError(b, s.Init()) + metrics := serializers.BenchmarkMetrics(b) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := s.Serialize(metrics[i%len(metrics)]) + require.NoError(b, err) + } +} + +func BenchmarkSerializeBatch(b *testing.B) { + s := &Serializer{} + require.NoError(b, s.Init()) + m := serializers.BenchmarkMetrics(b) + metrics := m[:] + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := s.SerializeBatch(metrics) + require.NoError(b, err) + } +} diff --git a/plugins/serializers/graphite/graphite.go b/plugins/serializers/graphite/graphite.go index 289a0cbcbc69d..f416697074bcf 100644 --- a/plugins/serializers/graphite/graphite.go +++ b/plugins/serializers/graphite/graphite.go @@ -17,8 +17,8 @@ import ( const DefaultTemplate = "host.tags.measurement.field" var ( - compatibleAllowedCharsName = regexp.MustCompile(`[^ "-:\<>-\]_a-~\p{L}]`) - compatibleAllowedCharsValue = regexp.MustCompile(`[^ -:<-~\p{L}]`) + compatibleAllowedCharsName = regexp.MustCompile(`[^ "-:\<>-\]_a-~\p{L}]`) //nolint: gocritic // valid range for use-case + compatibleAllowedCharsValue = 
regexp.MustCompile(`[^ -:<-~\p{L}]`) //nolint: gocritic // valid range for use-case compatibleLeadingTildeDrop = regexp.MustCompile(`^[~]*(.*)`) hyphenChars = strings.NewReplacer( "/", "-", diff --git a/plugins/serializers/graphite/graphite_test.go b/plugins/serializers/graphite/graphite_test.go index 3338eff61edbf..f4ad3501d8ead 100644 --- a/plugins/serializers/graphite/graphite_test.go +++ b/plugins/serializers/graphite/graphite_test.go @@ -11,6 +11,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/plugins/serializers" ) var defaultTags = map[string]string{ @@ -209,7 +210,7 @@ func TestSerializeMetricHostWithMultipleTemplatesWithDefault(t *testing.T) { }, } require.NoError(t, s.Init()) - require.Equal(t, s.Template, "tags.host.measurement.field") + require.Equal(t, "tags.host.measurement.field", s.Template) buf, err := s.Serialize(m1) require.NoError(t, err) @@ -1216,3 +1217,26 @@ func TestSerializeBatchWithTagsSupport(t *testing.T) { }) } } + +func BenchmarkSerialize(b *testing.B) { + s := &GraphiteSerializer{} + require.NoError(b, s.Init()) + metrics := serializers.BenchmarkMetrics(b) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := s.Serialize(metrics[i%len(metrics)]) + require.NoError(b, err) + } +} + +func BenchmarkSerializeBatch(b *testing.B) { + s := &GraphiteSerializer{} + require.NoError(b, s.Init()) + m := serializers.BenchmarkMetrics(b) + metrics := m[:] + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := s.SerializeBatch(metrics) + require.NoError(b, err) + } +} diff --git a/plugins/serializers/influx/influx_test.go b/plugins/serializers/influx/influx_test.go index 7bf3e60f80933..36371f2f57656 100644 --- a/plugins/serializers/influx/influx_test.go +++ b/plugins/serializers/influx/influx_test.go @@ -9,6 +9,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" + 
"github.com/influxdata/telegraf/plugins/serializers" ) var tests = []struct { @@ -531,3 +532,26 @@ func TestSerialize_SerializeBatch(t *testing.T) { require.NoError(t, err) require.Equal(t, []byte("cpu value=42 0\ncpu value=42 0\n"), output) } + +func BenchmarkSerialize(b *testing.B) { + s := &Serializer{} + require.NoError(b, s.Init()) + metrics := serializers.BenchmarkMetrics(b) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := s.Serialize(metrics[i%len(metrics)]) + require.NoError(b, err) + } +} + +func BenchmarkSerializeBatch(b *testing.B) { + s := &Serializer{} + require.NoError(b, s.Init()) + m := serializers.BenchmarkMetrics(b) + metrics := m[:] + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := s.SerializeBatch(metrics) + require.NoError(b, err) + } +} diff --git a/plugins/serializers/json/json_test.go b/plugins/serializers/json/json_test.go index 34ea8dea6561a..d562ca79f59d4 100644 --- a/plugins/serializers/json/json_test.go +++ b/plugins/serializers/json/json_test.go @@ -17,6 +17,7 @@ import ( "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/parsers/influx" + "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/testutil" ) @@ -488,3 +489,26 @@ func loadJSON(filename string) (interface{}, error) { err = json.Unmarshal(buf, &data) return data, err } + +func BenchmarkSerialize(b *testing.B) { + s := &Serializer{} + require.NoError(b, s.Init()) + metrics := serializers.BenchmarkMetrics(b) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := s.Serialize(metrics[i%len(metrics)]) + require.NoError(b, err) + } +} + +func BenchmarkSerializeBatch(b *testing.B) { + s := &Serializer{} + require.NoError(b, s.Init()) + m := serializers.BenchmarkMetrics(b) + metrics := m[:] + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := s.SerializeBatch(metrics) + require.NoError(b, err) + } +} diff --git 
a/plugins/serializers/msgpack/metric_test.go b/plugins/serializers/msgpack/metric_test.go index e85fe4a020feb..db084470434cd 100644 --- a/plugins/serializers/msgpack/metric_test.go +++ b/plugins/serializers/msgpack/metric_test.go @@ -15,7 +15,7 @@ func TestMsgPackTime32(t *testing.T) { var nsec int64 t1 := MessagePackTime{time: time.Unix(sec, nsec)} - require.Equal(t, t1.Len(), 4) + require.Equal(t, 4, t1.Len()) buf := make([]byte, t1.Len()) require.NoError(t, t1.MarshalBinaryTo(buf)) @@ -33,7 +33,7 @@ func TestMsgPackTime64(t *testing.T) { var nsec int64 = 999999999 t1 := MessagePackTime{time: time.Unix(sec, nsec)} - require.Equal(t, t1.Len(), 8) + require.Equal(t, 8, t1.Len()) buf := make([]byte, t1.Len()) require.NoError(t, t1.MarshalBinaryTo(buf)) @@ -51,7 +51,7 @@ func TestMsgPackTime96(t *testing.T) { var nsec int64 = 111111111 t1 := MessagePackTime{time: time.Unix(sec, nsec)} - require.Equal(t, t1.Len(), 12) + require.Equal(t, 12, t1.Len()) buf := make([]byte, t1.Len()) require.NoError(t, t1.MarshalBinaryTo(buf)) @@ -65,7 +65,7 @@ func TestMsgPackTime96(t *testing.T) { // Testing the default value: 0001-01-01T00:00:00Z t1 = MessagePackTime{} - require.Equal(t, t1.Len(), 12) + require.Equal(t, 12, t1.Len()) require.NoError(t, t1.MarshalBinaryTo(buf)) t2 = new(MessagePackTime) diff --git a/plugins/serializers/msgpack/msgpack_test.go b/plugins/serializers/msgpack/msgpack_test.go index a8ec169e5db71..fe9f706a34045 100644 --- a/plugins/serializers/msgpack/msgpack_test.go +++ b/plugins/serializers/msgpack/msgpack_test.go @@ -7,6 +7,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/testutil" ) @@ -130,3 +131,24 @@ func TestSerializeBatch(t *testing.T) { testutil.RequireMetricEqual(t, m, toTelegrafMetric(*decodeM)) } } + +func BenchmarkSerialize(b *testing.B) { + s := &Serializer{} + metrics := 
serializers.BenchmarkMetrics(b) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := s.Serialize(metrics[i%len(metrics)]) + require.NoError(b, err) + } +} + +func BenchmarkSerializeBatch(b *testing.B) { + s := &Serializer{} + m := serializers.BenchmarkMetrics(b) + metrics := m[:] + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := s.SerializeBatch(metrics) + require.NoError(b, err) + } +} diff --git a/plugins/serializers/nowmetric/nowmetric_test.go b/plugins/serializers/nowmetric/nowmetric_test.go index 8eb6956ec31b4..167963d461466 100644 --- a/plugins/serializers/nowmetric/nowmetric_test.go +++ b/plugins/serializers/nowmetric/nowmetric_test.go @@ -10,6 +10,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/plugins/serializers" ) func TestSerializeMetricFloat(t *testing.T) { @@ -248,3 +249,26 @@ func TestSerializeInvalidFormat(t *testing.T) { s := &Serializer{Format: "foo"} require.Error(t, s.Init()) } + +func BenchmarkSerialize(b *testing.B) { + s := &Serializer{} + require.NoError(b, s.Init()) + metrics := serializers.BenchmarkMetrics(b) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := s.Serialize(metrics[i%len(metrics)]) + require.NoError(b, err) + } +} + +func BenchmarkSerializeBatch(b *testing.B) { + s := &Serializer{} + require.NoError(b, s.Init()) + m := serializers.BenchmarkMetrics(b) + metrics := m[:] + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := s.SerializeBatch(metrics) + require.NoError(b, err) + } +} diff --git a/plugins/serializers/prometheus/prometheus_test.go b/plugins/serializers/prometheus/prometheus_test.go index a7b2b8f9df0d5..ca643c92041f9 100644 --- a/plugins/serializers/prometheus/prometheus_test.go +++ b/plugins/serializers/prometheus/prometheus_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" + 
"github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/testutil" ) @@ -760,3 +761,26 @@ rpc_duration_seconds_count 2693 }) } } + +func BenchmarkSerialize(b *testing.B) { + s := &Serializer{} + require.NoError(b, s.Init()) + metrics := serializers.BenchmarkMetrics(b) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := s.Serialize(metrics[i%len(metrics)]) + require.NoError(b, err) + } +} + +func BenchmarkSerializeBatch(b *testing.B) { + s := &Serializer{} + require.NoError(b, s.Init()) + m := serializers.BenchmarkMetrics(b) + metrics := m[:] + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := s.SerializeBatch(metrics) + require.NoError(b, err) + } +} diff --git a/plugins/serializers/prometheusremotewrite/prometheusremotewrite_test.go b/plugins/serializers/prometheusremotewrite/prometheusremotewrite_test.go index d8eab63dc7e1c..60b524420fea2 100644 --- a/plugins/serializers/prometheusremotewrite/prometheusremotewrite_test.go +++ b/plugins/serializers/prometheusremotewrite/prometheusremotewrite_test.go @@ -13,6 +13,7 @@ import ( "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/testutil" ) @@ -734,3 +735,24 @@ func protoToSamples(req *prompb.WriteRequest) model.Samples { } return samples } + +func BenchmarkSerialize(b *testing.B) { + s := &Serializer{} + metrics := serializers.BenchmarkMetrics(b) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := s.Serialize(metrics[i%len(metrics)]) + require.NoError(b, err) + } +} + +func BenchmarkSerializeBatch(b *testing.B) { + s := &Serializer{} + m := serializers.BenchmarkMetrics(b) + metrics := m[:] + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := s.SerializeBatch(metrics) + require.NoError(b, err) + } +} diff --git a/plugins/serializers/splunkmetric/splunkmetric_test.go 
b/plugins/serializers/splunkmetric/splunkmetric_test.go index e2cd7471f21d9..df9193d7d3c22 100644 --- a/plugins/serializers/splunkmetric/splunkmetric_test.go +++ b/plugins/serializers/splunkmetric/splunkmetric_test.go @@ -8,6 +8,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/plugins/serializers" ) func TestSerializeMetricFloat(t *testing.T) { @@ -266,3 +267,24 @@ func TestSerializeOmitEvent(t *testing.T) { expS := `{"time":0,"fields":{"metric_name:cpu.system":8,"metric_name:cpu.usage":42}}` require.Equal(t, expS, string(buf)) } + +func BenchmarkSerialize(b *testing.B) { + s := &Serializer{} + metrics := serializers.BenchmarkMetrics(b) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := s.Serialize(metrics[i%len(metrics)]) + require.NoError(b, err) + } +} + +func BenchmarkSerializeBatch(b *testing.B) { + s := &Serializer{} + m := serializers.BenchmarkMetrics(b) + metrics := m[:] + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := s.SerializeBatch(metrics) + require.NoError(b, err) + } +} diff --git a/plugins/serializers/template/template_test.go b/plugins/serializers/template/template_test.go index 5fea65d3c3a5f..fa0ae16cae2b7 100644 --- a/plugins/serializers/template/template_test.go +++ b/plugins/serializers/template/template_test.go @@ -8,6 +8,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/plugins/serializers" ) func TestSerializer(t *testing.T) { @@ -170,13 +171,35 @@ func TestSerializeBatch(t *testing.T) { require.NoError(t, err) require.Equal( t, - string(buf), `0: cpu 42 1: cpu 42 -`, +`, string(buf), ) // A batch template should still work when serializing a single metric singleBuf, err := s.Serialize(m) require.NoError(t, err) - require.Equal(t, string(singleBuf), "0: cpu 42\n") + require.Equal(t, "0: cpu 42\n", string(singleBuf)) +} + +func 
BenchmarkSerialize(b *testing.B) { + s := &Serializer{} + require.NoError(b, s.Init()) + metrics := serializers.BenchmarkMetrics(b) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := s.Serialize(metrics[i%len(metrics)]) + require.NoError(b, err) + } +} + +func BenchmarkSerializeBatch(b *testing.B) { + s := &Serializer{} + require.NoError(b, s.Init()) + m := serializers.BenchmarkMetrics(b) + metrics := m[:] + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := s.SerializeBatch(metrics) + require.NoError(b, err) + } } diff --git a/plugins/serializers/test_benchmark.go b/plugins/serializers/test_benchmark.go new file mode 100644 index 0000000000000..ce4e8487ec82e --- /dev/null +++ b/plugins/serializers/test_benchmark.go @@ -0,0 +1,31 @@ +package serializers + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" +) + +func BenchmarkMetrics(b *testing.B) [4]telegraf.Metric { + b.Helper() + now := time.Now() + tags := map[string]string{ + "cpu": "cpu0", + "host": "realHost", + } + newMetric := func(v interface{}) telegraf.Metric { + fields := map[string]interface{}{ + "usage_idle": v, + } + m := metric.New("cpu", tags, fields, now) + return m + } + return [4]telegraf.Metric{ + newMetric(91.5), + newMetric(91), + newMetric(true), + newMetric(false), + } +} diff --git a/plugins/serializers/wavefront/wavefront_test.go b/plugins/serializers/wavefront/wavefront_test.go index a14174256753c..6d9aa03bfbe39 100755 --- a/plugins/serializers/wavefront/wavefront_test.go +++ b/plugins/serializers/wavefront/wavefront_test.go @@ -9,8 +9,8 @@ import ( "github.com/stretchr/testify/require" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/plugins/serializers" ) func TestBuildTags(t *testing.T) { @@ -285,31 +285,9 @@ func TestSerializeMetricPrefix(t *testing.T) { require.Equal(t, expS, mS) } -func benchmarkMetrics(b 
*testing.B) [4]telegraf.Metric { - b.Helper() - now := time.Now() - tags := map[string]string{ - "cpu": "cpu0", - "host": "realHost", - } - newMetric := func(v interface{}) telegraf.Metric { - fields := map[string]interface{}{ - "usage_idle": v, - } - m := metric.New("cpu", tags, fields, now) - return m - } - return [4]telegraf.Metric{ - newMetric(91.5), - newMetric(91), - newMetric(true), - newMetric(false), - } -} - func BenchmarkSerialize(b *testing.B) { s := &Serializer{} - metrics := benchmarkMetrics(b) + metrics := serializers.BenchmarkMetrics(b) b.ResetTimer() for i := 0; i < b.N; i++ { _, err := s.Serialize(metrics[i%len(metrics)]) @@ -319,7 +297,7 @@ func BenchmarkSerialize(b *testing.B) { func BenchmarkSerializeBatch(b *testing.B) { s := &Serializer{} - m := benchmarkMetrics(b) + m := serializers.BenchmarkMetrics(b) metrics := m[:] b.ResetTimer() for i := 0; i < b.N; i++ { diff --git a/scripts/ci.docker b/scripts/ci.docker index 029577bff5e69..379d13f48447d 100644 --- a/scripts/ci.docker +++ b/scripts/ci.docker @@ -1,4 +1,4 @@ -FROM golang:1.21.3 +FROM golang:1.21.4 RUN chmod -R 755 "$GOPATH" diff --git a/scripts/installgo_linux.sh b/scripts/installgo_linux.sh index a472983bb23f5..da71f4e7001bf 100644 --- a/scripts/installgo_linux.sh +++ b/scripts/installgo_linux.sh @@ -2,10 +2,10 @@ set -eux -GO_VERSION="1.21.3" +GO_VERSION="1.21.4" GO_ARCH="linux-amd64" # from https://golang.org/dl -GO_VERSION_SHA="1241381b2843fae5a9707eec1f8fb2ef94d827990582c7c7c32f5bdfbfd420c8" +GO_VERSION_SHA="73cac0215254d0c7d1241fa40837851f3b9a8a742d0b54714cbdfb3feaf8f0af" # Download Go and verify Go tarball setup_go () { diff --git a/scripts/installgo_mac.sh b/scripts/installgo_mac.sh index 6e06e3f3d955f..498c3a480b451 100644 --- a/scripts/installgo_mac.sh +++ b/scripts/installgo_mac.sh @@ -3,9 +3,9 @@ set -eux ARCH=$(uname -m) -GO_VERSION="1.21.3" -GO_VERSION_SHA_arm64="65302a7a9f7a4834932b3a7a14cb8be51beddda757b567a2f9e0cbd0d7b5a6ab" # from https://golang.org/dl 
-GO_VERSION_SHA_amd64="27014fc69e301d7588a169ca239b3cc609f0aa1abf38528bf0d20d3b259211eb" # from https://golang.org/dl +GO_VERSION="1.21.4" +GO_VERSION_SHA_arm64="8b7caf2ac60bdff457dba7d4ff2a01def889592b834453431ae3caecf884f6a5" # from https://golang.org/dl +GO_VERSION_SHA_amd64="cd3bdcc802b759b70e8418bc7afbc4a65ca73a3fe576060af9fc8a2a5e71c3b8" # from https://golang.org/dl if [ "$ARCH" = 'arm64' ]; then GO_ARCH="darwin-arm64" diff --git a/scripts/installgo_windows.sh b/scripts/installgo_windows.sh index eba7a62f369cd..211e4089d8f7c 100644 --- a/scripts/installgo_windows.sh +++ b/scripts/installgo_windows.sh @@ -2,7 +2,7 @@ set -eux -GO_VERSION="1.21.3" +GO_VERSION="1.21.4" setup_go () { choco upgrade golang --allow-downgrade --force --version=${GO_VERSION} diff --git a/scripts/telegraf.service b/scripts/telegraf.service index 29ec9c055ee50..77b204c4c4e60 100644 --- a/scripts/telegraf.service +++ b/scripts/telegraf.service @@ -8,6 +8,7 @@ Wants=network-online.target Type=notify EnvironmentFile=-/etc/default/telegraf User=telegraf +ImportCredential=telegraf.* ExecStart=/usr/bin/telegraf -config /etc/telegraf/telegraf.conf -config-directory /etc/telegraf/telegraf.d $TELEGRAF_OPTS ExecReload=/bin/kill -HUP $MAINPID Restart=on-failure @@ -15,6 +16,7 @@ RestartForceExitStatus=SIGPIPE KillMode=mixed TimeoutStopSec=5 LimitMEMLOCK=8M:8M +PrivateMounts=true [Install] WantedBy=multi-user.target diff --git a/scripts/windows-gen-syso.sh b/scripts/windows-gen-syso.sh index fddb900a7df22..5b9d4535fa2af 100755 --- a/scripts/windows-gen-syso.sh +++ b/scripts/windows-gen-syso.sh @@ -6,7 +6,7 @@ NAME="Telegraf" VERSION=$(cd ../../ && make version) FLAGS=() -# If building for arm64, then incude the extra flags required. +# If building for arm64, then include the extra flags required. 
if [ -n "${1+x}" ] && [ "$1" = "arm64" ]; then FLAGS=(-arm -64) fi diff --git a/testutil/log.go b/testutil/log.go index c81370e234f3f..65fb8a882be91 100644 --- a/testutil/log.go +++ b/testutil/log.go @@ -10,7 +10,8 @@ var _ telegraf.Logger = &Logger{} // Logger defines a logging structure for plugins. type Logger struct { - Name string // Name is the plugin name, will be printed in the `[]`. + Name string // Name is the plugin name, will be printed in the `[]`. + Quiet bool } // Errorf logs an error message, patterned after log.Printf. @@ -25,12 +26,16 @@ func (l Logger) Error(args ...interface{}) { // Debugf logs a debug message, patterned after log.Printf. func (l Logger) Debugf(format string, args ...interface{}) { - log.Printf("D! ["+l.Name+"] "+format, args...) + if !l.Quiet { + log.Printf("D! ["+l.Name+"] "+format, args...) + } } // Debug logs a debug message, patterned after log.Print. func (l Logger) Debug(args ...interface{}) { - log.Print(append([]interface{}{"D! [" + l.Name + "] "}, args...)...) + if !l.Quiet { + log.Print(append([]interface{}{"D! [" + l.Name + "] "}, args...)...) + } } // Warnf logs a warning message, patterned after log.Printf. @@ -45,10 +50,14 @@ func (l Logger) Warn(args ...interface{}) { // Infof logs an information message, patterned after log.Printf. func (l Logger) Infof(format string, args ...interface{}) { - log.Printf("I! ["+l.Name+"] "+format, args...) + if !l.Quiet { + log.Printf("I! ["+l.Name+"] "+format, args...) + } } // Info logs an information message, patterned after log.Print. func (l Logger) Info(args ...interface{}) { - log.Print(append([]interface{}{"I! [" + l.Name + "] "}, args...)...) + if !l.Quiet { + log.Print(append([]interface{}{"I! [" + l.Name + "] "}, args...)...) + } }