From bc9d4df5ffdc47195f67bab5e2875dd840eabeb5 Mon Sep 17 00:00:00 2001 From: helenosheaa <38860767+helenosheaa@users.noreply.github.com> Date: Tue, 20 Jul 2021 12:17:10 -0400 Subject: [PATCH 01/48] config and config_test conflicts resolved --- config/config.go | 267 +++++++++++++++++++++++++++++------------- config/config_test.go | 252 ++++++++++++++++++++++++++++++--------- 2 files changed, 382 insertions(+), 137 deletions(-) diff --git a/config/config.go b/config/config.go index 56beed8ee4910..cac9d8793a743 100644 --- a/config/config.go +++ b/config/config.go @@ -2,6 +2,7 @@ package config import ( "bytes" + "context" "fmt" "io/ioutil" "log" @@ -51,7 +52,7 @@ var ( `"`, `\"`, `\`, `\\`, ) - httpLoadConfigRetryInterval = 10 * time.Second + HttpLoadConfigRetryInterval = 10 * time.Second // fetchURLRe is a regex to determine whether the requested file should // be fetched from a remote or read from the filesystem. @@ -70,13 +71,10 @@ type Config struct { InputFilters []string OutputFilters []string - Agent *AgentConfig - Inputs []*models.RunningInput - Outputs []*models.RunningOutput - Aggregators []*models.RunningAggregator - // Processors have a slice wrapper type because they need to be sorted - Processors models.RunningProcessors - AggProcessors models.RunningProcessors + Agent *AgentConfig + controller AgentController + // ConfigPlugins []ConfigPlugin + // StoragePlugins []StoragePlugin } // NewConfig creates a new struct to hold the Telegraf config. @@ -96,10 +94,6 @@ func NewConfig() *Config { }, Tags: make(map[string]string), - Inputs: make([]*models.RunningInput, 0), - Outputs: make([]*models.RunningOutput, 0), - Processors: make([]*models.RunningProcessor, 0), - AggProcessors: make([]*models.RunningProcessor, 0), InputFilters: make([]string, 0), OutputFilters: make([]string, 0), } @@ -110,7 +104,6 @@ func NewConfig() *Config { MissingField: c.missingTomlField, } c.toml = tomlCfg - return c } @@ -204,7 +197,7 @@ type AgentConfig struct { // InputNames returns a list of strings of the configured inputs. func (c *Config) InputNames() []string { var name []string - for _, input := range c.Inputs { + for _, input := range c.controller.RunningInputs() { name = append(name, input.Config.Name) } return PluginNameCounts(name) @@ -213,8 +206,10 @@ func (c *Config) InputNames() []string { // AggregatorNames returns a list of strings of the configured aggregators. func (c *Config) AggregatorNames() []string { var name []string - for _, aggregator := range c.Aggregators { - name = append(name, aggregator.Config.Name) + for _, processor := range c.controller.RunningProcessors() { + if p, ok := processor.(*models.RunningAggregator); ok { + name = append(name, p.Config.Name) + } } return PluginNameCounts(name) } @@ -222,8 +217,10 @@ func (c *Config) AggregatorNames() []string { // ProcessorNames returns a list of strings of the configured processors. func (c *Config) ProcessorNames() []string { var name []string - for _, processor := range c.Processors { - name = append(name, processor.Config.Name) + for _, processor := range c.controller.RunningProcessors() { + if p, ok := processor.(*models.RunningProcessor); ok { + name = append(name, p.Config.Name) + } } return PluginNameCounts(name) } @@ -231,12 +228,16 @@ func (c *Config) ProcessorNames() []string { // OutputNames returns a list of strings of the configured outputs. 
func (c *Config) OutputNames() []string { var name []string - for _, output := range c.Outputs { + for _, output := range c.controller.RunningOutputs() { name = append(name, output.Config.Name) } return PluginNameCounts(name) } +func (c *Config) SetAgent(agent AgentController) { + c.controller = agent +} + // PluginNameCounts returns a list of sorted plugin names and their count func PluginNameCounts(plugins []string) []string { names := make(map[string]int) @@ -285,7 +286,6 @@ var header = `# Telegraf Configuration # Environment variables can be used anywhere in this config file, simply surround # them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"), # for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR}) - ` var globalTagsConfig = ` # Global tags can be specified here in key="value" format. @@ -294,7 +294,6 @@ var globalTagsConfig = ` # rack = "1a" ## Environment variables can be used as tags, and throughout the config file # user = "$USER" - ` var agentConfig = ` # Configuration for telegraf agent @@ -379,35 +378,30 @@ var outputHeader = ` ############################################################################### # OUTPUT PLUGINS # ############################################################################### - ` var processorHeader = ` ############################################################################### # PROCESSOR PLUGINS # ############################################################################### - ` var aggregatorHeader = ` ############################################################################### # AGGREGATOR PLUGINS # ############################################################################### - ` var inputHeader = ` ############################################################################### # INPUT PLUGINS # ############################################################################### - ` var serviceInputHeader = ` ############################################################################### # SERVICE INPUT PLUGINS # ############################################################################### - ` // PrintSampleConfig prints the sample config @@ -419,7 +413,7 @@ func PrintSampleConfig( processorFilters []string, ) { // print headers - fmt.Printf(header) + fmt.Println(header) if len(sectionFilters) == 0 { sectionFilters = sectionDefaults @@ -430,11 +424,11 @@ func PrintSampleConfig( if sliceContains("outputs", sectionFilters) { if len(outputFilters) != 0 { if len(outputFilters) >= 3 && outputFilters[1] != "none" { - fmt.Printf(outputHeader) + fmt.Println(outputHeader) } printFilteredOutputs(outputFilters, false) } else { - fmt.Printf(outputHeader) + fmt.Println(outputHeader) printFilteredOutputs(outputDefaults, false) // Print non-default outputs, commented var pnames []string @@ -452,11 +446,11 @@ func PrintSampleConfig( if sliceContains("processors", sectionFilters) { if len(processorFilters) != 0 { if len(processorFilters) >= 3 && processorFilters[1] != "none" { - fmt.Printf(processorHeader) + fmt.Println(processorHeader) } printFilteredProcessors(processorFilters, false) } else { - fmt.Printf(processorHeader) + fmt.Println(processorHeader) pnames := []string{} for pname := range processors.Processors { pnames = append(pnames, pname) @@ -470,11 +464,11 @@ func PrintSampleConfig( if sliceContains("aggregators", sectionFilters) { if len(aggregatorFilters) != 0 { if len(aggregatorFilters) >= 3 && aggregatorFilters[1] != "none" { - fmt.Printf(aggregatorHeader) + fmt.Println(aggregatorHeader) } 
printFilteredAggregators(aggregatorFilters, false) } else { - fmt.Printf(aggregatorHeader) + fmt.Println(aggregatorHeader) pnames := []string{} for pname := range aggregators.Aggregators { pnames = append(pnames, pname) @@ -488,11 +482,11 @@ func PrintSampleConfig( if sliceContains("inputs", sectionFilters) { if len(inputFilters) != 0 { if len(inputFilters) >= 3 && inputFilters[1] != "none" { - fmt.Printf(inputHeader) + fmt.Println(inputHeader) } printFilteredInputs(inputFilters, false) } else { - fmt.Printf(inputHeader) + fmt.Println(inputHeader) printFilteredInputs(inputDefaults, false) // Print non-default inputs, commented var pnames []string @@ -582,7 +576,7 @@ func printFilteredInputs(inputFilters []string, commented bool) { } sort.Strings(servInputNames) - fmt.Printf(serviceInputHeader) + fmt.Println(serviceInputHeader) for _, name := range servInputNames { printConfig(name, servInputs[name], "inputs", commented) } @@ -608,11 +602,11 @@ func printFilteredOutputs(outputFilters []string, commented bool) { func printFilteredGlobalSections(sectionFilters []string) { if sliceContains("global_tags", sectionFilters) { - fmt.Printf(globalTagsConfig) + fmt.Println(globalTagsConfig) } if sliceContains("agent", sectionFilters) { - fmt.Printf(agentConfig) + fmt.Println(agentConfig) } } @@ -669,7 +663,7 @@ func PrintOutputConfig(name string) error { } // LoadDirectory loads all toml config files found in the specified path, recursively. -func (c *Config) LoadDirectory(path string) error { +func (c *Config) LoadDirectory(ctx context.Context, outputCtx context.Context, path string) error { walkfn := func(thispath string, info os.FileInfo, _ error) error { if info == nil { log.Printf("W! Telegraf is not permitted to read %s", thispath) @@ -688,7 +682,7 @@ func (c *Config) LoadDirectory(path string) error { if len(name) < 6 || name[len(name)-5:] != ".conf" { return nil } - err := c.LoadConfig(thispath) + err := c.LoadConfig(ctx, outputCtx, thispath) if err != nil { return err } @@ -702,7 +696,7 @@ func (c *Config) LoadDirectory(path string) error { // 2. $HOME/.telegraf/telegraf.conf // 3. 
/etc/telegraf/telegraf.conf // -func getDefaultConfigPath() (string, error) { +func GetDefaultConfigPath() (string, error) { envfile := os.Getenv("TELEGRAF_CONFIG_PATH") homefile := os.ExpandEnv("${HOME}/.telegraf/telegraf.conf") etcfile := "/etc/telegraf/telegraf.conf" @@ -736,10 +730,10 @@ func isURL(str string) bool { } // LoadConfig loads the given config file and applies it to c -func (c *Config) LoadConfig(path string) error { +func (c *Config) LoadConfig(ctx context.Context, outputCtx context.Context, path string) error { var err error if path == "" { - if path, err = getDefaultConfigPath(); err != nil { + if path, err = GetDefaultConfigPath(); err != nil { return err } } @@ -748,14 +742,14 @@ func (c *Config) LoadConfig(path string) error { return fmt.Errorf("Error loading config file %s: %w", path, err) } - if err = c.LoadConfigData(data); err != nil { + if err = c.LoadConfigData(ctx, outputCtx, data); err != nil { return fmt.Errorf("Error loading config file %s: %w", path, err) } return nil } // LoadConfigData loads TOML-formatted config data -func (c *Config) LoadConfigData(data []byte) error { +func (c *Config) LoadConfigData(ctx context.Context, outputCtx context.Context, data []byte) error { tbl, err := parseConfig(data) if err != nil { return fmt.Errorf("Error parsing data: %s", err) @@ -816,12 +810,12 @@ func (c *Config) LoadConfigData(data []byte) error { switch pluginSubTable := pluginVal.(type) { // legacy [outputs.influxdb] support case *ast.Table: - if err = c.addOutput(pluginName, pluginSubTable); err != nil { + if err = c.addOutput(outputCtx, pluginName, pluginSubTable); err != nil { return fmt.Errorf("error parsing %s, %w", pluginName, err) } case []*ast.Table: for _, t := range pluginSubTable { - if err = c.addOutput(pluginName, t); err != nil { + if err = c.addOutput(outputCtx, pluginName, t); err != nil { return fmt.Errorf("error parsing %s array, %w", pluginName, err) } } @@ -838,12 +832,12 @@ func (c *Config) LoadConfigData(data []byte) error { switch pluginSubTable := pluginVal.(type) { // legacy [inputs.cpu] support case *ast.Table: - if err = c.addInput(pluginName, pluginSubTable); err != nil { + if err = c.addInput(ctx, pluginName, pluginSubTable); err != nil { return fmt.Errorf("error parsing %s, %w", pluginName, err) } case []*ast.Table: for _, t := range pluginSubTable { - if err = c.addInput(pluginName, t); err != nil { + if err = c.addInput(ctx, pluginName, t); err != nil { return fmt.Errorf("error parsing %s, %w", pluginName, err) } } @@ -889,19 +883,32 @@ func (c *Config) LoadConfigData(data []byte) error { return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used", name, pluginName, subTable.Line, keys(c.UnusedFields)) } } + case "config": + for pluginName, pluginVal := range subTable.Fields { + switch pluginSubTable := pluginVal.(type) { + case []*ast.Table: + for _, t := range pluginSubTable { + if err = c.addConfigPlugin(ctx, pluginName, t); err != nil { + return fmt.Errorf("Error parsing %s, %s", pluginName, err) + } + } + default: + return fmt.Errorf("Unsupported config format: %s", + pluginName) + } + if len(c.UnusedFields) > 0 { + return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used", name, pluginName, subTable.Line, keys(c.UnusedFields)) + } + } // Assume it's an input input for legacy config file support if no other // identifiers are present default: - if err = c.addInput(name, subTable); err != nil { + if err = c.addInput(ctx, name, subTable); 
err != nil { return fmt.Errorf("Error parsing %s, %s", name, err) } } } - if len(c.Processors) > 1 { - sort.Sort(c.Processors) - } - return nil } @@ -957,8 +964,8 @@ func fetchConfig(u *url.URL) ([]byte, error) { if resp.StatusCode != http.StatusOK { if i < retries { - log.Printf("Error getting HTTP config. Retry %d of %d in %s. Status=%d", i, retries, httpLoadConfigRetryInterval, resp.StatusCode) - time.Sleep(httpLoadConfigRetryInterval) + log.Printf("Error getting HTTP config. Retry %d of %d in %s. Status=%d", i, retries, HttpLoadConfigRetryInterval, resp.StatusCode) + time.Sleep(HttpLoadConfigRetryInterval) continue } return nil, fmt.Errorf("Retry %d of %d failed to retrieve remote config: %s", i, retries, resp.Status) @@ -1001,6 +1008,44 @@ func parseConfig(contents []byte) (*ast.Table, error) { return toml.Parse(contents) } +func (c *Config) addConfigPlugin(ctx context.Context, name string, table *ast.Table) error { + creator, ok := ConfigPlugins[name] + if !ok { + return fmt.Errorf("Undefined but requested config plugin: %s", name) + } + configPlugin := creator() + + if stCfg, ok := table.Fields["storage"]; ok { + storageCfgTable := stCfg.(*ast.Table) + if cfg, ok := storageCfgTable.Fields["internal"]; ok { + // create internal storage plugin + // set cfg + sp := StoragePlugins["internal"]() + if err := c.toml.UnmarshalTable(cfg.(*ast.Table), sp); err != nil { + return err + } + reflect.ValueOf(configPlugin).Elem().FieldByName("Storage").Set(reflect.ValueOf(sp)) + } + delete(table.Fields, "storage") + } + + if err := c.toml.UnmarshalTable(table, configPlugin); err != nil { + return err + } + + // TODO: set storage + + // TODO: this init is expecting to see all of the config, I think? + // what it's really getting is a reference to this config object that isn't doing much. 
+ if err := configPlugin.Init(ctx, c, c.controller); err != nil { + return err + } + + go c.controller.RunConfigPlugin(ctx, configPlugin) + + return nil +} + func (c *Config) addAggregator(name string, table *ast.Table) error { creator, ok := aggregators.Aggregators[name] if !ok { @@ -1017,7 +1062,16 @@ func (c *Config) addAggregator(name string, table *ast.Table) error { return err } - c.Aggregators = append(c.Aggregators, models.NewRunningAggregator(aggregator, conf)) + ra := models.NewRunningAggregator(aggregator, conf) + + if err := ra.Init(); err != nil { + return err + } + + c.controller.AddProcessor(ra) + + go c.controller.RunProcessor(ra) + return nil } @@ -1036,14 +1090,14 @@ func (c *Config) addProcessor(name string, table *ast.Table) error { if err != nil { return err } - c.Processors = append(c.Processors, rf) - // save a copy for the aggregator - rf, err = c.newRunningProcessor(creator, processorConfig, table) - if err != nil { + if err := rf.Init(); err != nil { return err } - c.AggProcessors = append(c.AggProcessors, rf) + + c.controller.AddProcessor(rf) + + go c.controller.RunProcessor(rf) return nil } @@ -1069,7 +1123,7 @@ func (c *Config) newRunningProcessor( return rf, nil } -func (c *Config) addOutput(name string, table *ast.Table) error { +func (c *Config) addOutput(ctx context.Context, name string, table *ast.Table) error { if len(c.OutputFilters) > 0 && !sliceContains(name, c.OutputFilters) { return nil } @@ -1099,12 +1153,19 @@ func (c *Config) addOutput(name string, table *ast.Table) error { return err } - ro := models.NewRunningOutput(output, outputConfig, c.Agent.MetricBatchSize, c.Agent.MetricBufferLimit) - c.Outputs = append(c.Outputs, ro) + ro := models.NewRunningOutput(output, outputConfig, + c.Agent.MetricBatchSize, c.Agent.MetricBufferLimit) + + if err := ro.Init(); err != nil { + return err + } + + c.controller.AddOutput(ro) + go c.controller.RunOutput(ctx, ro) return nil } -func (c *Config) addInput(name string, table *ast.Table) error { +func (c *Config) addInput(ctx context.Context, name string, table *ast.Table) error { if len(c.InputFilters) > 0 && !sliceContains(name, c.InputFilters) { return nil } @@ -1124,7 +1185,7 @@ func (c *Config) addInput(name string, table *ast.Table) error { if t, ok := input.(parsers.ParserInput); ok { parser, err := c.buildParser(name, table) if err != nil { - return err + return fmt.Errorf("buildParser: %w", err) } t.SetParser(parser) } @@ -1132,7 +1193,7 @@ func (c *Config) addInput(name string, table *ast.Table) error { if t, ok := input.(parsers.ParserFuncInput); ok { config, err := c.getParserConfig(name, table) if err != nil { - return err + return fmt.Errorf("getParserConfig: %w", err) } t.SetParserFunc(func() (parsers.Parser, error) { return parsers.NewParser(config) @@ -1141,7 +1202,7 @@ func (c *Config) addInput(name string, table *ast.Table) error { pluginConfig, err := c.buildInput(name, table) if err != nil { - return err + return fmt.Errorf("buildInput: %w", err) } if err := c.toml.UnmarshalTable(table, input); err != nil { @@ -1150,7 +1211,14 @@ func (c *Config) addInput(name string, table *ast.Table) error { rp := models.NewRunningInput(input, pluginConfig) rp.SetDefaultTags(c.Tags) - c.Inputs = append(c.Inputs, rp) + + if err := rp.Init(); err != nil { + return fmt.Errorf("init: %w", err) + } + c.controller.AddInput(rp) + + go c.controller.RunInput(rp, time.Now()) + return nil } @@ -1173,6 +1241,9 @@ func (c *Config) buildAggregator(name string, tbl *ast.Table) (*models.Aggregato c.getFieldString(tbl, 
"name_suffix", &conf.MeasurementSuffix) c.getFieldString(tbl, "name_override", &conf.NameOverride) c.getFieldString(tbl, "alias", &conf.Alias) + if !c.getFieldInt64(tbl, "order", &conf.Order) { + conf.Order = -10000000 + int64(tbl.Position.Begin) + } conf.Tags = make(map[string]string) if node, ok := tbl.Fields["tags"]; ok { @@ -1201,7 +1272,9 @@ func (c *Config) buildAggregator(name string, tbl *ast.Table) (*models.Aggregato func (c *Config) buildProcessor(name string, tbl *ast.Table) (*models.ProcessorConfig, error) { conf := &models.ProcessorConfig{Name: name} - c.getFieldInt64(tbl, "order", &conf.Order) + if !c.getFieldInt64(tbl, "order", &conf.Order) { + conf.Order = -10000000 + int64(tbl.Position.Begin) + } c.getFieldString(tbl, "alias", &conf.Alias) if c.hasErrs() { @@ -1652,19 +1725,29 @@ func (c *Config) getFieldInt(tbl *ast.Table, fieldName string, target *int) { } } -func (c *Config) getFieldInt64(tbl *ast.Table, fieldName string, target *int64) { - if node, ok := tbl.Fields[fieldName]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if iAst, ok := kv.Value.(*ast.Integer); ok { - i, err := iAst.Int() - if err != nil { - c.addError(tbl, fmt.Errorf("unexpected int type %q, expecting int", iAst.Value)) - return - } - *target = i - } - } +// getFieldInt64 returns true if the value was defined in config +func (c *Config) getFieldInt64(tbl *ast.Table, fieldName string, target *int64) bool { + node, ok := tbl.Fields[fieldName] + if !ok { + return false } + kv, ok := node.(*ast.KeyValue) + if !ok { + c.addError(tbl, fmt.Errorf("expected key/value pair for node %q, but got %t", fieldName, node)) + return false + } + iAst, ok := kv.Value.(*ast.Integer) + if !ok { + c.addError(tbl, fmt.Errorf("expected int variable type for %q, but got %t", fieldName, kv.Value)) + return false + } + i, err := iAst.Int() + if err != nil { + c.addError(tbl, fmt.Errorf("unexpected int type %q, expecting int", iAst.Value)) + return false + } + *target = i + return true } func (c *Config) getFieldStringSlice(tbl *ast.Table, fieldName string, target *[]string) { @@ -1745,6 +1828,24 @@ func (c *Config) addError(tbl *ast.Table, err error) { c.errs = append(c.errs, fmt.Errorf("line %d:%d: %w", tbl.Line, tbl.Position, err)) } +// Inputs is a convenience method that lets you get a list of running Inputs from the config +// this is supported for historical reasons +func (c *Config) Inputs() []*models.RunningInput { + return c.controller.RunningInputs() +} + +// Processors is a convenience method that lets you get a list of running Processors from the config +// this is supported for historical reasons +func (c *Config) Processors() []models.ProcessorRunner { + return c.controller.RunningProcessors() +} + +// Outputs is a convenience method that lets you get a list of running Outputs from the config +// this is supported for historical reasons +func (c *Config) Outputs() []*models.RunningOutput { + return c.controller.RunningOutputs() +} + // unwrappable lets you retrieve the original telegraf.Processor from the // StreamingProcessor. This is necessary because the toml Unmarshaller won't // look inside composed types. 
diff --git a/config/config_test.go b/config/config_test.go index 940b84ada7773..8c066047d6242 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -1,6 +1,7 @@ -package config +package config_test import ( + "context" "fmt" "net/http" "net/http/httptest" @@ -11,19 +12,28 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/models" + _ "github.com/influxdata/telegraf/plugins/aggregators/minmax" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" + _ "github.com/influxdata/telegraf/plugins/inputs/file" "github.com/influxdata/telegraf/plugins/outputs" + _ "github.com/influxdata/telegraf/plugins/outputs/file" "github.com/influxdata/telegraf/plugins/parsers" + _ "github.com/influxdata/telegraf/plugins/processors/rename" "github.com/stretchr/testify/require" ) func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) { - c := NewConfig() + c := config.NewConfig() + agentController := &testAgentController{} + c.SetAgent(agentController) + defer agentController.reset() require.NoError(t, os.Setenv("MY_TEST_SERVER", "192.168.1.1")) require.NoError(t, os.Setenv("TEST_INTERVAL", "10s")) - c.LoadConfig("./testdata/single_plugin_env_vars.toml") + err := c.LoadConfig(context.Background(), context.Background(), "./testdata/single_plugin_env_vars.toml") + require.NoError(t, err) input := inputs.Inputs["memcached"]().(*MockupInputPlugin) input.Servers = []string{"192.168.1.1"} @@ -54,16 +64,22 @@ func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) { } inputConfig.Tags = make(map[string]string) + require.Len(t, c.Inputs(), 1) + // Ignore Log and Parser - c.Inputs[0].Input.(*MockupInputPlugin).Log = nil - c.Inputs[0].Input.(*MockupInputPlugin).parser = nil - require.Equal(t, input, c.Inputs[0].Input, "Testdata did not produce a correct mockup struct.") - require.Equal(t, inputConfig, c.Inputs[0].Config, "Testdata did not produce correct input metadata.") + c.Inputs()[0].Input.(*MockupInputPlugin).Log = nil + c.Inputs()[0].Input.(*MockupInputPlugin).parser = nil + require.Equal(t, input, c.Inputs()[0].Input, "Testdata did not produce a correct mockup struct.") + require.Equal(t, inputConfig, c.Inputs()[0].Config, "Testdata did not produce correct input metadata.") } func TestConfig_LoadSingleInput(t *testing.T) { - c := NewConfig() - c.LoadConfig("./testdata/single_plugin.toml") + c := config.NewConfig() + agentController := &testAgentController{} + c.SetAgent(agentController) + defer agentController.reset() + err := c.LoadConfig(context.Background(), context.Background(), "./testdata/single_plugin.toml") + require.NoError(t, err) input := inputs.Inputs["memcached"]().(*MockupInputPlugin) input.Servers = []string{"localhost"} @@ -94,17 +110,22 @@ func TestConfig_LoadSingleInput(t *testing.T) { } inputConfig.Tags = make(map[string]string) + require.Len(t, c.Inputs(), 1) + // Ignore Log and Parser - c.Inputs[0].Input.(*MockupInputPlugin).Log = nil - c.Inputs[0].Input.(*MockupInputPlugin).parser = nil - require.Equal(t, input, c.Inputs[0].Input, "Testdata did not produce a correct memcached struct.") - require.Equal(t, inputConfig, c.Inputs[0].Config, "Testdata did not produce correct memcached metadata.") + c.Inputs()[0].Input.(*MockupInputPlugin).Log = nil + c.Inputs()[0].Input.(*MockupInputPlugin).parser = nil + require.Equal(t, input, c.Inputs()[0].Input, "Testdata did not 
produce a correct memcached struct.") + require.Equal(t, inputConfig, c.Inputs()[0].Config, "Testdata did not produce correct memcached metadata.") } func TestConfig_LoadDirectory(t *testing.T) { - c := NewConfig() - require.NoError(t, c.LoadConfig("./testdata/single_plugin.toml")) - require.NoError(t, c.LoadDirectory("./testdata/subconfig")) + c := config.NewConfig() + agentController := &testAgentController{} + c.SetAgent(agentController) + defer agentController.reset() + require.NoError(t, c.LoadConfig(context.Background(), context.Background(), "./testdata/single_plugin.toml")) + require.NoError(t, c.LoadDirectory(context.Background(), context.Background(), "./testdata/subconfig")) // Create the expected data expectedPlugins := make([]*MockupInputPlugin, 4) @@ -189,9 +210,9 @@ func TestConfig_LoadDirectory(t *testing.T) { expectedConfigs[3].Tags = make(map[string]string) // Check the generated plugins - require.Len(t, c.Inputs, len(expectedPlugins)) - require.Len(t, c.Inputs, len(expectedConfigs)) - for i, plugin := range c.Inputs { + require.Len(t, c.Inputs(), len(expectedPlugins)) + require.Len(t, c.Inputs(), len(expectedConfigs)) + for i, plugin := range c.Inputs() { input := plugin.Input.(*MockupInputPlugin) // Check the logger and ignore it for comparison require.NotNil(t, input.Log) @@ -208,59 +229,75 @@ func TestConfig_LoadDirectory(t *testing.T) { } func TestConfig_LoadSpecialTypes(t *testing.T) { - c := NewConfig() - require.NoError(t, c.LoadConfig("./testdata/special_types.toml")) - require.Len(t, c.Inputs, 1) + c := config.NewConfig() + agentController := &testAgentController{} + c.SetAgent(agentController) + defer agentController.reset() + err := c.LoadConfig(context.Background(), context.Background(), "./testdata/special_types.toml") + require.NoError(t, err) + require.Equal(t, 1, len(c.Inputs())) - input, ok := c.Inputs[0].Input.(*MockupInputPlugin) + input, ok := c.Inputs()[0].Input.(*MockupInputPlugin) require.True(t, ok) // Tests telegraf duration parsing. - require.Equal(t, Duration(time.Second), input.WriteTimeout) - // Tests telegraf size parsing. + require.Equal(t, Duration(time.Second), input.WriteTimeout) // Tests telegraf size parsing. require.Equal(t, Size(1024*1024), input.MaxBodySize) // Tests toml multiline basic strings. 
require.Equal(t, "/path/to/my/cert", strings.TrimRight(input.TLSCert, "\r\n")) } func TestConfig_FieldNotDefined(t *testing.T) { - c := NewConfig() - err := c.LoadConfig("./testdata/invalid_field.toml") + c := config.NewConfig() + agentController := &testAgentController{} + c.SetAgent(agentController) + defer agentController.reset() + err := c.LoadConfig(context.Background(), context.Background(), "./testdata/invalid_field.toml") require.Error(t, err, "invalid field name") require.Equal(t, "Error loading config file ./testdata/invalid_field.toml: plugin inputs.http_listener_v2: line 1: configuration specified the fields [\"not_a_field\"], but they weren't used", err.Error()) } func TestConfig_WrongFieldType(t *testing.T) { - c := NewConfig() - err := c.LoadConfig("./testdata/wrong_field_type.toml") + c := config.NewConfig() + agentController := &testAgentController{} + c.SetAgent(agentController) + defer agentController.reset() + err := c.LoadConfig(context.Background(), context.Background(), "./testdata/wrong_field_type.toml") require.Error(t, err, "invalid field type") require.Equal(t, "Error loading config file ./testdata/wrong_field_type.toml: error parsing http_listener_v2, line 2: (config.MockupInputPlugin.Port) cannot unmarshal TOML string into int", err.Error()) - c = NewConfig() - err = c.LoadConfig("./testdata/wrong_field_type2.toml") + c = config.NewConfig() + c.SetAgent(agentController) + err = c.LoadConfig(context.Background(), context.Background(), "./testdata/wrong_field_type2.toml") require.Error(t, err, "invalid field type2") require.Equal(t, "Error loading config file ./testdata/wrong_field_type2.toml: error parsing http_listener_v2, line 2: (config.MockupInputPlugin.Methods) cannot unmarshal TOML string into []string", err.Error()) } func TestConfig_InlineTables(t *testing.T) { // #4098 - c := NewConfig() - require.NoError(t, c.LoadConfig("./testdata/inline_table.toml")) + c := config.NewConfig() + agentController := &testAgentController{} + c.SetAgent(agentController) + defer agentController.reset() + require.NoError(t, c.LoadConfig(context.Background(), context.Background(), "./testdata/inline_table.toml")) require.Len(t, c.Outputs, 2) - output, ok := c.Outputs[1].Output.(*MockupOuputPlugin) + output, ok := c.Outputs()[1].Output.(*MockupOuputPlugin) require.True(t, ok) require.Equal(t, map[string]string{"Authorization": "Token $TOKEN", "Content-Type": "application/json"}, output.Headers) - require.Equal(t, []string{"org_id"}, c.Outputs[0].Config.Filter.TagInclude) + require.Equal(t, []string{"org_id"}, c.Outputs()[0].Config.Filter.TagInclude) } func TestConfig_SliceComment(t *testing.T) { t.Skipf("Skipping until #3642 is resolved") - c := NewConfig() - require.NoError(t, c.LoadConfig("./testdata/slice_comment.toml")) + c := config.NewConfig() + agentController := &testAgentController{} + c.SetAgent(agentController) + defer agentController.reset() + require.NoError(t, c.LoadConfig(context.Background(), context.Background(), "./testdata/slice_comment.toml")) require.Len(t, c.Outputs, 1) - output, ok := c.Outputs[0].Output.(*MockupOuputPlugin) + output, ok := c.Outputs()[0].Output.(*MockupOuputPlugin) require.True(t, ok) require.Equal(t, []string{"test"}, output.Scopes) } @@ -268,20 +305,26 @@ func TestConfig_SliceComment(t *testing.T) { func TestConfig_BadOrdering(t *testing.T) { // #3444: when not using inline tables, care has to be taken so subsequent configuration // doesn't become part of the table. This is not a bug, but TOML syntax. 
- c := NewConfig() - err := c.LoadConfig("./testdata/non_slice_slice.toml") + c := config.NewConfig() + agentController := &testAgentController{} + c.SetAgent(agentController) + defer agentController.reset() + err := c.LoadConfig(context.Background(), context.Background(), "./testdata/non_slice_slice.toml") require.Error(t, err, "bad ordering") require.Equal(t, "Error loading config file ./testdata/non_slice_slice.toml: error parsing http array, line 4: cannot unmarshal TOML array into string (need slice)", err.Error()) } func TestConfig_AzureMonitorNamespacePrefix(t *testing.T) { // #8256 Cannot use empty string as the namespace prefix - c := NewConfig() - require.NoError(t, c.LoadConfig("./testdata/azure_monitor.toml")) + c := config.NewConfig() + agentController := &testAgentController{} + c.SetAgent(agentController) + defer agentController.reset() + require.NoError(t, c.LoadConfig(context.Background(), context.Background(), "./testdata/azure_monitor.toml")) require.Len(t, c.Outputs, 2) expectedPrefix := []string{"Telegraf/", ""} - for i, plugin := range c.Outputs { + for i, plugin := range c.Outputs() { output, ok := plugin.Output.(*MockupOuputPlugin) require.True(t, ok) require.Equal(t, expectedPrefix[i], output.NamespacePrefix) @@ -289,7 +332,7 @@ func TestConfig_AzureMonitorNamespacePrefix(t *testing.T) { } func TestConfig_URLRetries3Fails(t *testing.T) { - httpLoadConfigRetryInterval = 0 * time.Second + config.HttpLoadConfigRetryInterval = 0 * time.Second responseCounter := 0 ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusNotFound) @@ -299,15 +342,18 @@ func TestConfig_URLRetries3Fails(t *testing.T) { expected := fmt.Sprintf("Error loading config file %s: Retry 3 of 3 failed to retrieve remote config: 404 Not Found", ts.URL) - c := NewConfig() - err := c.LoadConfig(ts.URL) + c := config.NewConfig() + agentController := &testAgentController{} + c.SetAgent(agentController) + defer agentController.reset() + err := c.LoadConfig(context.Background(), context.Background(), ts.URL) require.Error(t, err) require.Equal(t, expected, err.Error()) require.Equal(t, 4, responseCounter) } func TestConfig_URLRetries3FailsThenPasses(t *testing.T) { - httpLoadConfigRetryInterval = 0 * time.Second + config.HttpLoadConfigRetryInterval = 0 * time.Second responseCounter := 0 ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if responseCounter <= 2 { @@ -319,8 +365,11 @@ func TestConfig_URLRetries3FailsThenPasses(t *testing.T) { })) defer ts.Close() - c := NewConfig() - require.NoError(t, c.LoadConfig(ts.URL)) + c := config.NewConfig() + agentController := &testAgentController{} + c.SetAgent(agentController) + defer agentController.reset() + require.NoError(t, c.LoadConfig(context.Background(), context.Background(), ts.URL)) require.Equal(t, 4, responseCounter) } @@ -330,19 +379,25 @@ func TestConfig_getDefaultConfigPathFromEnvURL(t *testing.T) { })) defer ts.Close() - c := NewConfig() + c := config.NewConfig() + agentController := &testAgentController{} + c.SetAgent(agentController) + defer agentController.reset() err := os.Setenv("TELEGRAF_CONFIG_PATH", ts.URL) require.NoError(t, err) - configPath, err := getDefaultConfigPath() + configPath, err := config.GetDefaultConfigPath() require.NoError(t, err) require.Equal(t, ts.URL, configPath) - err = c.LoadConfig("") + err = c.LoadConfig(context.Background(), context.Background(), "") require.NoError(t, err) } func TestConfig_URLLikeFileName(t 
*testing.T) { - c := NewConfig() - err := c.LoadConfig("http:##www.example.com.conf") + c := config.NewConfig() + agentController := &testAgentController{} + c.SetAgent(agentController) + defer agentController.reset() + err := c.LoadConfig(context.Background(), context.Background(), "http:##www.example.com.conf") require.Error(t, err) if runtime.GOOS == "windows" { @@ -353,6 +408,95 @@ func TestConfig_URLLikeFileName(t *testing.T) { } } +func TestConfig_OrderingProcessorsWithAggregators(t *testing.T) { + c := config.NewConfig() + agentController := &testAgentController{} + c.SetAgent(agentController) + defer agentController.reset() + err := c.LoadConfig(context.Background(), context.Background(), "./testdata/processor_and_aggregator_order.toml") + require.NoError(t, err) + require.Equal(t, 1, len(c.Inputs())) + require.Equal(t, 4, len(c.Processors())) + require.Equal(t, 1, len(c.Outputs())) + + actual := map[string]int64{} + expected := map[string]int64{ + "aggregators.minmax::one": 1, + "processors.rename::two": 2, + "aggregators.minmax::three": 3, + "processors.rename::four": 4, + } + for _, p := range c.Processors() { + actual[p.LogName()] = p.Order() + } + require.EqualValues(t, expected, actual) +} + +func TestConfig_DefaultOrderingProcessorsWithAggregators(t *testing.T) { + c := config.NewConfig() + agentController := &testAgentController{} + c.SetAgent(agentController) + defer agentController.reset() + err := c.LoadConfig(context.Background(), context.Background(), "./testdata/processor_and_aggregator_unordered.toml") + require.NoError(t, err) + require.Equal(t, 1, len(c.Inputs())) + require.Equal(t, 4, len(c.Processors())) + require.Equal(t, 1, len(c.Outputs())) + + actual := map[string]int64{} + // negative orders are defaults based on file position order -10,000,000 + expected := map[string]int64{ + "aggregators.minmax::one": -9999984, + "processors.rename::two": -9999945, + "aggregators.minmax::three": -9999907, + "processors.rename::four": 4, + } + for _, p := range c.Processors() { + actual[p.LogName()] = p.Order() + } + require.EqualValues(t, expected, actual) +} + +type testAgentController struct { + inputs []*models.RunningInput + processors []models.ProcessorRunner + outputs []*models.RunningOutput + // configs []*config.RunningConfigPlugin +} + +func (a *testAgentController) reset() { + a.inputs = nil + a.processors = nil + a.outputs = nil + // a.configs = nil +} + +func (a *testAgentController) RunningInputs() []*models.RunningInput { + return a.inputs +} +func (a *testAgentController) RunningProcessors() []models.ProcessorRunner { + return a.processors +} +func (a *testAgentController) RunningOutputs() []*models.RunningOutput { + return a.outputs +} +func (a *testAgentController) AddInput(input *models.RunningInput) { + a.inputs = append(a.inputs, input) +} +func (a *testAgentController) AddProcessor(processor models.ProcessorRunner) { + a.processors = append(a.processors, processor) +} +func (a *testAgentController) AddOutput(output *models.RunningOutput) { + a.outputs = append(a.outputs, output) +} +func (a *testAgentController) RunInput(input *models.RunningInput, startTime time.Time) {} +func (a *testAgentController) RunProcessor(p models.ProcessorRunner) {} +func (a *testAgentController) RunOutput(ctx context.Context, output *models.RunningOutput) {} +func (a *testAgentController) RunConfigPlugin(ctx context.Context, plugin config.ConfigPlugin) {} +func (a *testAgentController) StopInput(i *models.RunningInput) {} +func (a *testAgentController) StopProcessor(p 
models.ProcessorRunner) {} +func (a *testAgentController) StopOutput(p *models.RunningOutput) {} + /*** Mockup INPUT plugin for testing to avoid cyclic dependencies ***/ type MockupInputPlugin struct { Servers []string `toml:"servers"` From e5dd73e2db8e2a5550ec88e4757712319652c200 Mon Sep 17 00:00:00 2001 From: helenosheaa <38860767+helenosheaa@users.noreply.github.com> Date: Tue, 20 Jul 2021 12:21:45 -0400 Subject: [PATCH 02/48] config plugins, and testdata --- config/plugins.go | 53 +++++++++++++++++++ .../processor_and_aggregator_order.toml | 14 +++++ .../processor_and_aggregator_unordered.toml | 11 ++++ config/testdata/telegraf-agent.toml | 14 ----- 4 files changed, 78 insertions(+), 14 deletions(-) create mode 100644 config/plugins.go create mode 100644 config/testdata/processor_and_aggregator_order.toml create mode 100644 config/testdata/processor_and_aggregator_unordered.toml diff --git a/config/plugins.go b/config/plugins.go new file mode 100644 index 0000000000000..475917a673377 --- /dev/null +++ b/config/plugins.go @@ -0,0 +1,53 @@ +package config + +import ( + "context" + "time" + + "github.com/influxdata/telegraf/models" +) + +type ConfigCreator func() ConfigPlugin +type StorageCreator func() StoragePlugin + +var ( + ConfigPlugins = map[string]ConfigCreator{} + StoragePlugins = map[string]StorageCreator{} +) + +// ConfigPlugin is the interface for implemnting plugins that change how Telegraf works +type ConfigPlugin interface { + GetName() string + Init(ctx context.Context, cfg *Config, agent AgentController) error + Close() error +} + +// StoragePlugin is the interface to implement if you're building a plugin that implements state storage +type StoragePlugin interface { + Init() error + + Load(namespace, key string, obj interface{}) error + Save(namespace, key string, obj interface{}) error + + Close() error +} + +// AgentController represents the plugin management that the agent is currently doing +type AgentController interface { + RunningInputs() []*models.RunningInput + RunningProcessors() []models.ProcessorRunner + RunningOutputs() []*models.RunningOutput + + AddInput(input *models.RunningInput) + AddProcessor(processor models.ProcessorRunner) + AddOutput(output *models.RunningOutput) + + RunInput(input *models.RunningInput, startTime time.Time) + RunProcessor(p models.ProcessorRunner) + RunOutput(ctx context.Context, output *models.RunningOutput) + RunConfigPlugin(ctx context.Context, plugin ConfigPlugin) + + StopInput(i *models.RunningInput) + StopProcessor(p models.ProcessorRunner) + StopOutput(p *models.RunningOutput) +} diff --git a/config/testdata/processor_and_aggregator_order.toml b/config/testdata/processor_and_aggregator_order.toml new file mode 100644 index 0000000000000..b9496d9e38024 --- /dev/null +++ b/config/testdata/processor_and_aggregator_order.toml @@ -0,0 +1,14 @@ +[[inputs.file]] +[[aggregators.minmax]] + alias = "one" + order = 1 +[[processors.rename]] + alias = "two" + order = 2 +[[aggregators.minmax]] + alias = "three" + order = 3 +[[processors.rename]] + alias = "four" + order = 4 +[[outputs.file]] diff --git a/config/testdata/processor_and_aggregator_unordered.toml b/config/testdata/processor_and_aggregator_unordered.toml new file mode 100644 index 0000000000000..4e98d2fcc9ee9 --- /dev/null +++ b/config/testdata/processor_and_aggregator_unordered.toml @@ -0,0 +1,11 @@ +[[inputs.file]] +[[aggregators.minmax]] + alias = "one" +[[processors.rename]] + alias = "two" +[[aggregators.minmax]] + alias = "three" +[[processors.rename]] + alias = 
"four" + order = 4 +[[outputs.file]] diff --git a/config/testdata/telegraf-agent.toml b/config/testdata/telegraf-agent.toml index 6967d6e862277..a0add155b77e7 100644 --- a/config/testdata/telegraf-agent.toml +++ b/config/testdata/telegraf-agent.toml @@ -154,20 +154,6 @@ ## Offset (must be either "oldest" or "newest") offset = "oldest" -# read metrics from a Kafka legacy topic -[[inputs.kafka_consumer_legacy]] - ## topic(s) to consume - topics = ["telegraf"] - # an array of Zookeeper connection strings - zookeeper_peers = ["localhost:2181"] - ## the name of the consumer group - consumer_group = "telegraf_metrics_consumers" - # Maximum number of points to buffer between collection intervals - point_buffer = 100000 - ## Offset (must be either "oldest" or "newest") - offset = "oldest" - - # Read metrics from a LeoFS Server via SNMP [[inputs.leofs]] # An array of URI to gather stats about LeoFS. From 839fdc06440021955bc268e8e4d30084868a5a22 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Tue, 20 Jul 2021 11:23:35 -0500 Subject: [PATCH 03/48] agent refactor --- .gitignore | 1 + accumulator.go | 2 + agent/accumulator.go | 48 ++ agent/agent.go | 1284 +++++++++++++------------------- agent/agent_test.go | 124 ++- agent/tick_test.go | 6 +- internal/rotate/file_writer.go | 3 +- 7 files changed, 644 insertions(+), 824 deletions(-) diff --git a/.gitignore b/.gitignore index 7c3fbd21c3535..b284a75bdae22 100644 --- a/.gitignore +++ b/.gitignore @@ -7,3 +7,4 @@ .DS_Store process.yml /.vscode +testdb.db diff --git a/accumulator.go b/accumulator.go index 1ea5737a84a99..95807fbe7a46c 100644 --- a/accumulator.go +++ b/accumulator.go @@ -52,6 +52,8 @@ type Accumulator interface { // Upgrade to a TrackingAccumulator with space for maxTracked // metrics/batches. WithTracking(maxTracked int) TrackingAccumulator + + WithNewMetricMaker(logName string, logger Logger, f func(metric Metric) Metric) Accumulator } // TrackingID uniquely identifies a tracked metric group diff --git a/agent/accumulator.go b/agent/accumulator.go index 3683b6767d47f..705f38b49b116 100644 --- a/agent/accumulator.go +++ b/agent/accumulator.go @@ -14,6 +14,8 @@ type MetricMaker interface { } type accumulator struct { + sync.Mutex + sync.Cond maker MetricMaker metrics chan<- telegraf.Metric precision time.Duration @@ -28,6 +30,8 @@ func NewAccumulator( metrics: metrics, precision: time.Nanosecond, } + acc.Cond.L = &acc.Mutex + return &acc } @@ -77,8 +81,13 @@ func (ac *accumulator) AddHistogram( } func (ac *accumulator) AddMetric(m telegraf.Metric) { + ac.Lock() + defer ac.Unlock() m.SetTime(m.Time().Round(ac.precision)) if m := ac.maker.MakeMetric(m); m != nil { + if ac.metrics == nil { + ac.Cond.Wait() // unlock and wait for metrics to be set. + } ac.metrics <- m } } @@ -91,11 +100,24 @@ func (ac *accumulator) addFields( t ...time.Time, ) { m := metric.New(measurement, tags, fields, ac.getTime(t), tp) + ac.Lock() + defer ac.Unlock() if m := ac.maker.MakeMetric(m); m != nil { + if ac.metrics == nil { + ac.Cond.Wait() // unlock and wait for metrics to be set. + } ac.metrics <- m } } +// setOutput changes the destination of the accumulator +func (ac *accumulator) setOutput(outCh chan<- telegraf.Metric) { + ac.Lock() + defer ac.Unlock() + ac.metrics = outCh + ac.Cond.Broadcast() +} + // AddError passes a runtime error to the accumulator. // The error will be tagged with the plugin name and written to the log. 
func (ac *accumulator) AddError(err error) { @@ -126,6 +148,32 @@ func (ac *accumulator) WithTracking(maxTracked int) telegraf.TrackingAccumulator } } +func (ac *accumulator) WithNewMetricMaker(logName string, logger telegraf.Logger, f func(metric telegraf.Metric) telegraf.Metric) telegraf.Accumulator { + return &metricMakerAccumulator{ + Accumulator: ac, + makerFunc: f, + logName: logName, + logger: logger, + } +} + +type metricMakerAccumulator struct { + telegraf.Accumulator + logName string + logger telegraf.Logger + makerFunc func(m telegraf.Metric) telegraf.Metric +} + +func (ma *metricMakerAccumulator) LogName() string { + return ma.logName +} +func (ma *metricMakerAccumulator) Log() telegraf.Logger { + return ma.logger +} +func (ma *metricMakerAccumulator) MakeMetric(m telegraf.Metric) telegraf.Metric { + return ma.makerFunc(m) +} + type trackingAccumulator struct { telegraf.Accumulator delivered chan telegraf.DeliveryInfo diff --git a/agent/agent.go b/agent/agent.go index 78097bcd47731..a7e21e07a0a44 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -20,64 +20,64 @@ import ( // Agent runs a set of plugins. type Agent struct { Config *config.Config + + // units hold channels and define connections between plugins + outputGroupUnit outputGroupUnit + processorGroupUnit processorGroupUnit + // inputUnit inputUnit + inputGroupUnit inputGroupUnit + configPluginUnit configPluginUnit + + ctx context.Context + + started *sync.Cond // a condition that lets plugins know when hasStarted is true (when the agent starts) + hasStarted bool } // NewAgent returns an Agent for the given Config. -func NewAgent(config *config.Config) (*Agent, error) { - a := &Agent{ - Config: config, +func NewAgent(ctx context.Context, config *config.Config) *Agent { + inputDestCh := make(chan telegraf.Metric) + outputSrcCh := make(chan telegraf.Metric) + + // by default, connect the dest of the inputs directly to the src for the outputs, + // as processors are added, they will be inserted between these two. + + return &Agent{ + Config: config, + ctx: ctx, + started: sync.NewCond(&sync.Mutex{}), + inputGroupUnit: inputGroupUnit{ + dst: inputDestCh, + relay: channel.NewRelay(inputDestCh, outputSrcCh), + }, + outputGroupUnit: outputGroupUnit{ + src: outputSrcCh, + }, } - return a, nil } -// inputUnit is a group of input plugins and the shared channel they write to. -// -// ┌───────┐ -// │ Input │───┐ -// └───────┘ │ -// ┌───────┐ │ ______ -// │ Input │───┼──▶ ()_____) -// └───────┘ │ -// ┌───────┐ │ -// │ Input │───┘ -// └───────┘ type inputUnit struct { - dst chan<- telegraf.Metric - inputs []*models.RunningInput + input *models.RunningInput + cancelGather context.CancelFunc // used to cancel the gather loop for plugin shutdown +} + +type configPluginUnit struct { + sync.Mutex + plugins []config.ConfigPlugin } // ______ ┌───────────┐ ______ // ()_____)──▶ │ Processor │──▶ ()_____) // └───────────┘ type processorUnit struct { - src <-chan telegraf.Metric - dst chan<- telegraf.Metric - processor *models.RunningProcessor + order int + src chan telegraf.Metric // owns this src + dst chan<- telegraf.Metric // reference to another chan owned elsewhere + processor models.ProcessorRunner + accumulator *accumulator } -// aggregatorUnit is a group of Aggregators and their source and sink channels. -// Typically the aggregators write to a processor channel and pass the original -// metrics to the output channel. The sink channels may be the same channel. 
-// -// ┌────────────┐ -// ┌──▶ │ Aggregator │───┐ -// │ └────────────┘ │ -// ______ │ ┌────────────┐ │ ______ -// ()_____)───┼──▶ │ Aggregator │───┼──▶ ()_____) -// │ └────────────┘ │ -// │ ┌────────────┐ │ -// ├──▶ │ Aggregator │───┘ -// │ └────────────┘ -// │ ______ -// └────────────────────────▶ ()_____) -type aggregatorUnit struct { - src <-chan telegraf.Metric - aggC chan<- telegraf.Metric - outputC chan<- telegraf.Metric - aggregators []*models.RunningAggregator -} - -// outputUnit is a group of Outputs and their source channel. Metrics on the +// outputGroupUnit is a group of Outputs and their source channel. Metrics on the // channel are written to all outputs. // // ┌────────┐ @@ -85,342 +85,215 @@ type aggregatorUnit struct { // │ └────────┘ // ______ ┌─────┐ │ ┌────────┐ // ()_____)──▶ │ Fan │───┼──▶ │ Output │ -// └─────┘ │ └────────┘ +// src └─────┘ │ └────────┘ // │ ┌────────┐ // └──▶ │ Output │ // └────────┘ -type outputUnit struct { - src <-chan telegraf.Metric - outputs []*models.RunningOutput +type outputGroupUnit struct { + sync.Mutex + src chan telegraf.Metric + outputs []outputUnit } -// Run starts and runs the Agent until the context is done. -func (a *Agent) Run(ctx context.Context) error { - log.Printf("I! [agent] Config: Interval:%s, Quiet:%#v, Hostname:%#v, "+ - "Flush Interval:%s", - time.Duration(a.Config.Agent.Interval), a.Config.Agent.Quiet, - a.Config.Agent.Hostname, time.Duration(a.Config.Agent.FlushInterval)) - - log.Printf("D! [agent] Initializing plugins") - err := a.initPlugins() - if err != nil { - return err - } - - startTime := time.Now() - - log.Printf("D! [agent] Connecting outputs") - next, ou, err := a.startOutputs(ctx, a.Config.Outputs) - if err != nil { - return err - } - - var apu []*processorUnit - var au *aggregatorUnit - if len(a.Config.Aggregators) != 0 { - aggC := next - if len(a.Config.AggProcessors) != 0 { - aggC, apu, err = a.startProcessors(next, a.Config.AggProcessors) - if err != nil { - return err - } - } +type outputUnit struct { + output *models.RunningOutput + cancelFlush context.CancelFunc // used to cancel the flush loop for plugin shutdown +} - next, au = a.startAggregators(aggC, next, a.Config.Aggregators) - } +type processorGroupUnit struct { + sync.Mutex + accumulator telegraf.Accumulator + processorUnits []*processorUnit +} - var pu []*processorUnit - if len(a.Config.Processors) != 0 { - next, pu, err = a.startProcessors(next, a.Config.Processors) - if err != nil { - return err +func (pg *processorGroupUnit) Find(pr models.ProcessorRunner) *processorUnit { + for _, unit := range pg.processorUnits { + if unit.processor.GetID() == pr.GetID() { + return unit } } + return nil +} - iu, err := a.startInputs(next, a.Config.Inputs) - if err != nil { - return err - } - - var wg sync.WaitGroup - wg.Add(1) +// inputGroupUnit is a group of input plugins and the shared channel they write to. +// +// ┌───────┐ +// │ Input │───┐ +// └───────┘ │ +// ┌───────┐ │ ______ +// │ Input │───┼──▶ ()_____) +// └───────┘ │ +// ┌───────┐ │ +// │ Input │───┘ +// └───────┘ +type inputGroupUnit struct { + sync.Mutex + dst chan<- telegraf.Metric // owns channel; must stay open until app is shutting down + relay *channel.Relay + inputUnits []inputUnit +} + +// RunWithAPI runs Telegraf in API mode where all the plugins are controlled by +// the user through the config API. When running in this mode plugins are not +// loaded from the toml file. 
+// if ctx errors (eg cancels), inputs will be notified to shutdown +// during shutdown, once the output fanout has received the last metric, +// outputCancel is called as a sort of callback to notify the outputs to finish up. +// you don't want outputs subscribing to the same ctx as the inputs, +// otherwise they'd stop working before receiving all the messages (this is because +// they maintain their own internal buffers rather than depending on metrics buffered +// in a channel) +func (a *Agent) RunWithAPI(outputCancel context.CancelFunc) { go func() { - defer wg.Done() - a.runOutputs(ou) + a.runOutputFanout() + // then the fanout closes, notify the outputs that they won't be receiving + // more metrics via this cancel function. + outputCancel() }() - if au != nil { - wg.Add(1) - go func() { - defer wg.Done() - a.runProcessors(apu) - }() - - wg.Add(1) - go func() { - defer wg.Done() - a.runAggregators(startTime, au) - }() - } + log.Printf("I! [agent] Config: Interval:%s, Quiet:%#v, Hostname:%#v, "+ + "Flush Interval:%s", + a.Config.Agent.Interval.Duration, a.Config.Agent.Quiet, + a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration) - if pu != nil { - wg.Add(1) - go func() { - defer wg.Done() - a.runProcessors(pu) - }() - } + a.inputGroupUnit.relay.Start() - wg.Add(1) - go func() { - defer wg.Done() - a.runInputs(ctx, startTime, iu) - }() + a.started.L.Lock() + a.hasStarted = true + a.started.Broadcast() + a.started.L.Unlock() - wg.Wait() + <-a.Context().Done() + + // wait for all plugins to stop + a.waitForPluginsToStop() log.Printf("D! [agent] Stopped Successfully") - return err } -// initPlugins runs the Init function on plugins. -func (a *Agent) initPlugins() error { - for _, input := range a.Config.Inputs { - err := input.Init() - if err != nil { - return fmt.Errorf("could not initialize input %s: %v", - input.LogName(), err) - } - } - for _, processor := range a.Config.Processors { - err := processor.Init() - if err != nil { - return fmt.Errorf("could not initialize processor %s: %v", - processor.Config.Name, err) - } - } - for _, aggregator := range a.Config.Aggregators { - err := aggregator.Init() - if err != nil { - return fmt.Errorf("could not initialize aggregator %s: %v", - aggregator.Config.Name, err) - } - } - for _, processor := range a.Config.AggProcessors { - err := processor.Init() - if err != nil { - return fmt.Errorf("could not initialize processor %s: %v", - processor.Config.Name, err) - } - } - for _, output := range a.Config.Outputs { - err := output.Init() - if err != nil { - return fmt.Errorf("could not initialize output %s: %v", - output.Config.Name, err) - } - } - return nil -} +func (a *Agent) AddInput(input *models.RunningInput) { + a.inputGroupUnit.Lock() + defer a.inputGroupUnit.Unlock() -func (a *Agent) startInputs( - dst chan<- telegraf.Metric, - inputs []*models.RunningInput, -) (*inputUnit, error) { - log.Printf("D! [agent] Starting service inputs") + a.inputGroupUnit.inputUnits = append(a.inputGroupUnit.inputUnits, + inputUnit{ + input: input, + }) +} - unit := &inputUnit{ - dst: dst, +func (a *Agent) startInput(input *models.RunningInput) error { + // plugins can start before the agent has started; wait until it's asked to + // start before collecting metrics in case other plugins are still loading. 
+ a.started.L.Lock() + for !a.hasStarted { + a.started.Wait() } - for _, input := range inputs { - if si, ok := input.Input.(telegraf.ServiceInput); ok { - // Service input plugins are not normally subject to timestamp - // rounding except for when precision is set on the input plugin. - // - // This only applies to the accumulator passed to Start(), the - // Gather() accumulator does apply rounding according to the - // precision and interval agent/plugin settings. - var interval time.Duration - var precision time.Duration - if input.Config.Precision != 0 { - precision = input.Config.Precision - } + a.started.L.Unlock() + a.inputGroupUnit.Lock() + // Service input plugins are not normally subject to timestamp + // rounding except for when precision is set on the input plugin. + // + // This only applies to the accumulator passed to Start(), the + // Gather() accumulator does apply rounding according to the + // precision and interval agent/plugin settings. + var interval time.Duration + var precision time.Duration + if input.Config.Precision != 0 { + precision = input.Config.Precision + } - acc := NewAccumulator(input, dst) - acc.SetPrecision(getPrecision(precision, interval)) + // the plugin's Start() gets its own accumulator with no rounding, etc + acc := NewAccumulator(input, a.inputGroupUnit.dst) + acc.SetPrecision(getPrecision(precision, interval)) - err := si.Start(acc) - if err != nil { - stopServiceInputs(unit.inputs) - return nil, fmt.Errorf("starting input %s: %w", input.LogName(), err) - } + for _, inp := range a.inputGroupUnit.inputUnits { + if inp.input.GetID() == input.GetID() { + a.inputGroupUnit.Unlock() + return input.Start(acc) } - unit.inputs = append(unit.inputs, input) } + a.inputGroupUnit.Unlock() - return unit, nil + return errors.New("cannot start input; call AddInput first") } -// runInputs starts and triggers the periodic gather for Inputs. -// -// When the context is done the timers are stopped and this function returns -// after all ongoing Gather calls complete. -func (a *Agent) runInputs( - ctx context.Context, - startTime time.Time, - unit *inputUnit, -) { - var wg sync.WaitGroup - for _, input := range unit.inputs { - // Overwrite agent interval if this plugin has its own. - interval := time.Duration(a.Config.Agent.Interval) - if input.Config.Interval != 0 { - interval = input.Config.Interval - } - - // Overwrite agent precision if this plugin has its own. - precision := time.Duration(a.Config.Agent.Precision) - if input.Config.Precision != 0 { - precision = input.Config.Precision - } - - // Overwrite agent collection_jitter if this plugin has its own. 
- jitter := time.Duration(a.Config.Agent.CollectionJitter) - if input.Config.CollectionJitter != 0 { - jitter = input.Config.CollectionJitter - } - - var ticker Ticker - if a.Config.Agent.RoundInterval { - ticker = NewAlignedTicker(startTime, interval, jitter) - } else { - ticker = NewUnalignedTicker(interval, jitter) - } - defer ticker.Stop() - - acc := NewAccumulator(input, unit.dst) - acc.SetPrecision(getPrecision(precision, interval)) - - wg.Add(1) - go func(input *models.RunningInput) { - defer wg.Done() - a.gatherLoop(ctx, acc, input, ticker, interval) - }(input) +// RunInput is a blocking call that runs an input forever +func (a *Agent) RunInput(input *models.RunningInput, startTime time.Time) { + // default to agent interval but check for override + interval := a.Config.Agent.Interval.Duration + if input.Config.Interval != 0 { + interval = input.Config.Interval + } + // default to agent precision but check for override + precision := a.Config.Agent.Precision.Duration + if input.Config.Precision != 0 { + precision = input.Config.Precision + } + // default to agent collection_jitter but check for override + jitter := a.Config.Agent.CollectionJitter.Duration + if input.Config.CollectionJitter != 0 { + jitter = input.Config.CollectionJitter } - wg.Wait() - - log.Printf("D! [agent] Stopping service inputs") - stopServiceInputs(unit.inputs) - - close(unit.dst) - log.Printf("D! [agent] Input channel closed") -} - -// testStartInputs is a variation of startInputs for use in --test and --once -// mode. It differs by logging Start errors and returning only plugins -// successfully started. -func (a *Agent) testStartInputs( - dst chan<- telegraf.Metric, - inputs []*models.RunningInput, -) *inputUnit { - log.Printf("D! [agent] Starting service inputs") - - unit := &inputUnit{ - dst: dst, + var ticker Ticker + if a.Config.Agent.RoundInterval { + ticker = NewAlignedTicker(startTime, interval, jitter) + } else { + ticker = NewUnalignedTicker(interval, jitter) } + defer ticker.Stop() - for _, input := range inputs { - if si, ok := input.Input.(telegraf.ServiceInput); ok { - // Service input plugins are not subject to timestamp rounding. - // This only applies to the accumulator passed to Start(), the - // Gather() accumulator does apply rounding according to the - // precision agent setting. - acc := NewAccumulator(input, dst) - acc.SetPrecision(time.Nanosecond) + acc := NewAccumulator(input, a.inputGroupUnit.dst) + acc.SetPrecision(getPrecision(precision, interval)) + ctx, cancelFunc := context.WithCancel(a.ctx) + defer cancelFunc() // just to keep linters happy - err := si.Start(acc) - if err != nil { - log.Printf("E! [agent] Starting input %s: %v", input.LogName(), err) - } + err := errors.New("loop at least once") + for err != nil && ctx.Err() == nil { + if err = a.startInput(input); err != nil { + log.Printf("E! [agent] failed to start plugin %q: %v", input.LogName(), err) + time.Sleep(10 * time.Second) } - - unit.inputs = append(unit.inputs, input) } - return unit -} - -// testRunInputs is a variation of runInputs for use in --test and --once mode. -// Instead of using a ticker to run the inputs they are called once immediately. 
-func (a *Agent) testRunInputs( - ctx context.Context, - wait time.Duration, - unit *inputUnit, -) { - var wg sync.WaitGroup - - nul := make(chan telegraf.Metric) - go func() { - for range nul { + a.inputGroupUnit.Lock() + for i, iu := range a.inputGroupUnit.inputUnits { + if iu.input == input { + a.inputGroupUnit.inputUnits[i].cancelGather = cancelFunc + break } - }() - - for _, input := range unit.inputs { - wg.Add(1) - go func(input *models.RunningInput) { - defer wg.Done() - - // Overwrite agent interval if this plugin has its own. - interval := time.Duration(a.Config.Agent.Interval) - if input.Config.Interval != 0 { - interval = input.Config.Interval - } - - // Overwrite agent precision if this plugin has its own. - precision := time.Duration(a.Config.Agent.Precision) - if input.Config.Precision != 0 { - precision = input.Config.Precision - } - - // Run plugins that require multiple gathers to calculate rate - // and delta metrics twice. - switch input.Config.Name { - case "cpu", "mongodb", "procstat": - nulAcc := NewAccumulator(input, nul) - nulAcc.SetPrecision(getPrecision(precision, interval)) - if err := input.Input.Gather(nulAcc); err != nil { - nulAcc.AddError(err) - } - - time.Sleep(500 * time.Millisecond) - } - - acc := NewAccumulator(input, unit.dst) - acc.SetPrecision(getPrecision(precision, interval)) - - if err := input.Input.Gather(acc); err != nil { - acc.AddError(err) - } - }(input) } - wg.Wait() + a.inputGroupUnit.Unlock() - internal.SleepContext(ctx, wait) + a.gatherLoop(ctx, acc, input, ticker, interval) + input.Stop() - log.Printf("D! [agent] Stopping service inputs") - stopServiceInputs(unit.inputs) - - close(unit.dst) - log.Printf("D! [agent] Input channel closed") + a.inputGroupUnit.Lock() + for i, iu := range a.inputGroupUnit.inputUnits { + if iu.input == input { + // remove from list + a.inputGroupUnit.inputUnits = append(a.inputGroupUnit.inputUnits[0:i], a.inputGroupUnit.inputUnits[i+1:]...) + break + } + } + a.inputGroupUnit.Unlock() } -// stopServiceInputs stops all service inputs. -func stopServiceInputs(inputs []*models.RunningInput) { - for _, input := range inputs { - if si, ok := input.Input.(telegraf.ServiceInput); ok { - si.Stop() +func (a *Agent) StopInput(i *models.RunningInput) { + defer a.inputGroupUnit.Unlock() +retry: + a.inputGroupUnit.Lock() + for _, iu := range a.inputGroupUnit.inputUnits { + if iu.input == i { + if iu.cancelGather == nil { + // plugin hasn't finished starting, wait longer. + a.inputGroupUnit.Unlock() + time.Sleep(1 * time.Millisecond) + goto retry + } + iu.cancelGather() + break } } } @@ -489,146 +362,197 @@ func (a *Agent) gatherOnce( } } -// startProcessors sets up the processor chain and calls Start on all -// processors. If an error occurs any started processors are Stopped. 
-func (a *Agent) startProcessors( - dst chan<- telegraf.Metric, - processors models.RunningProcessors, -) (chan<- telegraf.Metric, []*processorUnit, error) { - var units []*processorUnit - - // Sort from last to first - sort.SliceStable(processors, func(i, j int) bool { - return processors[i].Config.Order > processors[j].Config.Order - }) - - var src chan telegraf.Metric - for _, processor := range processors { - src = make(chan telegraf.Metric, 100) - acc := NewAccumulator(processor, dst) - - err := processor.Start(acc) - if err != nil { - for _, u := range units { - u.processor.Stop() - close(u.dst) +func (a *Agent) AddProcessor(processor models.ProcessorRunner) { + a.inputGroupUnit.Lock() + defer a.inputGroupUnit.Unlock() + a.processorGroupUnit.Lock() + defer a.processorGroupUnit.Unlock() + + pu := processorUnit{ + src: make(chan telegraf.Metric, 100), + processor: processor, + } + + // insertPos is the position in the list after the slice insert operation + insertPos := 0 + + if len(a.processorGroupUnit.processorUnits) > 0 { + // figure out where in the list to put us + for i, unit := range a.processorGroupUnit.processorUnits { + if unit.order > int(processor.Order()) { + break } - return nil, nil, fmt.Errorf("starting processor %s: %w", processor.LogName(), err) + insertPos = i + 1 } + } - units = append(units, &processorUnit{ - src: src, - dst: dst, - processor: processor, - }) + // if we're the last processor in the list + if insertPos == len(a.processorGroupUnit.processorUnits) { + pu.dst = a.outputGroupUnit.src + } else { + // we're not the last processor + pu.dst = a.processorGroupUnit.processorUnits[insertPos].src + } + + acc := NewAccumulator(processor.(MetricMaker), pu.dst) + pu.accumulator = acc.(*accumulator) - dst = src + // only if we're the first processor or being inserted at front-of-line: + if insertPos == 0 { + a.inputGroupUnit.relay.SetDest(pu.src) + } else { + // not the first processor to be added + prev := a.processorGroupUnit.processorUnits[insertPos-1] + prev.accumulator.setOutput(pu.src) } - return src, units, nil + list := a.processorGroupUnit.processorUnits + // list[0:insertPos] + pu + list[insertPos:] + // list = [0,1,2,3,4] + // [0,1] + pu + [2,3,4] + a.processorGroupUnit.processorUnits = append(append(list[0:insertPos], &pu), list[insertPos:]...) } -// runProcessors begins processing metrics and runs until the source channel is -// closed and all metrics have been written. -func (a *Agent) runProcessors( - units []*processorUnit, -) { - var wg sync.WaitGroup - for _, unit := range units { - wg.Add(1) - go func(unit *processorUnit) { - defer wg.Done() - - acc := NewAccumulator(unit.processor, unit.dst) - for m := range unit.src { - if err := unit.processor.Add(m, acc); err != nil { - acc.AddError(err) - m.Drop() - } +func (a *Agent) startProcessor(processor models.ProcessorRunner) error { + a.processorGroupUnit.Lock() + + for _, pu := range a.processorGroupUnit.processorUnits { + if pu.processor.GetID() == processor.GetID() { + a.processorGroupUnit.Unlock() + err := processor.Start(pu.accumulator) + if err != nil { + return fmt.Errorf("starting processor %s: %w", processor.LogName(), err) } - unit.processor.Stop() - close(unit.dst) - log.Printf("D! [agent] Processor channel closed") - }(unit) + + return nil + } } - wg.Wait() + + a.processorGroupUnit.Unlock() + return nil } -// startAggregators sets up the aggregator unit and returns the source channel. 
-func (a *Agent) startAggregators( - aggC chan<- telegraf.Metric, - outputC chan<- telegraf.Metric, - aggregators []*models.RunningAggregator, -) (chan<- telegraf.Metric, *aggregatorUnit) { - src := make(chan telegraf.Metric, 100) - unit := &aggregatorUnit{ - src: src, - aggC: aggC, - outputC: outputC, - aggregators: aggregators, +// RunProcessor is a blocking call that runs a processor forever +func (a *Agent) RunProcessor(p models.ProcessorRunner) { + a.processorGroupUnit.Lock() + pu := a.processorGroupUnit.Find(p) + a.processorGroupUnit.Unlock() + + if pu == nil { + log.Print("W! [agent] Must call AddProcessor before calling RunProcessor") + return } - return src, unit -} -// runAggregators beings aggregating metrics and runs until the source channel -// is closed and all metrics have been written. -func (a *Agent) runAggregators( - startTime time.Time, - unit *aggregatorUnit, -) { - ctx, cancel := context.WithCancel(context.Background()) + err := errors.New("loop at least once") + for err != nil && a.ctx.Err() == nil { + if err = a.startProcessor(p); err != nil { + log.Printf("E! [agent] failed to start processor %q: %v", p.LogName(), err) + time.Sleep(10 * time.Second) + } + } - // Before calling Add, initialize the aggregation window. This ensures - // that any metric created after start time will be aggregated. - for _, agg := range a.Config.Aggregators { - since, until := updateWindow(startTime, a.Config.Agent.RoundInterval, agg.Period()) - agg.UpdateWindow(since, until) + // processors own their src channel, if it's closed either it's been asked to stop or we're shutting down + for m := range pu.src { + if err := pu.processor.Add(m, pu.accumulator); err != nil { + pu.accumulator.AddError(err) + m.Reject() + } } - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - for metric := range unit.src { - var dropOriginal bool - for _, agg := range a.Config.Aggregators { - if ok := agg.Add(metric); ok { - dropOriginal = true - } - } + pu.processor.Stop() + // only close dst channel if we're shutting down + if a.ctx.Err() != nil { + a.processorGroupUnit.Lock() + if pu.dst != nil { + close(pu.dst) + } + a.processorGroupUnit.Unlock() + } + + a.processorGroupUnit.Lock() + defer a.processorGroupUnit.Unlock() + for i, curr := range a.processorGroupUnit.processorUnits { + if pu == curr { + // remove it from the slice + // Remove the stopped processor from the units list + a.processorGroupUnit.processorUnits = append(a.processorGroupUnit.processorUnits[0:i], a.processorGroupUnit.processorUnits[i+1:len(a.processorGroupUnit.processorUnits)]...) + } + } +} + +// StopProcessor stops processors or aggregators. +// ProcessorRunner could be a *models.RunningProcessor or a *models.RunningAggregator +func (a *Agent) StopProcessor(p models.ProcessorRunner) { + a.processorGroupUnit.Lock() + defer a.processorGroupUnit.Unlock() - if !dropOriginal { - unit.outputC <- metric // keep original. + for i, curr := range a.processorGroupUnit.processorUnits { + if p.GetID() == curr.processor.GetID() { + if i == 0 { + a.inputGroupUnit.relay.SetDest(curr.dst) } else { - metric.Drop() + prev := a.processorGroupUnit.processorUnits[i-1] + prev.accumulator.setOutput(curr.dst) } + close(curr.src) // closing source will tell the processor to stop. 
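+			// (RunProcessor notices the closed channel, drains any remaining
+			// metrics from src, calls Stop on the processor, and removes it
+			// from processorUnits.)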
} - cancel() - }() + } +} - for _, agg := range a.Config.Aggregators { - wg.Add(1) - go func(agg *models.RunningAggregator) { - defer wg.Done() +// RunProcessor is a blocking call that runs a processor forever +func (a *Agent) RunConfigPlugin(ctx context.Context, plugin config.ConfigPlugin) { + a.configPluginUnit.Lock() + a.configPluginUnit.plugins = append(a.configPluginUnit.plugins, plugin) + a.configPluginUnit.Unlock() - interval := time.Duration(a.Config.Agent.Interval) - precision := time.Duration(a.Config.Agent.Precision) + <-ctx.Done() + //TODO: we might want to wait for all other plugins to close? + if err := plugin.Close(); err != nil { + log.Printf("E! [agent] Configuration plugin failed to close: %v", err) + } - acc := NewAccumulator(agg, unit.aggC) - acc.SetPrecision(getPrecision(precision, interval)) - a.push(ctx, agg, acc) - }(agg) + // because we don't have wrappers for config plugins, check to see if there's a storage plugin attached and close it. + p := reflect.ValueOf(plugin) + if p.Kind() == reflect.Ptr { + p = p.Elem() } + if v := p.FieldByName("Storage"); !v.IsZero() && v.IsValid() && !v.IsNil() { + if sp, ok := v.Interface().(config.StoragePlugin); ok { + if err := sp.Close(); err != nil { + log.Printf("E! [agent] Storage plugin failed to close: %v", err) + } + } + } - wg.Wait() + a.configPluginUnit.Lock() + pos := -1 + for i, p := range a.configPluginUnit.plugins { + if p == plugin { + pos = i + break + } + } + if pos > -1 { + a.configPluginUnit.plugins = append(a.configPluginUnit.plugins[0:pos], a.configPluginUnit.plugins[pos+1:]...) + } + a.configPluginUnit.Unlock() +} - // In the case that there are no processors, both aggC and outputC are the - // same channel. If there are processors, we close the aggC and the - // processor chain will close the outputC when it finishes processing. - close(unit.aggC) - log.Printf("D! [agent] Aggregator channel closed") +func (a *Agent) StopOutput(output *models.RunningOutput) { + a.outputGroupUnit.Lock() + defer a.outputGroupUnit.Unlock() + + // find plugin + for _, o := range a.outputGroupUnit.outputs { + if o.output == output { + if o.cancelFlush != nil { + o.cancelFlush() + } + } + } } + func updateWindow(start time.Time, roundInterval bool, period time.Duration) (time.Time, time.Time) { var until time.Time if roundInterval { @@ -645,57 +569,24 @@ func updateWindow(start time.Time, roundInterval bool, period time.Duration) (ti return since, until } -// push runs the push for a single aggregator every period. -func (a *Agent) push( - ctx context.Context, - aggregator *models.RunningAggregator, - acc telegraf.Accumulator, -) { - for { - // Ensures that Push will be called for each period, even if it has - // already elapsed before this function is called. This is guaranteed - // because so long as only Push updates the EndPeriod. This method - // also avoids drift by not using a ticker. - until := time.Until(aggregator.EndPeriod()) - - select { - case <-time.After(until): - aggregator.Push(acc) - break - case <-ctx.Done(): - aggregator.Push(acc) - return - } - } -} - -// startOutputs calls Connect on all outputs and returns the source channel. -// If an error occurs calling Connect all stared plugins have Close called. 
-func (a *Agent) startOutputs( - ctx context.Context, - outputs []*models.RunningOutput, -) (chan<- telegraf.Metric, *outputUnit, error) { - src := make(chan telegraf.Metric, 100) - unit := &outputUnit{src: src} - for _, output := range outputs { - err := a.connectOutput(ctx, output) - if err != nil { - for _, output := range unit.outputs { - output.Close() - } - return nil, nil, fmt.Errorf("connecting output %s: %w", output.LogName(), err) - } +func (a *Agent) AddOutput(output *models.RunningOutput) { + a.outputGroupUnit.Lock() + a.outputGroupUnit.outputs = append(a.outputGroupUnit.outputs, outputUnit{output: output}) + a.outputGroupUnit.Unlock() + } - unit.outputs = append(unit.outputs, output) - } +func (a *Agent) startOutput(output *models.RunningOutput) error { + if err := a.connectOutput(a.ctx, output); err != nil { + return fmt.Errorf("connecting output %s: %w", output.LogName(), err) + } - return src, unit, nil -} + return nil + } // connectOutputs connects to all outputs. func (a *Agent) connectOutput(ctx context.Context, output *models.RunningOutput) error { log.Printf("D! [agent] Attempting connection to [%s]", output.LogName()) - err := output.Output.Connect() + err := output.Connect() if err != nil { log.Printf("E! [agent] Failed to connect to [%s], retrying in 15s, "+ "error was '%s'", output.LogName(), err) @@ -705,7 +596,7 @@ func (a *Agent) connectOutput(ctx context.Context, output *models.RunningOutput) return err } - err = output.Output.Connect() + err = output.Connect() if err != nil { return fmt.Errorf("Error connecting to output %q: %w", output.LogName(), err) } @@ -714,64 +605,82 @@ func (a *Agent) connectOutput(ctx context.Context, output *models.RunningOutput) return nil } -// runOutputs begins processing metrics and returns until the source channel is -// closed and all metrics have been written. On shutdown metrics will be -// written one last time and dropped if unsuccessful. -func (a *Agent) runOutputs( - unit *outputUnit, -) { - var wg sync.WaitGroup - - // Start flush loop - interval := time.Duration(a.Config.Agent.FlushInterval) - jitter := time.Duration(a.Config.Agent.FlushJitter) +// RunOutput runs an output; note the context should be a special context that +// only cancels when it's time for the outputs to close: when the main context +// has closed AND all the input and processor plugins are done. +func (a *Agent) RunOutput(ctx context.Context, output *models.RunningOutput) { + var cancel context.CancelFunc + // wrap with a cancel context so that the StopOutput can stop this individual output without stopping all the outputs. + ctx, cancel = context.WithCancel(ctx) + defer cancel() - ctx, cancel := context.WithCancel(context.Background()) - - for _, output := range unit.outputs { - interval := interval - // Overwrite agent flush_interval if this plugin has its own. - if output.Config.FlushInterval != 0 { - interval = output.Config.FlushInterval + a.outputGroupUnit.Lock() + for i, o := range a.outputGroupUnit.outputs { + if o.output == output { + a.outputGroupUnit.outputs[i].cancelFlush = cancel } + } + a.outputGroupUnit.Unlock() - jitter := jitter - // Overwrite agent flush_jitter if this plugin has its own. - if output.Config.FlushJitter != 0 { - jitter = output.Config.FlushJitter - } + interval := a.Config.Agent.FlushInterval.Duration + jitter := a.Config.Agent.FlushJitter.Duration + // Overwrite agent flush_interval if this plugin has its own. 
+ if output.Config.FlushInterval != 0 { + interval = output.Config.FlushInterval + } - wg.Add(1) - go func(output *models.RunningOutput) { - defer wg.Done() + // Overwrite agent flush_jitter if this plugin has its own. + if output.Config.FlushJitter != 0 { + jitter = output.Config.FlushJitter + } - ticker := NewRollingTicker(interval, jitter) - defer ticker.Stop() + ticker := NewRollingTicker(interval, jitter) + defer ticker.Stop() - a.flushLoop(ctx, output, ticker) - }(output) + err := errors.New("loop at least once") + for err != nil && a.ctx.Err() == nil { + if err = a.startOutput(output); err != nil { + log.Printf("E! [agent] failed to start output %q: %v", output.LogName(), err) + time.Sleep(10 * time.Second) + } } - for metric := range unit.src { - for i, output := range unit.outputs { - if i == len(a.Config.Outputs)-1 { - output.AddMetric(metric) - } else { - output.AddMetric(metric.Copy()) - } + a.flushLoop(ctx, output, ticker) + + a.outputGroupUnit.Lock() + + // find plugin + for i, o := range a.outputGroupUnit.outputs { + if o.output == output { + // disconnect it from the output broadcaster and remove it from the list + a.outputGroupUnit.outputs = append(a.outputGroupUnit.outputs[:i], a.outputGroupUnit.outputs[i+1:]...) } - } + } + a.outputGroupUnit.Unlock() - log.Println("I! [agent] Hang on, flushing any cached metrics before shutdown") - cancel() - wg.Wait() + a.flushOnce(output, ticker, output.Write) - log.Println("I! [agent] Stopping running outputs") - stopRunningOutputs(unit.outputs) + output.Close() } -// flushLoop runs an output's flush function periodically until the context is -// done. +// runOutputFanout does the outputGroupUnit fanout, copying a metric to all outputs +func (a *Agent) runOutputFanout() { + for metric := range a.outputGroupUnit.src { + // if there are no outputs I guess we're dropping them. + a.outputGroupUnit.Lock() + outs := a.outputGroupUnit.outputs + a.outputGroupUnit.Unlock() + for i, output := range outs { + if i == len(outs)-1 { + output.output.AddMetric(metric) + } else { + output.output.AddMetric(metric.Copy()) + } + } + } + + +// flushLoop runs an output's flush function periodically until the context is done. func (a *Agent) flushLoop( ctx context.Context, output *models.RunningOutput, @@ -838,218 +747,7 @@ func (a *Agent) flushOnce( } } -// Test runs the inputs, processors and aggregators for a single gather and -// writes the metrics to stdout. -func (a *Agent) Test(ctx context.Context, wait time.Duration) error { - src := make(chan telegraf.Metric, 100) - - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - s := influx.NewSerializer() - s.SetFieldSortOrder(influx.SortFields) - - for metric := range src { - octets, err := s.Serialize(metric) - if err == nil { - fmt.Print("> ", string(octets)) - } - metric.Reject() - } - }() - - err := a.test(ctx, wait, src) - if err != nil { - return err - } - - wg.Wait() - - if models.GlobalGatherErrors.Get() != 0 { - return fmt.Errorf("input plugins recorded %d errors", models.GlobalGatherErrors.Get()) - } - return nil -} - -// Test runs the agent and performs a single gather sending output to the -// outputF. After gathering pauses for the wait duration to allow service -// inputs to run. -func (a *Agent) test(ctx context.Context, wait time.Duration, outputC chan<- telegraf.Metric) error { - log.Printf("D! 
[agent] Initializing plugins") - err := a.initPlugins() - if err != nil { - return err - } - - startTime := time.Now() - - next := outputC - - var apu []*processorUnit - var au *aggregatorUnit - if len(a.Config.Aggregators) != 0 { - procC := next - if len(a.Config.AggProcessors) != 0 { - procC, apu, err = a.startProcessors(next, a.Config.AggProcessors) - if err != nil { - return err - } - } - - next, au = a.startAggregators(procC, next, a.Config.Aggregators) - } - - var pu []*processorUnit - if len(a.Config.Processors) != 0 { - next, pu, err = a.startProcessors(next, a.Config.Processors) - if err != nil { - return err - } - } - - iu := a.testStartInputs(next, a.Config.Inputs) - - var wg sync.WaitGroup - if au != nil { - wg.Add(1) - go func() { - defer wg.Done() - a.runProcessors(apu) - }() - - wg.Add(1) - go func() { - defer wg.Done() - a.runAggregators(startTime, au) - }() - } - - if pu != nil { - wg.Add(1) - go func() { - defer wg.Done() - a.runProcessors(pu) - }() - } - - wg.Add(1) - go func() { - defer wg.Done() - a.testRunInputs(ctx, wait, iu) - }() - - wg.Wait() - - log.Printf("D! [agent] Stopped Successfully") - - return nil -} - -// Once runs the full agent for a single gather. -func (a *Agent) Once(ctx context.Context, wait time.Duration) error { - err := a.once(ctx, wait) - if err != nil { - return err - } - - if models.GlobalGatherErrors.Get() != 0 { - return fmt.Errorf("input plugins recorded %d errors", models.GlobalGatherErrors.Get()) - } - - unsent := 0 - for _, output := range a.Config.Outputs { - unsent += output.BufferLength() - } - if unsent != 0 { - return fmt.Errorf("output plugins unable to send %d metrics", unsent) - } - return nil -} - -// On runs the agent and performs a single gather sending output to the -// outputF. After gathering pauses for the wait duration to allow service -// inputs to run. -func (a *Agent) once(ctx context.Context, wait time.Duration) error { - log.Printf("D! [agent] Initializing plugins") - err := a.initPlugins() - if err != nil { - return err - } - - startTime := time.Now() - - log.Printf("D! [agent] Connecting outputs") - next, ou, err := a.startOutputs(ctx, a.Config.Outputs) - if err != nil { - return err - } - - var apu []*processorUnit - var au *aggregatorUnit - if len(a.Config.Aggregators) != 0 { - procC := next - if len(a.Config.AggProcessors) != 0 { - procC, apu, err = a.startProcessors(next, a.Config.AggProcessors) - if err != nil { - return err - } - } - - next, au = a.startAggregators(procC, next, a.Config.Aggregators) - } - var pu []*processorUnit - if len(a.Config.Processors) != 0 { - next, pu, err = a.startProcessors(next, a.Config.Processors) - if err != nil { - return err - } - } - - iu := a.testStartInputs(next, a.Config.Inputs) - - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - a.runOutputs(ou) - }() - - if au != nil { - wg.Add(1) - go func() { - defer wg.Done() - a.runProcessors(apu) - }() - - wg.Add(1) - go func() { - defer wg.Done() - a.runAggregators(startTime, au) - }() - } - - if pu != nil { - wg.Add(1) - go func() { - defer wg.Done() - a.runProcessors(pu) - }() - } - - wg.Add(1) - go func() { - defer wg.Done() - a.testRunInputs(ctx, wait, iu) - }() - - wg.Wait() - - log.Printf("D! [agent] Stopped Successfully") - - return nil -} // Returns the rounding precision for metrics. 
func getPrecision(precision, interval time.Duration) time.Duration { @@ -1081,3 +779,95 @@ func panicRecover(input *models.RunningInput) { "https://github.com/influxdata/telegraf/issues/new/choose") } } + +func (a *Agent) RunningInputs() []*models.RunningInput { + a.inputGroupUnit.Lock() + defer a.inputGroupUnit.Unlock() + runningInputs := []*models.RunningInput{} + + for _, iu := range a.inputGroupUnit.inputUnits { + runningInputs = append(runningInputs, iu.input) + } + return runningInputs +} + +func (a *Agent) RunningProcessors() []models.ProcessorRunner { + a.processorGroupUnit.Lock() + defer a.processorGroupUnit.Unlock() + runningProcessors := []models.ProcessorRunner{} + for _, pu := range a.processorGroupUnit.processorUnits { + runningProcessors = append(runningProcessors, pu.processor) + } + return runningProcessors +} + +func (a *Agent) RunningOutputs() []*models.RunningOutput { + a.outputGroupUnit.Lock() + defer a.outputGroupUnit.Unlock() + runningOutputs := []*models.RunningOutput{} + // make sure we allocate and use a new slice that doesn't need a lock + for _, o := range a.outputGroupUnit.outputs { + runningOutputs = append(runningOutputs, o.output) + } + + return runningOutputs +} + +func (a *Agent) Context() context.Context { + a.inputGroupUnit.Lock() + defer a.inputGroupUnit.Unlock() + return a.ctx +} + +func (a *Agent) waitForPluginsToStop() { + for { + a.inputGroupUnit.Lock() + if len(a.inputGroupUnit.inputUnits) > 0 { + // fmt.Printf("waiting for %d inputs\n", len(a.inputGroupUnit.inputUnits)) + a.inputGroupUnit.Unlock() + time.Sleep(100 * time.Millisecond) + continue + } + break + } + close(a.inputGroupUnit.dst) + a.inputGroupUnit.Unlock() + for { + time.Sleep(100 * time.Millisecond) + a.processorGroupUnit.Lock() + if len(a.processorGroupUnit.processorUnits) > 0 { + // fmt.Printf("waiting for %d processors\n", len(a.processorGroupUnit.processorUnits)) + a.processorGroupUnit.Unlock() + time.Sleep(100 * time.Millisecond) + continue + } + break + } + a.processorGroupUnit.Unlock() + for { + a.outputGroupUnit.Lock() + if len(a.outputGroupUnit.outputs) > 0 { + // fmt.Printf("waiting for %d outputs\n", len(a.outputGroupUnit.outputs)) + a.outputGroupUnit.Unlock() + time.Sleep(100 * time.Millisecond) + continue + } + break + } + a.outputGroupUnit.Unlock() + + for { + a.configPluginUnit.Lock() + if len(a.configPluginUnit.plugins) > 0 { + // fmt.Printf("waiting for %d config plugins\n", len(a.configPluginUnit.plugins)) + a.configPluginUnit.Unlock() + continue + } + break + } + a.configPluginUnit.Unlock() + + // everything closed; shut down + return + +} diff --git a/agent/agent_test.go b/agent/agent_test.go index 9cc631b17c465..ea04f4dda2640 100644 --- a/agent/agent_test.go +++ b/agent/agent_test.go @@ -14,98 +14,78 @@ import ( func TestAgent_OmitHostname(t *testing.T) { c := config.NewConfig() c.Agent.OmitHostname = true - _, err := NewAgent(c) - assert.NoError(t, err) - assert.NotContains(t, c.Tags, "host") + require.NotContains(t, c.Tags, "host") } func TestAgent_LoadPlugin(t *testing.T) { c := config.NewConfig() c.InputFilters = []string{"mysql"} - err := c.LoadConfig("../config/testdata/telegraf-agent.toml") - assert.NoError(t, err) - a, _ := NewAgent(c) - assert.Equal(t, 1, len(a.Config.Inputs)) + a := NewAgent(context.Background(), c) + c.SetAgent(a) + err := c.LoadConfig(context.Background(), context.Background(), "../config/testdata/telegraf-agent.toml") + require.NoError(t, err) + require.Equal(t, 1, len(a.Config.Inputs())) c = config.NewConfig() c.InputFilters = 
[]string{"foo"} - err = c.LoadConfig("../config/testdata/telegraf-agent.toml") - assert.NoError(t, err) - a, _ = NewAgent(c) - assert.Equal(t, 0, len(a.Config.Inputs)) + a = NewAgent(context.Background(), c) + c.SetAgent(a) + err = c.LoadConfig(context.Background(), context.Background(), "../config/testdata/telegraf-agent.toml") + require.NoError(t, err) + require.Equal(t, 0, len(a.Config.Inputs())) c = config.NewConfig() c.InputFilters = []string{"mysql", "foo"} - err = c.LoadConfig("../config/testdata/telegraf-agent.toml") - assert.NoError(t, err) - a, _ = NewAgent(c) - assert.Equal(t, 1, len(a.Config.Inputs)) + a = NewAgent(context.Background(), c) + c.SetAgent(a) + err = c.LoadConfig(context.Background(), context.Background(), "../config/testdata/telegraf-agent.toml") + require.NoError(t, err) + require.Equal(t, 1, len(a.Config.Inputs())) c = config.NewConfig() c.InputFilters = []string{"mysql", "redis"} - err = c.LoadConfig("../config/testdata/telegraf-agent.toml") - assert.NoError(t, err) - a, _ = NewAgent(c) - assert.Equal(t, 2, len(a.Config.Inputs)) + a = NewAgent(context.Background(), c) + c.SetAgent(a) + err = c.LoadConfig(context.Background(), context.Background(), "../config/testdata/telegraf-agent.toml") + require.NoError(t, err) + require.Equal(t, 2, len(a.Config.Inputs())) c = config.NewConfig() c.InputFilters = []string{"mysql", "foo", "redis", "bar"} - err = c.LoadConfig("../config/testdata/telegraf-agent.toml") - assert.NoError(t, err) - a, _ = NewAgent(c) - assert.Equal(t, 2, len(a.Config.Inputs)) + a = NewAgent(context.Background(), c) + c.SetAgent(a) + err = c.LoadConfig(context.Background(), context.Background(), "../config/testdata/telegraf-agent.toml") + require.NoError(t, err) + require.Equal(t, 2, len(a.Config.Inputs())) } func TestAgent_LoadOutput(t *testing.T) { - c := config.NewConfig() - c.OutputFilters = []string{"influxdb"} - err := c.LoadConfig("../config/testdata/telegraf-agent.toml") - assert.NoError(t, err) - a, _ := NewAgent(c) - assert.Equal(t, 2, len(a.Config.Outputs)) - - c = config.NewConfig() - c.OutputFilters = []string{"kafka"} - err = c.LoadConfig("../config/testdata/telegraf-agent.toml") - assert.NoError(t, err) - a, _ = NewAgent(c) - assert.Equal(t, 1, len(a.Config.Outputs)) - - c = config.NewConfig() - c.OutputFilters = []string{} - err = c.LoadConfig("../config/testdata/telegraf-agent.toml") - assert.NoError(t, err) - a, _ = NewAgent(c) - assert.Equal(t, 3, len(a.Config.Outputs)) - - c = config.NewConfig() - c.OutputFilters = []string{"foo"} - err = c.LoadConfig("../config/testdata/telegraf-agent.toml") - assert.NoError(t, err) - a, _ = NewAgent(c) - assert.Equal(t, 0, len(a.Config.Outputs)) - - c = config.NewConfig() - c.OutputFilters = []string{"influxdb", "foo"} - err = c.LoadConfig("../config/testdata/telegraf-agent.toml") - assert.NoError(t, err) - a, _ = NewAgent(c) - assert.Equal(t, 2, len(a.Config.Outputs)) - - c = config.NewConfig() - c.OutputFilters = []string{"influxdb", "kafka"} - err = c.LoadConfig("../config/testdata/telegraf-agent.toml") - assert.NoError(t, err) - assert.Equal(t, 3, len(c.Outputs)) - a, _ = NewAgent(c) - assert.Equal(t, 3, len(a.Config.Outputs)) - - c = config.NewConfig() - c.OutputFilters = []string{"influxdb", "foo", "kafka", "bar"} - err = c.LoadConfig("../config/testdata/telegraf-agent.toml") - assert.NoError(t, err) - a, _ = NewAgent(c) - assert.Equal(t, 3, len(a.Config.Outputs)) + tests := []struct { + OutputFilters []string + ExpectedOutputs int + }{ + {OutputFilters: []string{"influxdb"}, 
ExpectedOutputs: 2}, // two instances in toml + {OutputFilters: []string{"kafka"}, ExpectedOutputs: 1}, + {OutputFilters: []string{}, ExpectedOutputs: 3}, + {OutputFilters: []string{"foo"}, ExpectedOutputs: 0}, + {OutputFilters: []string{"influxdb", "foo"}, ExpectedOutputs: 2}, + {OutputFilters: []string{"influxdb", "kafka"}, ExpectedOutputs: 3}, + {OutputFilters: []string{"influxdb", "foo", "kafka", "bar"}, ExpectedOutputs: 3}, + } + for _, test := range tests { + name := strings.Join(test.OutputFilters, "-") + t.Run(name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + c := config.NewConfig() + c.OutputFilters = test.OutputFilters + a := NewAgent(ctx, c) + c.SetAgent(a) + err := c.LoadConfig(context.Background(), context.Background(), "../config/testdata/telegraf-agent.toml") + require.NoError(t, err) + require.Len(t, a.Config.Outputs(), test.ExpectedOutputs) + }) + } } func TestWindow(t *testing.T) { diff --git a/agent/tick_test.go b/agent/tick_test.go index 69bf0c2affa39..510af1f10f95f 100644 --- a/agent/tick_test.go +++ b/agent/tick_test.go @@ -34,10 +34,8 @@ func TestAlignedTicker(t *testing.T) { clock.Add(10 * time.Second) for !clock.Now().After(until) { - select { - case tm := <-ticker.Elapsed(): - actual = append(actual, tm.UTC()) - } + tm := <-ticker.Elapsed() + actual = append(actual, tm.UTC()) clock.Add(10 * time.Second) } diff --git a/internal/rotate/file_writer.go b/internal/rotate/file_writer.go index 7cfde02692cd4..8fabe2136ca29 100644 --- a/internal/rotate/file_writer.go +++ b/internal/rotate/file_writer.go @@ -4,6 +4,7 @@ package rotate import ( "fmt" "io" + "log" "os" "path/filepath" "sort" @@ -131,7 +132,7 @@ func (w *FileWriter) rotateIfNeeded() error { (w.maxSizeInBytes > 0 && w.bytesWritten >= w.maxSizeInBytes) { if err := w.rotate(); err != nil { //Ignore rotation errors and keep the log open - fmt.Printf("unable to rotate the file '%s', %s", w.filename, err.Error()) + log.Printf("E! 
[agent] unable to rotate the file '%s', %s", w.filename, err.Error()) } return w.openCurrent() } From 42a21e0299e32e33f3e2d79e96492f522f786068 Mon Sep 17 00:00:00 2001 From: helenosheaa <38860767+helenosheaa@users.noreply.github.com> Date: Tue, 20 Jul 2021 15:52:53 -0400 Subject: [PATCH 04/48] added docs --- config/types_test.go | 9 +- docs/AGGREGATORS_AND_PROCESSORS.md | 17 +++ docs/CONFIG_API.md | 195 +++++++++++++++++++++++++++++ metric.go | 2 +- 4 files changed, 219 insertions(+), 4 deletions(-) create mode 100644 docs/CONFIG_API.md diff --git a/config/types_test.go b/config/types_test.go index afff599e3d6e4..17aa8bf8df8b6 100644 --- a/config/types_test.go +++ b/config/types_test.go @@ -1,17 +1,19 @@ package config_test import ( + "context" "testing" "time" "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/processors/reverse_dns" "github.com/stretchr/testify/require" ) func TestConfigDuration(t *testing.T) { c := config.NewConfig() - err := c.LoadConfigData([]byte(` + err := c.LoadConfigData(context.Background(), context.Background(), []byte(` [[processors.reverse_dns]] cache_ttl = "3h" lookup_timeout = "17s" @@ -21,9 +23,10 @@ func TestConfigDuration(t *testing.T) { field = "source_ip" dest = "source_name" `)) + c.SetAgent(&testAgentController{}) require.NoError(t, err) - require.Len(t, c.Processors, 1) - p := c.Processors[0].Processor.(*reverse_dns.ReverseDNS) + require.Len(t, c.Processors(), 1) + p := c.Processors()[0].(*models.RunningProcessor).Processor.(*reverse_dns.ReverseDNS) require.EqualValues(t, p.CacheTTL, 3*time.Hour) require.EqualValues(t, p.LookupTimeout, 17*time.Second) require.Equal(t, p.MaxParallelLookups, 13) diff --git a/docs/AGGREGATORS_AND_PROCESSORS.md b/docs/AGGREGATORS_AND_PROCESSORS.md index 934a4b0cf7706..b9302baec5c25 100644 --- a/docs/AGGREGATORS_AND_PROCESSORS.md +++ b/docs/AGGREGATORS_AND_PROCESSORS.md @@ -65,3 +65,20 @@ the plugin receives, you can make use of `taginclude` to group aggregates by specific tags only. **Note:** Aggregator plugins only aggregate metrics within their periods (`now() - period`). Data with a timestamp earlier than `now() - period` cannot be included. + + + +------------+ Processors and aggregators can be ordered +--------+ + | Input +---+ and chained arbitrarily +--->+ Output | + +------------+ | | +--------+ + | | + +------------+ | +-----------+ +------------+ +-----------+ +------------+ | +--------+ + | Input +------>+ Processor +--->+ Aggregator +----->+ Processor +--->+ Aggregator +-->---->+ Output | + +------------+ | +-----------+ +------------+ +-----------+ +------------+ | +--------+ + | | + +------------+ | | +--------+ + | Input +---+ +--->+ Output | + +------------+ | | +--------+ + | | +  +------------+ | | +--------+ +  | Input      +---+ +--->+ Output | +  +------------+ +--------+ diff --git a/docs/CONFIG_API.md b/docs/CONFIG_API.md new file mode 100644 index 0000000000000..3a35959473004 --- /dev/null +++ b/docs/CONFIG_API.md @@ -0,0 +1,195 @@ +# Config API + +## Endpoints + +### GET /plugins/list + +List all known plugins with default config. Each plugin is listed once. + +**request params** + +None + +**response** + +An array of plugin-config schemas. 
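+Each entry pairs a plugin name with a map of field descriptions following the
+`FieldConfig` schema at the end of this document. As a rough sketch of how a
+client might consume it (the type and function names here are illustrative,
+not part of Telegraf; only the endpoint path and JSON shape come from this
+document):
+
+```go
+import (
+	"encoding/json"
+	"net/http"
+)
+
+type FieldConfig struct {
+	Type     string      `json:"type"`
+	Default  interface{} `json:"default"`
+	Required bool        `json:"required"`
+	Format   string      `json:"format"`
+}
+
+type PluginConfigSchema struct {
+	Name   string                 `json:"name"`
+	Config map[string]FieldConfig `json:"config"`
+}
+
+func listPlugins(baseURL string) ([]PluginConfigSchema, error) {
+	resp, err := http.Get(baseURL + "/plugins/list")
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var schemas []PluginConfigSchema
+	if err := json.NewDecoder(resp.Body).Decode(&schemas); err != nil {
+		return nil, err
+	}
+	return schemas, nil
+}
+```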
+ +eg: +```json +[ + { + "name": "mqtt_consumer", + "config": { + "servers": { + "type": "[string]", // another example: Map[string, SomeSchema] + "default": ["http://127.0.0.1"], + "required": true, + }, + "topics": { + "type": "[string]", + "default": [ + "telegraf/host01/cpu", + "telegraf/+/mem", + "sensors/#", + ], + "required": true, + }, + "topic_tag": { + "type": "string", + "default": "topic", + }, + "username": { + "type": "string", + "required": false, + }, + "password": { + "type": "string", + "required": false, + }, + "qos": { + "type": "integer", + "format": "int64", + }, + "connection_timeout": { + "type": "integer", + "format": "duration" + }, + "max_undelivered_messages": { + "type": "integer", + "format": "int32", + } + } + }, + // ... +] +``` + +### GET /plugins/running + +List all currently running plugins. If there are 5 copies of a plugin, all 5 will be returned. + +**request params** + +none + +**response** + +```json +[ + { + "id": "unique-id-here", + "name": "mqtt_consumer", + "config": { + "servers": ["tcp://127.0.0.1:1883"], + "topics": [ + "telegraf/host01/cpu", + "telegraf/+/mem", + "sensors/#", + ], + "topic_tag": "topic", + "qos": 0, + "connection_timeout": 300000000000, + "max_undelivered_messages": 1000, + "persistent_session": false, + "client_id": "", + "username": "telegraf", + // password not returned + "tls_ca": "/etc/telegraf/ca.pem", + "tls_cert": "/etc/telegraf/cert.pem", + "tls_key": "/etc/telegraf/key.pem", + "insecure_skip_verify": false, + "data_format": "influx", + }, + }, +] +``` + +### POST /plugins/create + +Create a new plugin. It will be started upon creation. + +**request params** + +```json + { + "name": "mqtt_consumer", + "config": {. + // .. + }, + }, +``` + +**response** + +```json + {"id": "unique-id-here"} +``` + +### GET /plugins/{id}/status + +Get the status of a launched plugin + +**request params** + +None. ID in url + +**response** + +```json + { + "status": "", // starting, running, notfound, or error + "reason": "", // extended reason code containing error details. + } +``` + +### DELETE /plugins/{id} + +Stop an existing running plugin given its `id`. It will be allowed to finish +any metrics in-progress. + +**request params** + +None + +**response** + +200 OK +```json +{} +``` + +## Schemas + +### plugin-config + +A plugin-config is a plugin name and details about the config fields. + +``` + { + name: string, + config: Map[string, FieldConfig] + } +``` + +### FieldConfig + +``` + { + type: string, // eg "string", "integer", "[string]", or "Map[string, SomeSchema]" + default: object, // whatever the default value is + required: bool, + format: string, // type-specific format info. + } +``` + +### plugin + +An instance of a plugin running with a specific configuration + +``` +{ + id: string, + name: string, + config: Map[string, object], +} +``` \ No newline at end of file diff --git a/metric.go b/metric.go index 23098bb8bc71e..f9f12419f6f29 100644 --- a/metric.go +++ b/metric.go @@ -119,5 +119,5 @@ type Metric interface { // Drop marks the metric as processed successfully without being written // to any output. - Drop() + Drop() // TODO: Rename to Ack() ? 
} From 9cb191096d2e1893706fd4d22f24537f6ae5764e Mon Sep 17 00:00:00 2001 From: helenosheaa <38860767+helenosheaa@users.noreply.github.com> Date: Wed, 21 Jul 2021 11:38:00 -0400 Subject: [PATCH 05/48] running aggregator, id --- models/filter.go | 16 +-- models/id.go | 17 +++ models/running_aggregator.go | 168 +++++++++++++++++++++++------- models/running_aggregator_test.go | 107 +++++++++++++------ 4 files changed, 234 insertions(+), 74 deletions(-) create mode 100644 models/id.go diff --git a/models/filter.go b/models/filter.go index 8103c23173297..988a895e7d0b6 100644 --- a/models/filter.go +++ b/models/filter.go @@ -16,22 +16,22 @@ type TagFilter struct { // Filter containing drop/pass and tagdrop/tagpass rules type Filter struct { - NameDrop []string + NameDrop []string `toml:"namedrop"` nameDrop filter.Filter - NamePass []string + NamePass []string `toml:"namepass"` namePass filter.Filter - FieldDrop []string + FieldDrop []string `toml:"fielddrop"` fieldDrop filter.Filter - FieldPass []string + FieldPass []string `toml:"fieldpass"` fieldPass filter.Filter - TagDrop []TagFilter - TagPass []TagFilter + TagDrop []TagFilter `toml:"tagdrop"` + TagPass []TagFilter `toml:"tagpass"` - TagExclude []string + TagExclude []string `toml:"tagexclude"` tagExclude filter.Filter - TagInclude []string + TagInclude []string `toml:"taginclude"` tagInclude filter.Filter isActive bool diff --git a/models/id.go b/models/id.go new file mode 100644 index 0000000000000..05d3eeb9b19c2 --- /dev/null +++ b/models/id.go @@ -0,0 +1,17 @@ +package models + +import ( + "math/rand" // TODO: maybe switch to crypto/rand + "sync/atomic" +) + +var ( + globalPluginIDIncrement uint32 = 0 + globalInstanceID uint32 = rand.Uint32() // set a new instance ID on every app load. +) + +// NextPluginID generates a globally unique plugin ID for use referencing the plugin within the lifetime of Telegraf. +func NextPluginID() uint64 { + num := atomic.AddUint32(&globalPluginIDIncrement, 1) + return uint64(globalInstanceID)<<32 + uint64(num) +} diff --git a/models/running_aggregator.go b/models/running_aggregator.go index 5aa3979c36926..bbe9fbe24ef8c 100644 --- a/models/running_aggregator.go +++ b/models/running_aggregator.go @@ -1,16 +1,20 @@ package models import ( + "context" "sync" "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/selfstat" ) type RunningAggregator struct { sync.Mutex + + ID uint64 Aggregator telegraf.Aggregator Config *AggregatorConfig periodStart time.Time @@ -21,6 +25,13 @@ type RunningAggregator struct { MetricsFiltered selfstat.Stat MetricsDropped selfstat.Stat PushTime selfstat.Stat + State + + RoundInterval bool // comes from config + + ctx context.Context + cancel context.CancelFunc + wg sync.WaitGroup } func NewRunningAggregator(aggregator telegraf.Aggregator, config *AggregatorConfig) *RunningAggregator { @@ -37,7 +48,8 @@ func NewRunningAggregator(aggregator telegraf.Aggregator, config *AggregatorConf SetLoggerOnPlugin(aggregator, logger) - return &RunningAggregator{ + a := &RunningAggregator{ + ID: NextPluginID(), Aggregator: aggregator, Config: config, MetricsPushed: selfstat.Register( @@ -62,22 +74,29 @@ func NewRunningAggregator(aggregator telegraf.Aggregator, config *AggregatorConf ), log: logger, } + a.setState(PluginStateCreated) + return a } // AggregatorConfig is the common config for all aggregators. 
type AggregatorConfig struct { - Name string - Alias string - DropOriginal bool - Period time.Duration - Delay time.Duration - Grace time.Duration + Name string `toml:"name"` + Alias string `toml:"alias"` + DropOriginal bool `toml:"drop_original"` + Period time.Duration `toml:"period"` + Delay time.Duration `toml:"delay"` + Grace time.Duration `toml:"grace"` - NameOverride string - MeasurementPrefix string - MeasurementSuffix string - Tags map[string]string - Filter Filter + NameOverride string `toml:"name_override"` + MeasurementPrefix string `toml:"measurement_prefix"` + MeasurementSuffix string `toml:"measurement_suffix"` + Tags map[string]string `toml:"tags"` + Filter Filter `toml:"filter"` + Order int64 `toml:"order"` +} + +func (r *RunningAggregator) Order() int64 { + return r.Config.Order } func (r *RunningAggregator) LogName() string { @@ -94,39 +113,35 @@ func (r *RunningAggregator) Init() error { return nil } -func (r *RunningAggregator) Period() time.Duration { - return r.Config.Period -} +// func (r *RunningAggregator) Period() time.Duration { +// return r.Config.Period +// } -func (r *RunningAggregator) EndPeriod() time.Time { - return r.periodEnd -} +// func (r *RunningAggregator) EndPeriod() time.Time { +// return r.periodEnd +// } -func (r *RunningAggregator) UpdateWindow(start, until time.Time) { +func (r *RunningAggregator) updateWindow(start, until time.Time) { r.periodStart = start r.periodEnd = until r.log.Debugf("Updated aggregation range [%s, %s]", start, until) } func (r *RunningAggregator) MakeMetric(metric telegraf.Metric) telegraf.Metric { - m := makemetric( - metric, - r.Config.NameOverride, - r.Config.MeasurementPrefix, - r.Config.MeasurementSuffix, - r.Config.Tags, - nil) - - r.MetricsPushed.Incr(1) - - return m + return metric } // Add a metric to the aggregator and return true if the original metric // should be dropped. -func (r *RunningAggregator) Add(m telegraf.Metric) bool { +func (r *RunningAggregator) Add(m telegraf.Metric, acc telegraf.Accumulator) error { + defer func() { + if !r.Config.DropOriginal && len(m.FieldList()) > 0 { + acc.AddMetric(m) + } + }() + if ok := r.Config.Filter.Select(m); !ok { - return false + return nil } // Make a copy of the metric but don't retain tracking. We do not fail a @@ -138,21 +153,23 @@ func (r *RunningAggregator) Add(m telegraf.Metric) bool { r.Config.Filter.Modify(m) if len(m.FieldList()) == 0 { r.MetricsFiltered.Incr(1) - return r.Config.DropOriginal + m.Drop() + return nil } r.Lock() defer r.Unlock() + // check if outside agg window if m.Time().Before(r.periodStart.Add(-r.Config.Grace)) || m.Time().After(r.periodEnd.Add(r.Config.Delay)) { r.log.Debugf("Metric is outside aggregation window; discarding. 
%s: m: %s e: %s g: %s", m.Time(), r.periodStart, r.periodEnd, r.Config.Grace) r.MetricsDropped.Incr(1) - return r.Config.DropOriginal + return nil } r.Aggregator.Add(m) - return r.Config.DropOriginal + return nil } func (r *RunningAggregator) Push(acc telegraf.Accumulator) { @@ -161,12 +178,34 @@ func (r *RunningAggregator) Push(acc telegraf.Accumulator) { since := r.periodEnd until := r.periodEnd.Add(r.Config.Period) - r.UpdateWindow(since, until) + r.updateWindow(since, until) - r.push(acc) + r.push(acc.WithNewMetricMaker(r.LogName(), r.Log(), r.pushMetricMaker)) r.Aggregator.Reset() } +// not passed on to agg at this time +func (r *RunningAggregator) Start(acc telegraf.Accumulator) error { + r.setState(PluginStateRunning) + + since, until := r.calculateUpdateWindow(time.Now()) + r.updateWindow(since, until) + + r.ctx, r.cancel = context.WithCancel(context.Background()) + r.wg.Add(1) + go r.pushLoop(acc) + + return nil +} + +// not passed on to agg at this time +func (r *RunningAggregator) Stop() { + r.setState(PluginStateStopping) + r.cancel() + r.wg.Wait() + r.setState(PluginStateDead) +} + func (r *RunningAggregator) push(acc telegraf.Accumulator) { start := time.Now() r.Aggregator.Push(acc) @@ -177,3 +216,58 @@ func (r *RunningAggregator) push(acc telegraf.Accumulator) { func (r *RunningAggregator) Log() telegraf.Logger { return r.log } + +// Before calling Add, initialize the aggregation window. This ensures +// that any metric created after start time will be aggregated. +func (r *RunningAggregator) calculateUpdateWindow(start time.Time) (since time.Time, until time.Time) { + if r.RoundInterval { + until = internal.AlignTime(start, r.Config.Period) + if until == start { + until = internal.AlignTime(start.Add(time.Nanosecond), r.Config.Period) + } + } else { + until = start.Add(r.Config.Period) + } + + since = until.Add(-r.Config.Period) + + return since, until +} + +func (r *RunningAggregator) pushLoop(acc telegraf.Accumulator) { + for { + // Ensures that Push will be called for each period, even if it has + // already elapsed before this function is called. This is guaranteed + // because so long as only Push updates the EndPeriod. This method + // also avoids drift by not using a ticker. 
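+		// (Each iteration sleeps until periodEnd; Push then advances the
+		// window by exactly one Period via updateWindow, so even a late start
+		// still yields one push per period without drifting.)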
+ until := time.Until(r.periodEnd) + + select { + case <-time.After(until): + r.Push(acc) + break + case <-r.ctx.Done(): + r.Push(acc) + r.wg.Done() + return + } + } +} + +func (r *RunningAggregator) pushMetricMaker(metric telegraf.Metric) telegraf.Metric { + m := makemetric( + metric, + r.Config.NameOverride, + r.Config.MeasurementPrefix, + r.Config.MeasurementSuffix, + r.Config.Tags, + nil) + + r.MetricsPushed.Incr(1) + + return m +} + +func (r *RunningAggregator) GetID() uint64 { + return r.ID +} diff --git a/models/running_aggregator_test.go b/models/running_aggregator_test.go index a858859652acf..398c9d0946da0 100644 --- a/models/running_aggregator_test.go +++ b/models/running_aggregator_test.go @@ -16,13 +16,14 @@ func TestAdd(t *testing.T) { Filter: Filter{ NamePass: []string{"*"}, }, - Period: time.Millisecond * 500, + DropOriginal: true, + Period: time.Millisecond * 500, }) require.NoError(t, ra.Config.Filter.Compile()) - acc := testutil.Accumulator{} + acc := &testutil.Accumulator{} now := time.Now() - ra.UpdateWindow(now, now.Add(ra.Config.Period)) + ra.updateWindow(now, now.Add(ra.Config.Period)) m := testutil.MustMetric("RITest", map[string]string{}, @@ -31,13 +32,49 @@ func TestAdd(t *testing.T) { }, time.Now().Add(time.Millisecond*150), telegraf.Untyped) - require.False(t, ra.Add(m)) - ra.Push(&acc) + require.NoError(t, ra.Add(m, acc)) + require.Len(t, m.FieldList(), 1) + ra.Push(acc) require.Equal(t, 1, len(acc.Metrics)) require.Equal(t, int64(101), acc.Metrics[0].Fields["sum"]) } +func TestAddWithoutDrop(t *testing.T) { + a := &TestAggregator{} + ra := NewRunningAggregator(a, &AggregatorConfig{ + Name: "TestRunningAggregator", + Filter: Filter{ + NamePass: []string{"*"}, + }, + DropOriginal: false, + Period: time.Millisecond * 500, + }) + require.NoError(t, ra.Config.Filter.Compile()) + acc := &testutil.Accumulator{} + + now := time.Now() + ra.updateWindow(now, now.Add(ra.Config.Period)) + + m := testutil.MustMetric("RITest", + map[string]string{}, + map[string]interface{}{ + "value": int64(101), + }, + time.Now().Add(time.Millisecond*150), + telegraf.Untyped) + require.NoError(t, ra.Add(m, acc)) + require.Len(t, m.FieldList(), 1) + ra.Push(acc) + + require.Equal(t, 2, len(acc.Metrics)) + metric := acc.Metrics[0] + if _, ok := metric.Fields["sum"]; !ok { + metric = acc.Metrics[1] + } + require.Equal(t, int64(101), metric.Fields["sum"]) +} + func TestAddMetricsOutsideCurrentPeriod(t *testing.T) { a := &TestAggregator{} ra := NewRunningAggregator(a, &AggregatorConfig{ @@ -45,12 +82,13 @@ func TestAddMetricsOutsideCurrentPeriod(t *testing.T) { Filter: Filter{ NamePass: []string{"*"}, }, - Period: time.Millisecond * 500, + DropOriginal: true, + Period: time.Millisecond * 500, }) require.NoError(t, ra.Config.Filter.Compile()) - acc := testutil.Accumulator{} + acc := &testutil.Accumulator{} now := time.Now() - ra.UpdateWindow(now, now.Add(ra.Config.Period)) + ra.updateWindow(now, now.Add(ra.Config.Period)) m := testutil.MustMetric("RITest", map[string]string{}, @@ -60,7 +98,8 @@ func TestAddMetricsOutsideCurrentPeriod(t *testing.T) { now.Add(-time.Hour), telegraf.Untyped, ) - require.False(t, ra.Add(m)) + require.NoError(t, ra.Add(m, acc)) + require.Len(t, m.FieldList(), 1) // metric after current period m = testutil.MustMetric("RITest", @@ -71,7 +110,8 @@ func TestAddMetricsOutsideCurrentPeriod(t *testing.T) { now.Add(time.Hour), telegraf.Untyped, ) - require.False(t, ra.Add(m)) + require.NoError(t, ra.Add(m, acc)) + require.Len(t, m.FieldList(), 1) // "now" metric m = 
testutil.MustMetric("RITest", @@ -81,9 +121,10 @@ func TestAddMetricsOutsideCurrentPeriod(t *testing.T) { }, time.Now().Add(time.Millisecond*50), telegraf.Untyped) - require.False(t, ra.Add(m)) + require.NoError(t, ra.Add(m, acc)) + require.Len(t, m.FieldList(), 1) - ra.Push(&acc) + ra.Push(acc) require.Equal(t, 1, len(acc.Metrics)) require.Equal(t, int64(101), acc.Metrics[0].Fields["sum"]) } @@ -95,13 +136,14 @@ func TestAddMetricsOutsideCurrentPeriodWithGrace(t *testing.T) { Filter: Filter{ NamePass: []string{"*"}, }, - Period: time.Millisecond * 1500, - Grace: time.Millisecond * 500, + Period: time.Millisecond * 1500, + Grace: time.Millisecond * 500, + DropOriginal: true, }) require.NoError(t, ra.Config.Filter.Compile()) - acc := testutil.Accumulator{} + acc := &testutil.Accumulator{} now := time.Now() - ra.UpdateWindow(now, now.Add(ra.Config.Period)) + ra.updateWindow(now, now.Add(ra.Config.Period)) m := testutil.MustMetric("RITest", map[string]string{}, @@ -111,7 +153,8 @@ func TestAddMetricsOutsideCurrentPeriodWithGrace(t *testing.T) { now.Add(-time.Hour), telegraf.Untyped, ) - require.False(t, ra.Add(m)) + require.NoError(t, ra.Add(m, acc)) + require.Len(t, m.FieldList(), 1) // metric before current period (late) m = testutil.MustMetric("RITest", @@ -122,7 +165,8 @@ func TestAddMetricsOutsideCurrentPeriodWithGrace(t *testing.T) { now.Add(-time.Millisecond*1000), telegraf.Untyped, ) - require.False(t, ra.Add(m)) + require.NoError(t, ra.Add(m, acc)) + require.Len(t, m.FieldList(), 1) // metric before current period, but within grace period (late) m = testutil.MustMetric("RITest", @@ -133,7 +177,8 @@ func TestAddMetricsOutsideCurrentPeriodWithGrace(t *testing.T) { now.Add(-time.Millisecond*200), telegraf.Untyped, ) - require.False(t, ra.Add(m)) + require.NoError(t, ra.Add(m, acc)) + require.Len(t, m.FieldList(), 1) // "now" metric m = testutil.MustMetric("RITest", @@ -143,9 +188,10 @@ func TestAddMetricsOutsideCurrentPeriodWithGrace(t *testing.T) { }, time.Now().Add(time.Millisecond*50), telegraf.Untyped) - require.False(t, ra.Add(m)) + require.NoError(t, ra.Add(m, acc)) + require.Len(t, m.FieldList(), 1) - ra.Push(&acc) + ra.Push(acc) require.Equal(t, 1, len(acc.Metrics)) require.Equal(t, int64(203), acc.Metrics[0].Fields["sum"]) } @@ -160,10 +206,10 @@ func TestAddAndPushOnePeriod(t *testing.T) { Period: time.Millisecond * 500, }) require.NoError(t, ra.Config.Filter.Compile()) - acc := testutil.Accumulator{} + acc := &testutil.Accumulator{} now := time.Now() - ra.UpdateWindow(now, now.Add(ra.Config.Period)) + ra.updateWindow(now, now.Add(ra.Config.Period)) m := testutil.MustMetric("RITest", map[string]string{}, @@ -172,9 +218,10 @@ func TestAddAndPushOnePeriod(t *testing.T) { }, time.Now().Add(time.Millisecond*100), telegraf.Untyped) - require.False(t, ra.Add(m)) + require.NoError(t, ra.Add(m, acc)) + require.Len(t, m.FieldList(), 1) - ra.Push(&acc) + ra.Push(acc) acc.AssertContainsFields(t, "TestMetric", map[string]interface{}{"sum": int64(101)}) } @@ -190,7 +237,7 @@ func TestAddDropOriginal(t *testing.T) { require.NoError(t, ra.Config.Filter.Compile()) now := time.Now() - ra.UpdateWindow(now, now.Add(ra.Config.Period)) + ra.updateWindow(now, now.Add(ra.Config.Period)) m := testutil.MustMetric("RITest", map[string]string{}, @@ -199,7 +246,8 @@ func TestAddDropOriginal(t *testing.T) { }, now, telegraf.Untyped) - require.True(t, ra.Add(m)) + require.NoError(t, ra.Add(m, &testutil.Accumulator{})) + require.Len(t, m.FieldList(), 1) // this metric name doesn't match the filter, so Add 
will return false m2 := testutil.MustMetric("foobar", @@ -209,7 +257,8 @@ func TestAddDropOriginal(t *testing.T) { }, now, telegraf.Untyped) - require.False(t, ra.Add(m2)) + require.NoError(t, ra.Add(m2, &testutil.Accumulator{})) + require.Len(t, m.FieldList(), 1) } func TestAddDoesNotModifyMetric(t *testing.T) { @@ -233,7 +282,7 @@ func TestAddDoesNotModifyMetric(t *testing.T) { }, now) expected := m.Copy() - ra.Add(m) + ra.Add(m, &testutil.Accumulator{}) testutil.RequireMetricEqual(t, expected, m) } From 27759b9c90249e520a9517010bcd6544172f0a18 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Wed, 21 Jul 2021 10:36:16 -0500 Subject: [PATCH 06/48] config api pieces --- agent/agentcontrol_test.go | 259 +++++++ cmd/telegraf/telegraf.go | 39 +- internal/channel/fanout.go | 1 + internal/channel/fanout_test.go | 1 + internal/channel/relay.go | 44 ++ internal/channel/relay_test.go | 63 ++ output.go | 14 - plugin.go | 4 +- plugins/configs/all/all.go | 5 + plugins/configs/api/controller.go | 920 +++++++++++++++++++++++++ plugins/configs/api/controller_test.go | 554 +++++++++++++++ plugins/configs/api/listener.go | 123 ++++ plugins/configs/api/listener_test.go | 103 +++ plugins/configs/api/plugin.go | 75 ++ plugins/configs/api/testdata.lp | 5 + plugins/configs/registry.go | 7 + 16 files changed, 2181 insertions(+), 36 deletions(-) create mode 100644 agent/agentcontrol_test.go create mode 100644 internal/channel/fanout.go create mode 100644 internal/channel/fanout_test.go create mode 100644 internal/channel/relay.go create mode 100644 internal/channel/relay_test.go create mode 100644 plugins/configs/all/all.go create mode 100644 plugins/configs/api/controller.go create mode 100644 plugins/configs/api/controller_test.go create mode 100644 plugins/configs/api/listener.go create mode 100644 plugins/configs/api/listener_test.go create mode 100644 plugins/configs/api/plugin.go create mode 100644 plugins/configs/api/testdata.lp create mode 100644 plugins/configs/registry.go diff --git a/agent/agentcontrol_test.go b/agent/agentcontrol_test.go new file mode 100644 index 0000000000000..395ee0b1d4c14 --- /dev/null +++ b/agent/agentcontrol_test.go @@ -0,0 +1,259 @@ +package agent + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/models" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +var now = time.Date(2021, 4, 9, 0, 0, 0, 0, time.UTC) + +func TestAgentPluginControllerLifecycle(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + cfg := config.NewConfig() + a := NewAgent(ctx, cfg) + // cfg.SetAgent(a) + inp := &testInputPlugin{} + _ = inp.Init() + ri := models.NewRunningInput(inp, &models.InputConfig{Name: "in"}) + a.AddInput(ri) + go a.RunInput(ri, time.Now()) + + m, _ := metric.New("testing", + map[string]string{ + "country": "canada", + }, + map[string]interface{}{ + "population": 37_590_000, + }, + now) + + rp := models.NewRunningProcessor(&testProcessorPlugin{}, &models.ProcessorConfig{Name: "proc"}) + a.AddProcessor(rp) + go a.RunProcessor(rp) + + outputCtx, outputCancel := context.WithCancel(context.Background()) + o := &testOutputPlugin{} + _ = o.Init() + ro := models.NewRunningOutput(o, &models.OutputConfig{Name: "out"}, 100, 100) + a.AddOutput(ro) + go a.RunOutput(outputCtx, ro) + + go a.RunWithAPI(outputCancel) + + 
inp.injectMetric(m) + + waitForStatus(t, ri, "running", 1*time.Second) + waitForStatus(t, rp, "running", 1*time.Second) + waitForStatus(t, ro, "running", 1*time.Second) + + cancel() + + waitForStatus(t, ri, "dead", 1*time.Second) + waitForStatus(t, rp, "dead", 1*time.Second) + waitForStatus(t, ro, "dead", 1*time.Second) + + require.Len(t, o.receivedMetrics, 1) + expected := testutil.MustMetric("testing", + map[string]string{ + "country": "canada", + }, + map[string]interface{}{ + "population": 37_590_000, + "capital": "Ottawa", + }, + now) + testutil.RequireMetricEqual(t, expected, o.receivedMetrics[0]) + +} + +func TestAgentPluginConnectionsAfterAddAndRemoveProcessor(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + cfg := config.NewConfig() + a := NewAgent(ctx, cfg) + // cfg.SetAgent(a) + + // start an input + inp := &testInputPlugin{} + _ = inp.Init() + ri := models.NewRunningInput(inp, &models.InputConfig{Name: "in"}) + a.AddInput(ri) + go a.RunInput(ri, time.Now()) + + // start output + outputCtx, outputCancel := context.WithCancel(context.Background()) + o := &testOutputPlugin{} + _ = o.Init() + ro := models.NewRunningOutput(o, &models.OutputConfig{Name: "out"}, 100, 100) + a.AddOutput(ro) + go a.RunOutput(outputCtx, ro) + + // Run agent + go a.RunWithAPI(outputCancel) + + // wait for plugins to start + waitForStatus(t, ri, "running", 1*time.Second) + waitForStatus(t, ro, "running", 1*time.Second) + + // inject a metric into the input plugin as if it collected it + m, _ := metric.New("mojo", nil, map[string]interface{}{"jenkins": "leroy"}, now) + inp.injectMetric(m) + + // wait for the output to get it + o.wait(1) + testutil.RequireMetricEqual(t, m, o.receivedMetrics[0]) + o.clear() + + // spin up new processor + rp := models.NewRunningProcessor(&testProcessorPlugin{}, &models.ProcessorConfig{Name: "proc"}) + a.AddProcessor(rp) + go a.RunProcessor(rp) + + // wait for the processor to start + waitForStatus(t, rp, "running", 5*time.Second) + + // inject a metric into the input + inp.injectMetric(m) + // wait for it to arrive + o.wait(1) + + // create the expected output for comparison + expected := m.Copy() + expected.AddField("capital", "Ottawa") + + testutil.RequireMetricEqual(t, expected, o.receivedMetrics[0]) + + o.clear() + + // stop processor and wait for it to stop + a.StopProcessor(rp) + waitForStatus(t, rp, "dead", 5*time.Second) + + // inject a new metric + inp.injectMetric(m) + + // wait for the output to get it + o.wait(1) + testutil.RequireMetricEqual(t, m, o.receivedMetrics[0]) + o.clear() + + // cancel the app's context + cancel() + + // wait for plugins to stop + waitForStatus(t, ri, "dead", 5*time.Second) + waitForStatus(t, ro, "dead", 5*time.Second) +} + +type hasState interface { + GetState() models.PluginState +} + +func waitForStatus(t *testing.T, stateable hasState, waitStatus string, timeout time.Duration) { + timeoutAt := time.Now().Add(timeout) + for timeoutAt.After(time.Now()) { + if stateable.GetState().String() == waitStatus { + return + } + time.Sleep(10 * time.Millisecond) + } + require.FailNow(t, "timed out waiting for status "+waitStatus) +} + +type testInputPlugin struct { + sync.Mutex + *sync.Cond + started bool + acc telegraf.Accumulator +} + +func (p *testInputPlugin) Init() error { + p.Cond = sync.NewCond(&p.Mutex) + return nil +} +func (p *testInputPlugin) SampleConfig() string { return "" } +func (p *testInputPlugin) Description() string { return "testInputPlugin" } +func (p *testInputPlugin) Gather(a 
telegraf.Accumulator) error { return nil } +func (p *testInputPlugin) Start(a telegraf.Accumulator) error { + println("locking") + p.Lock() + defer p.Unlock() + println("started") + p.acc = a + p.started = true + p.Cond.Broadcast() + return nil +} +func (p *testInputPlugin) Stop() { + println("stopping input (waiting for lock)") + p.Lock() + defer p.Unlock() + println("stopped input") +} +func (p *testInputPlugin) injectMetric(m telegraf.Metric) { + p.Lock() + defer p.Unlock() + for !p.started { + p.Cond.Wait() + } + p.acc.AddMetric(m) +} + +type testProcessorPlugin struct { +} + +func (p *testProcessorPlugin) Init() error { return nil } +func (p *testProcessorPlugin) SampleConfig() string { return "" } +func (p *testProcessorPlugin) Description() string { return "testProcessorPlugin" } +func (p *testProcessorPlugin) Start(acc telegraf.Accumulator) error { return nil } +func (p *testProcessorPlugin) Add(metric telegraf.Metric, acc telegraf.Accumulator) error { + metric.AddField("capital", "Ottawa") + acc.AddMetric(metric) + return nil +} +func (p *testProcessorPlugin) Stop() error { return nil } + +type testOutputPlugin struct { + sync.Mutex + *sync.Cond + receivedMetrics []telegraf.Metric +} + +func (p *testOutputPlugin) Init() error { + p.Cond = sync.NewCond(&p.Mutex) + return nil +} +func (p *testOutputPlugin) SampleConfig() string { return "" } +func (p *testOutputPlugin) Description() string { return "testOutputPlugin" } +func (p *testOutputPlugin) Connect() error { return nil } +func (p *testOutputPlugin) Close() error { return nil } +func (p *testOutputPlugin) Write(metrics []telegraf.Metric) error { + p.Lock() + defer p.Unlock() + p.receivedMetrics = append(p.receivedMetrics, metrics...) + p.Broadcast() + return nil +} + +// Wait for the given number of metrics to arrive +func (p *testOutputPlugin) wait(n int) { + p.Lock() + defer p.Unlock() + for len(p.receivedMetrics) < n { + p.Cond.Wait() + } +} + +func (p *testOutputPlugin) clear() { + p.Lock() + defer p.Unlock() + p.receivedMetrics = nil +} diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 688c1e5bdd6c5..b67603acad3cb 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -23,11 +23,15 @@ import ( "github.com/influxdata/telegraf/internal/goplugin" "github.com/influxdata/telegraf/logger" _ "github.com/influxdata/telegraf/plugins/aggregators/all" + _ "github.com/influxdata/telegraf/plugins/configs" + _ "github.com/influxdata/telegraf/plugins/configs/all" "github.com/influxdata/telegraf/plugins/inputs" _ "github.com/influxdata/telegraf/plugins/inputs/all" "github.com/influxdata/telegraf/plugins/outputs" _ "github.com/influxdata/telegraf/plugins/outputs/all" _ "github.com/influxdata/telegraf/plugins/processors/all" + _ "github.com/influxdata/telegraf/plugins/storage" + _ "github.com/influxdata/telegraf/plugins/storage/all" "gopkg.in/tomb.v1" ) @@ -197,35 +201,34 @@ func runAgent(ctx context.Context, c := config.NewConfig() c.OutputFilters = outputFilters c.InputFilters = inputFilters + + ag := agent.NewAgent(ctx, c) + c.SetAgent(ag) + outputCtx, outputCancel := context.WithCancel(context.Background()) + defer outputCancel() + var err error // providing no "config" flag should load default config if len(fConfigs) == 0 { - err = c.LoadConfig("") + err = c.LoadConfig(ctx, outputCtx, "") if err != nil { return err } } for _, fConfig := range fConfigs { - err = c.LoadConfig(fConfig) + err = 
c.LoadConfig(ctx, outputCtx, fConfig) if err != nil { return err } } for _, fConfigDirectory := range fConfigDirs { - err = c.LoadDirectory(fConfigDirectory) + err = c.LoadDirectory(ctx, outputCtx, fConfigDirectory) if err != nil { return err } } - if !*fTest && len(c.Outputs) == 0 { - return errors.New("Error: no outputs found, did you provide a valid config file?") - } - if *fPlugins == "" && len(c.Inputs) == 0 { - return errors.New("Error: no inputs found, did you provide a valid config file?") - } - if int64(c.Agent.Interval) <= 0 { return fmt.Errorf("Agent interval must be positive, found %v", c.Agent.Interval) } @@ -234,11 +237,6 @@ func runAgent(ctx context.Context, return fmt.Errorf("Agent flush_interval must be positive; found %v", c.Agent.Interval) } - ag, err := agent.NewAgent(c) - if err != nil { - return err - } - // Setup logging as configured. telegraf.Debug = ag.Config.Agent.Debug || *fDebug logConfig := logger.LogConfig{ @@ -255,13 +253,13 @@ func runAgent(ctx context.Context, logger.SetupLogging(logConfig) if *fRunOnce { - wait := time.Duration(*fTestWait) * time.Second - return ag.Once(ctx, wait) + // wait := time.Duration(*fTestWait) * time.Second + // return ag.Once(ctx, wait) } if *fTest || *fTestWait != 0 { - wait := time.Duration(*fTestWait) * time.Second - return ag.Test(ctx, wait) + // wait := time.Duration(*fTestWait) * time.Second + // return ag.Test(ctx, wait) } log.Printf("I! Loaded inputs: %s", strings.Join(c.InputNames(), " ")) @@ -288,7 +286,8 @@ func runAgent(ctx context.Context, } } - return ag.Run(ctx) + ag.RunWithAPI(outputCancel) + return nil } func usageExit(rc int) { diff --git a/internal/channel/fanout.go b/internal/channel/fanout.go new file mode 100644 index 0000000000000..9c86d4ad8b45c --- /dev/null +++ b/internal/channel/fanout.go @@ -0,0 +1 @@ +package channel diff --git a/internal/channel/fanout_test.go b/internal/channel/fanout_test.go new file mode 100644 index 0000000000000..9c86d4ad8b45c --- /dev/null +++ b/internal/channel/fanout_test.go @@ -0,0 +1 @@ +package channel diff --git a/internal/channel/relay.go b/internal/channel/relay.go new file mode 100644 index 0000000000000..c02fd726d9d26 --- /dev/null +++ b/internal/channel/relay.go @@ -0,0 +1,44 @@ +package channel + +import ( + "sync" + + "github.com/influxdata/telegraf" +) + +type Relay struct { + sync.Mutex + src <-chan telegraf.Metric + dst chan<- telegraf.Metric +} + +func NewRelay(src <-chan telegraf.Metric, dst chan<- telegraf.Metric) *Relay { + return &Relay{ + src: src, + dst: dst, + } +} + +func (r *Relay) Start() { + go func() { + for m := range r.src { + r.GetDest() <- m + } + close(r.GetDest()) + }() +} + +func (r *Relay) SetDest(dst chan<- telegraf.Metric) { + r.Lock() + defer r.Unlock() + r.dst = dst +} + +func (r *Relay) GetDest() chan<- telegraf.Metric { + r.Lock() + defer r.Unlock() + if r.dst == nil { + panic("dst channel should never be nil") + } + return r.dst +} diff --git a/internal/channel/relay_test.go b/internal/channel/relay_test.go new file mode 100644 index 0000000000000..891b14871cc60 --- /dev/null +++ b/internal/channel/relay_test.go @@ -0,0 +1,63 @@ +package channel + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestUnbufferedConnection(t *testing.T) { + src := make(chan telegraf.Metric) + dst := make(chan telegraf.Metric) + ch := NewRelay(src, dst) + ch.Start() + m := testutil.MustMetric("test", nil, nil, 
time.Now()) + go func() { + src <- m + close(src) + }() + m2 := <-dst + testutil.RequireMetricEqual(t, m, m2) +} + +func TestBufferedConnection(t *testing.T) { + src := make(chan telegraf.Metric, 1) + dst := make(chan telegraf.Metric, 1) + ch := NewRelay(src, dst) + ch.Start() + m := testutil.MustMetric("test", nil, nil, time.Now()) + src <- m + close(src) + m2 := <-dst + testutil.RequireMetricEqual(t, m, m2) +} + +func TestCloseIsRelayed(t *testing.T) { + src := make(chan telegraf.Metric, 1) + dst := make(chan telegraf.Metric, 1) + ch := NewRelay(src, dst) + ch.Start() + close(src) + _, ok := <-dst + require.False(t, ok) +} + +func TestCanReassignDest(t *testing.T) { + src := make(chan telegraf.Metric, 1) + dst1 := make(chan telegraf.Metric, 1) + dst2 := make(chan telegraf.Metric, 1) + ch := NewRelay(src, dst1) + ch.Start() + m1 := testutil.MustMetric("test1", nil, nil, time.Now()) + m2 := testutil.MustMetric("test2", nil, nil, time.Now()) + src <- m1 + m3 := <-dst1 + ch.SetDest(dst2) + src <- m2 + m4 := <-dst2 + testutil.RequireMetricEqual(t, m1, m3) + testutil.RequireMetricEqual(t, m2, m4) +} diff --git a/output.go b/output.go index 52755b5da3f96..1ff4dfd2f7a7e 100644 --- a/output.go +++ b/output.go @@ -13,17 +13,3 @@ type Output interface { // Write takes in group of points to be written to the Output Write(metrics []Metric) error } - -// AggregatingOutput adds aggregating functionality to an Output. May be used -// if the Output only accepts a fixed set of aggregations over a time period. -// These functions may be called concurrently to the Write function. -type AggregatingOutput interface { - Output - - // Add the metric to the aggregator - Add(in Metric) - // Push returns the aggregated metrics and is called every flush interval. - Push() []Metric - // Reset signals the the aggregator period is completed. - Reset() -} diff --git a/plugin.go b/plugin.go index f9dcaeac0344c..73d10def431ff 100644 --- a/plugin.go +++ b/plugin.go @@ -16,10 +16,10 @@ type Initializer interface { // not part of the interface, but will receive an injected logger if it's set. 
// eg: Log telegraf.Logger `toml:"-"` type PluginDescriber interface { - // SampleConfig returns the default configuration of the Processor + // SampleConfig returns the default configuration of the Plugin SampleConfig() string - // Description returns a one-sentence description on the Processor + // Description returns a one-sentence description on the Plugin Description() string } diff --git a/plugins/configs/all/all.go b/plugins/configs/all/all.go new file mode 100644 index 0000000000000..ffc4394d7f537 --- /dev/null +++ b/plugins/configs/all/all.go @@ -0,0 +1,5 @@ +package all + +import ( + _ "github.com/influxdata/telegraf/plugins/configs/api" +) diff --git a/plugins/configs/api/controller.go b/plugins/configs/api/controller.go new file mode 100644 index 0000000000000..4fcbf046ca996 --- /dev/null +++ b/plugins/configs/api/controller.go @@ -0,0 +1,920 @@ +package api + +import ( + "context" + "errors" + "fmt" + "log" + "reflect" + "sort" + "strings" + "time" + "unicode" + + "github.com/alecthomas/units" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/models" + "github.com/influxdata/telegraf/plugins/aggregators" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/outputs" + "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/plugins/processors" + "github.com/influxdata/telegraf/plugins/serializers" +) + +// api is the general interface to interacting with Telegraf's current config +type api struct { + agent config.AgentController + config *config.Config + + // api shutdown context + ctx context.Context + outputCtx context.Context +} + +func newAPI(ctx context.Context, cfg *config.Config, agent config.AgentController) (_ *api, outputCancel context.CancelFunc) { + c := &api{ + config: cfg, + agent: agent, + ctx: ctx, + } + c.outputCtx, outputCancel = context.WithCancel(context.Background()) + return c, outputCancel +} + +// PluginConfig is a plugin name and details about the config fields. +type PluginConfigTypeInfo struct { + Name string `json:"name"` + Config map[string]FieldConfig `json:"config"` +} + +type PluginConfig struct { + ID string `json:"id"` // unique identifer + PluginConfigCreate +} + +type PluginConfigCreate struct { + Name string `json:"name"` // name of the plugin + Config map[string]interface{} `json:"config"` // map field name to field value +} + +// FieldConfig describes a single field +type FieldConfig struct { + Type FieldType `json:"type,omitempty"` // see FieldType + Default interface{} `json:"default,omitempty"` // whatever the default value is + Format string `json:"format,omitempty"` // type-specific format info. eg a url is a string, but has url-formatting rules. + Required bool `json:"required,omitempty"` // this is sort of validation, which I'm not sure belongs here. + SubType FieldType `json:"sub_type,omitempty"` // The subtype. map[string]int subtype is int. []string subtype is string. + SubFields map[string]FieldConfig `json:"sub_fields,omitempty"` // only for struct/object/FieldConfig types +} + +// FieldType enumerable type. 
Describes config field type information to external applications +type FieldType string + +// FieldTypes +const ( + FieldTypeUnknown FieldType = "" + FieldTypeString FieldType = "string" + FieldTypeInteger FieldType = "integer" + FieldTypeDuration FieldType = "duration" // a special case of integer + FieldTypeSize FieldType = "size" // a special case of integer + FieldTypeFloat FieldType = "float" + FieldTypeBool FieldType = "bool" + FieldTypeInterface FieldType = "any" + FieldTypeSlice FieldType = "array" // array + FieldTypeFieldConfig FieldType = "object" // a FieldConfig? + FieldTypeMap FieldType = "map" // always map[string]FieldConfig ? +) + +// Plugin is an instance of a plugin running with a specific configuration +type Plugin struct { + ID models.PluginID + Name string + // State() + Config map[string]FieldConfig +} + +func (a *api) ListPluginTypes() []PluginConfigTypeInfo { + result := []PluginConfigTypeInfo{} + inputNames := []string{} + for name := range inputs.Inputs { + inputNames = append(inputNames, name) + } + sort.Strings(inputNames) + + for _, name := range inputNames { + creator := inputs.Inputs[name] + cfg := PluginConfigTypeInfo{ + Name: name, + Config: map[string]FieldConfig{}, + } + + p := creator() + getFieldConfig(p, cfg.Config) + + result = append(result, cfg) + } + // TODO: add more types? + return result +} + +func (a *api) ListRunningPlugins() (runningPlugins []Plugin) { + if a == nil { + panic("api is nil") + } + for _, v := range a.agent.RunningInputs() { + p := Plugin{ + ID: idToString(v.ID), + Name: v.Config.Name, + Config: map[string]FieldConfig{}, + } + getFieldConfig(v.Config, p.Config) + runningPlugins = append(runningPlugins, p) + } + for _, v := range a.agent.RunningProcessors() { + p := Plugin{ + ID: idToString(v.GetID()), + Name: v.LogName(), + Config: map[string]FieldConfig{}, + } + val := reflect.ValueOf(v) + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + pluginCfg := val.FieldByName("Config").Interface() + getFieldConfig(pluginCfg, p.Config) + runningPlugins = append(runningPlugins, p) + } + for _, v := range a.agent.RunningOutputs() { + p := Plugin{ + ID: idToString(v.ID), + Name: v.Config.Name, + Config: map[string]FieldConfig{}, + } + getFieldConfig(v.Config, p.Config) + runningPlugins = append(runningPlugins, p) + + } + // TODO: add more types? + return runningPlugins +} + +func (a *api) UpdatePlugin(ID models.PluginID, config PluginConfigCreate) error { + // TODO: shut down plugin and start a new plugin with the same id. + return nil +} + +func (a *api) CreatePlugin(config PluginConfigCreate) (models.PluginID, error) { + log.Printf("I! [configapi] creating plugin %q", config.Name) + + parts := strings.Split(config.Name, ".") + pluginType, name := parts[0], parts[1] + switch pluginType { + case "inputs": + // add an input + input, ok := inputs.Inputs[name] + if !ok { + return "", fmt.Errorf("Error finding plugin with name %s", name) + } + // create a copy + i := input() + // set the config + if err := setFieldConfig(config.Config, i); err != nil { + return "", err + } + + // get parser! 
+ if t, ok := i.(parsers.ParserInput); ok { + pc := &parsers.Config{ + MetricName: name, + JSONStrict: true, + DataFormat: "influx", + } + if err := setFieldConfig(config.Config, pc); err != nil { + return "", err + } + parser, err := parsers.NewParser(pc) + if err != nil { + return "", err + } + t.SetParser(parser) + } + + if t, ok := i.(parsers.ParserFuncInput); ok { + pc := &parsers.Config{ + MetricName: name, + JSONStrict: true, + DataFormat: "influx", + } + if err := setFieldConfig(config.Config, pc); err != nil { + return "", err + } + + t.SetParserFunc(func() (parsers.Parser, error) { + return parsers.NewParser(pc) + }) + } + + // start it and put it into the agent manager? + pluginConfig := &models.InputConfig{Name: name} + if err := setFieldConfig(config.Config, pluginConfig); err != nil { + return "", err + } + + if err := setFieldConfig(config.Config, &pluginConfig.Filter); err != nil { + return "", err + } + + rp := models.NewRunningInput(i, pluginConfig) + rp.SetDefaultTags(a.config.Tags) + + if err := rp.Init(); err != nil { + return "", fmt.Errorf("could not initialize plugin %w", err) + } + + a.agent.AddInput(rp) + + go a.agent.RunInput(rp, time.Now()) + + return idToString(rp.ID), nil + case "outputs": + // add an output + output, ok := outputs.Outputs[name] + if !ok { + return "", fmt.Errorf("Error finding plugin with name %s", name) + } + // create a copy + o := output() + // set the config + if err := setFieldConfig(config.Config, o); err != nil { + return "", err + } + // start it and put it into the agent manager? + pluginConfig := &models.OutputConfig{Name: name} + if err := setFieldConfig(config.Config, pluginConfig); err != nil { + return "", err + } + + if err := setFieldConfig(config.Config, &pluginConfig.Filter); err != nil { + return "", err + } + + if t, ok := o.(serializers.SerializerOutput); ok { + sc := &serializers.Config{ + TimestampUnits: time.Duration(1 * time.Second), + DataFormat: "influx", + } + if err := setFieldConfig(config.Config, sc); err != nil { + return "", err + } + serializer, err := serializers.NewSerializer(sc) + if err != nil { + return "", err + } + t.SetSerializer(serializer) + } + + ro := models.NewRunningOutput(o, pluginConfig, a.config.Agent.MetricBatchSize, a.config.Agent.MetricBufferLimit) + + if err := ro.Init(); err != nil { + return "", fmt.Errorf("could not initialize plugin %w", err) + } + + a.agent.AddOutput(ro) + + go a.agent.RunOutput(a.outputCtx, ro) + + return idToString(ro.ID), nil + case "aggregators": + aggregator, ok := aggregators.Aggregators[name] + if !ok { + return "", fmt.Errorf("Error finding aggregator plugin with name %s", name) + } + agg := aggregator() + + // set the config + if err := setFieldConfig(config.Config, agg); err != nil { + return "", err + } + aggCfg := &models.AggregatorConfig{ + Name: name, + Delay: time.Millisecond * 100, + Period: time.Second * 30, + Grace: time.Second * 0, + } + if err := setFieldConfig(config.Config, aggCfg); err != nil { + return "", err + } + + if err := setFieldConfig(config.Config, &aggCfg.Filter); err != nil { + return "", err + } + + ra := models.NewRunningAggregator(agg, aggCfg) + if err := ra.Init(); err != nil { + return "", fmt.Errorf("could not initialize plugin %w", err) + } + a.agent.AddProcessor(ra) + go a.agent.RunProcessor(ra) + + return idToString(ra.ID), nil + + case "processors": + processor, ok := processors.Processors[name] + if !ok { + return "", fmt.Errorf("Error finding processor plugin with name %s", name) + } + // create a copy + p := processor() 
+ rootp := p.(telegraf.PluginDescriber) + if unwrapme, ok := rootp.(unwrappable); ok { + rootp = unwrapme.Unwrap() + } + // set the config + if err := setFieldConfig(config.Config, rootp); err != nil { + return "", err + } + // start it and put it into the agent manager? + pluginConfig := &models.ProcessorConfig{Name: name} + if err := setFieldConfig(config.Config, pluginConfig); err != nil { + return "", err + } + if err := setFieldConfig(config.Config, &pluginConfig.Filter); err != nil { + return "", err + } + + rp := models.NewRunningProcessor(p, pluginConfig) + + if err := rp.Init(); err != nil { + return "", fmt.Errorf("could not initialize plugin %w", err) + } + + a.agent.AddProcessor(rp) + + go a.agent.RunProcessor(rp) + + return idToString(rp.ID), nil + default: + return "", errors.New("Unknown plugin type") + } +} + +func (a *api) GetPluginStatus(ID models.PluginID) models.PluginState { + for _, v := range a.agent.RunningInputs() { + if v.ID == ID.Uint64() { + return v.GetState() + } + } + for _, v := range a.agent.RunningProcessors() { + if v.GetID() == ID.Uint64() { + return v.GetState() + } + } + for _, v := range a.agent.RunningOutputs() { + if v.ID == ID.Uint64() { + return v.GetState() + } + } + return models.PluginStateDead +} + +func (a *api) getPluginByID(ID models.PluginID) { + +} + +func (a *api) DeletePlugin(ID models.PluginID) error { + for _, v := range a.agent.RunningInputs() { + if v.ID == ID.Uint64() { + a.agent.StopInput(v) + return nil + } + } + for _, v := range a.agent.RunningProcessors() { + if v.GetID() == ID.Uint64() { + a.agent.StopProcessor(v) + return nil + } + } + for _, v := range a.agent.RunningOutputs() { + if v.ID == ID.Uint64() { + a.agent.StopOutput(v) + return nil + } + } + return nil +} + +// func (a *API) PausePlugin(ID models.PluginID) { + +// } + +// func (a *API) ResumePlugin(ID models.PluginID) { + +// } + +// setFieldConfig takes a map of field names to field values and sets them on the plugin +func setFieldConfig(cfg map[string]interface{}, p interface{}) error { + destStruct := reflect.ValueOf(p) + if destStruct.Kind() == reflect.Ptr { + destStruct = destStruct.Elem() + } + keys := make([]string, 0, len(cfg)) + for k := range cfg { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + v := cfg[k] + destField, destFieldType := getFieldByName(destStruct, k) // get by tag + if !destField.IsValid() { + continue + } + if !destField.CanSet() { + destField.Addr() + // TODO: error? 
+ fmt.Println("cannot set", k, destFieldType.Name()) + continue + } + val := reflect.ValueOf(v) + if err := setObject(val, destField, destFieldType); err != nil { + return fmt.Errorf("Could not set field %s: %w", k, err) + } + + } + return nil +} + +// getFieldByName gets a reference to a struct field from it's name, considering the tag names +func getFieldByName(destStruct reflect.Value, fieldName string) (reflect.Value, reflect.Type) { + if destStruct.Kind() == reflect.Ptr { + if destStruct.IsNil() { + return reflect.ValueOf(nil), reflect.TypeOf(nil) + } + destStruct = destStruct.Elem() + } + // may be an interface to a struct + if destStruct.Kind() == reflect.Interface { + destStruct = destStruct.Elem() + } + destStructType := reflect.TypeOf(destStruct.Interface()) + for i := 0; i < destStruct.NumField(); i++ { + field := destStruct.Field(i) + fieldType := destStructType.Field(i) + if fieldType.Type.Kind() == reflect.Struct && fieldType.Anonymous { + v, t := getFieldByName(field, fieldName) + if t != reflect.TypeOf(nil) { + return v, t + } + } + if fieldType.Tag.Get("toml") == fieldName { + return field, fieldType.Type + } + if strings.ToLower(fieldType.Name) == fieldName && unicode.IsUpper(rune(fieldType.Name[0])) { + return field, fieldType.Type + } + // handle snake string case conversion + } + return reflect.ValueOf(nil), reflect.TypeOf(nil) +} + +// getFieldConfig expects a plugin, p, (of any type) and a map to populate. +// it calls itself recursively so p must be an interface{} +func getFieldConfig(p interface{}, cfg map[string]FieldConfig) { + structVal := reflect.ValueOf(p) + structType := structVal.Type() + for structType.Kind() == reflect.Ptr { + structVal = structVal.Elem() + structType = structType.Elem() + } + + // safety check. + if structType.Kind() != reflect.Struct { + // woah, what? + panic(fmt.Sprintf("getFieldConfig expected a struct type, but got %v %v", p, structType.String())) + } + // structType.NumField() + + for i := 0; i < structType.NumField(); i++ { + var f reflect.Value + if !structVal.IsZero() { + f = structVal.Field(i) + } + _ = f + ft := structType.Field(i) + + ftType := ft.Type + if ftType.Kind() == reflect.Ptr { + ftType = ftType.Elem() + // f = f.Elem() + } + + // check if it's not exported, and skip if so. + if len(ft.Name) > 0 && strings.ToLower(string(ft.Name[0])) == string(ft.Name[0]) { + continue + } + tomlTag := ft.Tag.Get("toml") + if tomlTag == "-" { + continue + } + switch ftType.Kind() { + case reflect.Func, reflect.Interface: + continue + } + + // if this field is a struct, get the structure of it. + if ftType.Kind() == reflect.Struct && !isInternalStructFieldType(ft.Type) { + if ft.Anonymous { // embedded + t := getSubTypeType(ft) + i := reflect.New(t) + getFieldConfig(i.Interface(), cfg) + } else { + subCfg := map[string]FieldConfig{} + t := getSubTypeType(ft) + i := reflect.New(t) + getFieldConfig(i.Interface(), subCfg) + cfg[ft.Name] = FieldConfig{ + Type: FieldTypeFieldConfig, + SubFields: subCfg, + SubType: getFieldType(t), + } + } + continue + } + + // all other field types... + fc := FieldConfig{ + Type: getFieldTypeFromStructField(ft), + Format: ft.Tag.Get("format"), + Required: ft.Tag.Get("required") == "true", + } + + // set the default value for the field + if f.IsValid() && !f.IsZero() { + fc.Default = f.Interface() + // special handling for internal struct types so the struct doesn't serialize to an object. 
+ if d, ok := fc.Default.(internal.Duration); ok { + fc.Default = d.Duration + } + if s, ok := fc.Default.(internal.Size); ok { + fc.Default = s.Size + } + } + + // if we found a slice of objects, get the structure of that object + if hasSubType(ft.Type) { + t := getSubTypeType(ft) + n := t.Name() + _ = n + fc.SubType = getFieldType(t) + + if t.Kind() == reflect.Struct { + i := reflect.New(t) + subCfg := map[string]FieldConfig{} + getFieldConfig(i.Interface(), subCfg) + fc.SubFields = subCfg + } + } + // if we found a map of objects, get the structure of that object + + cfg[ft.Name] = fc + } +} + +func setObject(from, to reflect.Value, destType reflect.Type) error { + if from.Kind() == reflect.Interface { + from = reflect.ValueOf(from.Interface()) + } + // switch on source type + switch from.Kind() { + case reflect.Bool: + if to.Kind() == reflect.Ptr { + ptr := reflect.New(destType.Elem()) + to.Set(ptr) + to = ptr.Elem() + } + if to.Kind() == reflect.Interface { + to.Set(from) + } else { + to.SetBool(from.Bool()) + } + case reflect.String: + if to.Kind() == reflect.Ptr { + ptr := reflect.New(destType.Elem()) + destType = destType.Elem() + to.Set(ptr) + to = ptr.Elem() + } + // consider duration and size types + switch destType.String() { + case "time.Duration", "config.Duration": + d, err := time.ParseDuration(from.Interface().(string)) + if err != nil { + return fmt.Errorf("Couldn't parse duration %q: %w", from.Interface().(string), err) + } + to.SetInt(int64(d)) + case "internal.Duration": + d, err := time.ParseDuration(from.Interface().(string)) + if err != nil { + return fmt.Errorf("Couldn't parse duration %q: %w", from.Interface().(string), err) + } + to.FieldByName("Duration").SetInt(int64(d)) + case "internal.Size": + size, err := units.ParseStrictBytes(from.Interface().(string)) + if err != nil { + return fmt.Errorf("Couldn't parse size %q: %w", from.Interface().(string), err) + } + to.FieldByName("Size").SetInt(size) + case "config.Size": + size, err := units.ParseStrictBytes(from.Interface().(string)) + if err != nil { + return fmt.Errorf("Couldn't parse size %q: %w", from.Interface().(string), err) + } + to.SetInt(int64(size)) + // TODO: handle slice types? 
+ default: + // to.SetString(from.Interface().(string)) + to.Set(from) + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if to.Kind() == reflect.Ptr { + ptr := reflect.New(destType.Elem()) + destType = destType.Elem() + to.Set(ptr) + to = ptr.Elem() + } + + if destType.String() == "internal.Number" { + n := internal.Number{Value: float64(from.Int())} + to.Set(reflect.ValueOf(n)) + return nil + } + + switch destType.Kind() { + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + to.SetUint(uint64(from.Int())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + to.SetInt(from.Int()) + case reflect.Float32, reflect.Float64: + to.SetFloat(float64(from.Float())) + case reflect.Interface: + to.Set(from) + default: + return fmt.Errorf("cannot coerce int type into %s", destType.Kind().String()) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + if to.Kind() == reflect.Ptr { + ptr := reflect.New(destType.Elem()) + // destType = destType.Elem() + to.Set(ptr) + to = ptr.Elem() + } + + if destType.String() == "internal.Number" { + n := internal.Number{Value: float64(from.Uint())} + to.Set(reflect.ValueOf(n)) + return nil + } + + if to.Kind() == reflect.Interface { + to.Set(from) + } else { + to.SetUint(from.Uint()) + } + case reflect.Float32, reflect.Float64: + if to.Kind() == reflect.Ptr { + ptr := reflect.New(destType.Elem()) + // destType = destType.Elem() + to.Set(ptr) + to = ptr.Elem() + } + if destType.String() == "internal.Number" { + n := internal.Number{Value: from.Float()} + to.Set(reflect.ValueOf(n)) + return nil + } + + if to.Kind() == reflect.Interface { + to.Set(from) + } else { + to.SetFloat(from.Float()) + } + case reflect.Slice: + if destType.Kind() == reflect.Ptr { + destType = destType.Elem() + to = to.Elem() + } + if destType.Kind() != reflect.Slice { + return fmt.Errorf("error setting slice field into %s", destType.Kind().String()) + } + d := reflect.MakeSlice(destType, from.Len(), from.Len()) + for i := 0; i < from.Len(); i++ { + if err := setObject(from.Index(i), d.Index(i), destType.Elem()); err != nil { + return fmt.Errorf("couldn't set slice element: %w", err) + } + } + to.Set(d) + case reflect.Map: + if destType.Kind() == reflect.Ptr { + destType = destType.Elem() + ptr := reflect.New(destType) + to.Set(ptr) + to = to.Elem() + } + switch destType.Kind() { + case reflect.Struct: + structPtr := reflect.New(destType) + err := setFieldConfig(from.Interface().(map[string]interface{}), structPtr.Interface()) + if err != nil { + return err + } + to.Set(structPtr.Elem()) + case reflect.Map: + //TODO: handle map[string]type + if destType.Key().Kind() != reflect.String { + panic("expecting string types for maps") + } + to.Set(reflect.MakeMap(destType)) + + switch destType.Elem().Kind() { + case reflect.Interface, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64, + reflect.Bool: + for _, k := range from.MapKeys() { + t := from.MapIndex(k) + if t.Kind() == reflect.Interface { + t = reflect.ValueOf(t.Interface()) + } + to.SetMapIndex(k, t) + } + case reflect.String: + for _, k := range from.MapKeys() { + t := from.MapIndex(k) + if t.Kind() == reflect.Interface { + t = reflect.ValueOf(t.Interface()) + } + to.SetMapIndex(k, t) + } + // for _, k := range from.MapKeys() { + // v := from.MapIndex(k) + // s := 
v.Interface().(string) + // to.SetMapIndex(k, reflect.ValueOf(s)) + // } + case reflect.Slice: + for _, k := range from.MapKeys() { + // slice := reflect.MakeSlice(destType.Elem(), 0, 0) + sliceptr := reflect.New(destType.Elem()) + // sliceptr.Elem().Set(slice) + err := setObject(from.MapIndex(k), sliceptr, sliceptr.Type()) + if err != nil { + return fmt.Errorf("could not set slice: %w", err) + } + to.SetMapIndex(k, sliceptr.Elem()) + } + + case reflect.Struct: + for _, k := range from.MapKeys() { + structPtr := reflect.New(destType.Elem()) + err := setFieldConfig( + from.MapIndex(k).Interface().(map[string]interface{}), + structPtr.Interface(), + ) + // err := setObject(from.MapIndex(k), structPtr, structPtr.Type()) + if err != nil { + return fmt.Errorf("could not set struct: %w", err) + } + to.SetMapIndex(k, structPtr.Elem()) + } + + default: + return fmt.Errorf("can't write settings into map of type map[string]%s", destType.Elem().Kind().String()) + } + default: + return fmt.Errorf("Cannot load map into %q", destType.Kind().String()) + // panic("foo") + } + // to.Set(val) + default: + return fmt.Errorf("cannot convert unknown type %s to %s", from.Kind().String(), destType.String()) + } + return nil +} + +// hasSubType returns true when the field has a subtype (map,slice,struct) +func hasSubType(t reflect.Type) bool { + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + switch t.Kind() { + case reflect.Slice, reflect.Map: + return true + case reflect.Struct: + switch t.String() { + case "internal.Duration", "config.Duration", "internal.Size", "config.Size": + return false + } + return true + default: + return false + } +} + +// getSubTypeType gets the underlying subtype's reflect.Type +// examples: +// []string => string +// map[string]int => int +// User => User +func getSubTypeType(structField reflect.StructField) reflect.Type { + ft := structField.Type + if ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + switch ft.Kind() { + case reflect.Slice: + t := ft.Elem() + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + return t + case reflect.Map: + t := ft.Elem() + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + return t + case reflect.Struct: + return ft + } + panic(ft.String() + " is not a type that has subtype information (map, slice, struct)") +} + +// getFieldType translates reflect.Types to our API field types. 
+func getFieldType(t reflect.Type) FieldType { + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + switch t.Kind() { + case reflect.String: + return FieldTypeString + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, + reflect.Uint32, reflect.Uint64: + return FieldTypeInteger + case reflect.Float32, reflect.Float64: + return FieldTypeFloat + case reflect.Bool: + return FieldTypeBool + case reflect.Slice: + return FieldTypeSlice + case reflect.Map: + return FieldTypeMap + case reflect.Struct: + switch t.String() { + case "internal.Duration", "config.Duration": + return FieldTypeDuration + case "internal.Size", "config.Size": + return FieldTypeSize + } + return FieldTypeFieldConfig + } + return FieldTypeUnknown +} + +func getFieldTypeFromStructField(structField reflect.StructField) FieldType { + fieldName := structField.Name + ft := structField.Type + result := getFieldType(ft) + if result == FieldTypeUnknown { + panic(fmt.Sprintf("unknown type, name: %q, string: %q", fieldName, ft.String())) + } + return result +} + +func isInternalStructFieldType(t reflect.Type) bool { + switch t.String() { + case "internal.Duration", "config.Duration": + return true + case "internal.Size", "config.Size": + return true + default: + return false + } +} + +func idToString(id uint64) models.PluginID { + return models.PluginID(fmt.Sprintf("%016x", id)) +} + +// make sure these models implement RunningPlugin +var _ models.RunningPlugin = &models.RunningProcessor{} +var _ models.RunningPlugin = &models.RunningAggregator{} +var _ models.RunningPlugin = &models.RunningInput{} +var _ models.RunningPlugin = &models.RunningOutput{} + +type unwrappable interface { + Unwrap() telegraf.Processor +} diff --git a/plugins/configs/api/controller_test.go b/plugins/configs/api/controller_test.go new file mode 100644 index 0000000000000..24dfe65d8d461 --- /dev/null +++ b/plugins/configs/api/controller_test.go @@ -0,0 +1,554 @@ +package api + +import ( + "context" + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/agent" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/models" + _ "github.com/influxdata/telegraf/plugins/aggregators/all" + "github.com/influxdata/telegraf/plugins/common/kafka" + "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/plugins/inputs" + _ "github.com/influxdata/telegraf/plugins/inputs/all" + "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer" + _ "github.com/influxdata/telegraf/plugins/outputs/all" + _ "github.com/influxdata/telegraf/plugins/processors/all" + // _ "github.com/influxdata/telegraf/plugins/inputs/cpu" + // _ "github.com/influxdata/telegraf/plugins/outputs/file" + // _ "github.com/influxdata/telegraf/plugins/processors/rename" +) + +// TestListPluginTypes tests that the config api can scrape all existing plugins +// for type information to build a schema. 
+func TestListPluginTypes(t *testing.T) { + cfg := config.NewConfig() // initalizes API + a := agent.NewAgent(context.Background(), cfg) + + api, cancel := newAPI(context.Background(), cfg, a) + defer cancel() + + pluginConfigs := api.ListPluginTypes() + require.Greater(t, len(pluginConfigs), 10) + // b, _ := json.Marshal(pluginConfigs) + // fmt.Println(string(b)) + + // find the gnmi plugin + var gnmi PluginConfigTypeInfo + for _, conf := range pluginConfigs { + if conf.Name == "gnmi" { + gnmi = conf + break + } + } + + // find the cloudwatch plugin + var cloudwatch PluginConfigTypeInfo + for _, conf := range pluginConfigs { + if conf.Name == "cloudwatch" { + cloudwatch = conf + break + } + } + + // validate a slice of objects + require.EqualValues(t, "array", gnmi.Config["Subscriptions"].Type) + require.EqualValues(t, "object", gnmi.Config["Subscriptions"].SubType) + require.NotNil(t, gnmi.Config["Subscriptions"].SubFields) + require.EqualValues(t, "string", gnmi.Config["Subscriptions"].SubFields["Name"].Type) + + // validate a slice of pointer objects + require.EqualValues(t, "array", cloudwatch.Config["Metrics"].Type) + require.EqualValues(t, "object", cloudwatch.Config["Metrics"].SubType) + require.NotNil(t, cloudwatch.Config["Metrics"].SubFields) + require.EqualValues(t, "array", cloudwatch.Config["Metrics"].SubFields["StatisticExclude"].Type) + require.EqualValues(t, "array", cloudwatch.Config["Metrics"].SubFields["MetricNames"].Type) + + // validate a map of strings + require.EqualValues(t, "map", gnmi.Config["Aliases"].Type) + require.EqualValues(t, "string", gnmi.Config["Aliases"].SubType) + + // check a default value + require.EqualValues(t, "proto", gnmi.Config["Encoding"].Default) + require.EqualValues(t, 10*1e9, gnmi.Config["Redial"].Default) + + // check anonymous composed fields + require.EqualValues(t, "bool", gnmi.Config["InsecureSkipVerify"].Type) + + // TODO: check named composed fields + +} + +func TestInputPluginLifecycle(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg := config.NewConfig() // initalizes API + a := agent.NewAgent(ctx, cfg) + api, outputCancel := newAPI(ctx, cfg, a) + defer outputCancel() + + // t.Log("Running") + + go a.RunWithAPI(outputCancel) + // TODO: defer a.Shutdown() + + // create + // t.Log("Create plugin") + newPluginID, err := api.CreatePlugin(PluginConfigCreate{ + Name: "inputs.cpu", + Config: map[string]interface{}{ + "percpu": true, + "totalcpu": true, + "collect_cpu_time": true, + "report_active": true, + }, + }) + require.NoError(t, err) + require.NotZero(t, len(newPluginID)) + + // get plugin status + // t.Log("wait for start state") + waitForStatus(t, api, newPluginID, "running", 20*time.Second) + + // list running + // t.Log("List running plugins") + runningPlugins := api.ListRunningPlugins() + require.Len(t, runningPlugins, 1) + + status := api.GetPluginStatus(newPluginID) + require.Equal(t, "running", status.String()) + // delete + // t.Log("delete plugin") + err = api.DeletePlugin(newPluginID) + require.NoError(t, err) + + // t.Log("wait for dead state") + waitForStatus(t, api, newPluginID, "dead", 300*time.Millisecond) + + // get plugin status until dead + status = api.GetPluginStatus(newPluginID) + require.Equal(t, "dead", status.String()) + + // list running should have none + // t.Log("list plugins") + runningPlugins = api.ListRunningPlugins() + require.Len(t, runningPlugins, 0) +} + +func TestAllPluginLifecycle(t *testing.T) { + runCtx, cancel := 
context.WithCancel(context.Background()) + defer cancel() + + cfg := config.NewConfig() + a := agent.NewAgent(context.Background(), cfg) + + api, outputCancel := newAPI(runCtx, cfg, a) + defer outputCancel() + + // t.Log("Running") + go a.RunWithAPI(outputCancel) + + // create + // t.Log("Create plugin") + pluginIDs := []models.PluginID{} + newPluginID, err := api.CreatePlugin(PluginConfigCreate{ + Name: "inputs.cpu", + Config: map[string]interface{}{}, + }) + // t.Log("inputs.cpu", newPluginID) + pluginIDs = append(pluginIDs, newPluginID) + require.NoError(t, err) + require.NotZero(t, len(newPluginID)) + + newPluginID, err = api.CreatePlugin(PluginConfigCreate{ + Name: "processors.rename", + Config: map[string]interface{}{ + "replace": []map[string]interface{}{{ + "tag": "hostname", + "dest": "a_host", + }}, + }, + }) + require.NoError(t, err) + pluginIDs = append(pluginIDs, newPluginID) + require.NotZero(t, len(newPluginID)) + + newPluginID, err = api.CreatePlugin(PluginConfigCreate{ + Name: "outputs.file", + Config: map[string]interface{}{ + "files": []string{"stdout"}, + }, + }) + // t.Log("outputs.file", newPluginID) + pluginIDs = append(pluginIDs, newPluginID) + require.NoError(t, err) + require.NotZero(t, len(newPluginID)) + + for _, id := range pluginIDs { + // t.Log("waiting for plugin", id) + waitForStatus(t, api, id, "running", 10*time.Second) + } + + // list running + // t.Log("List running plugins") + runningPlugins := api.ListRunningPlugins() + require.Len(t, runningPlugins, 3) + + time.Sleep(5 * time.Second) + + // delete + // t.Log("delete plugins") + for _, id := range pluginIDs { + err = api.DeletePlugin(id) + require.NoError(t, err) + } + + // t.Log("wait for dead state") + for _, id := range pluginIDs { + waitForStatus(t, api, id, "dead", 300*time.Millisecond) + } + + // plugins might not be delisted immediately.. 
loop until done + for { + // list running should have none + runningPlugins = api.ListRunningPlugins() + if len(runningPlugins) == 0 { + break + } + time.Sleep(50 * time.Millisecond) + } +} + +func waitForStatus(t *testing.T, api *api, newPluginID models.PluginID, waitStatus string, timeout time.Duration) { + timeoutAt := time.Now().Add(timeout) + for timeoutAt.After(time.Now()) { + status := api.GetPluginStatus(newPluginID) + // t.Log("plugin", newPluginID, "status", status) + if status.String() == waitStatus { + return + } + time.Sleep(10 * time.Millisecond) + } + require.FailNow(t, "timed out waiting for status "+waitStatus) +} + +func TestSetFieldConfig(t *testing.T) { + creator := inputs.Inputs["kafka_consumer"] + cfg := map[string]interface{}{ + "name": "alias", + "alias": "bar", + "interval": "30s", + "collection_jitter": "5s", + "precision": "1ms", + "name_override": "my", + "measurement_prefix": "prefix_", + "measurement_suffix": "_suffix", + "tags": map[string]interface{}{ + "tag1": "value", + }, + "filter": map[string]interface{}{ + "namedrop": []string{"namedrop"}, + "namepass": []string{"namepass"}, + "fielddrop": []string{"fielddrop"}, + "fieldpass": []string{"fieldpass"}, + "tagdrop": []map[string]interface{}{{ + "name": "tagfilter", + "filter": []string{"filter"}, + }}, + "tagpass": []map[string]interface{}{{ + "name": "tagpass", + "filter": []string{"tagpassfilter"}, + }}, + "tagexclude": []string{"tagexclude"}, + "taginclude": []string{"taginclude"}, + }, + "brokers": []string{"localhost:9092"}, + "topics": []string{"foo"}, + "topic_tag": "foo", + "client_id": "tg123", + "tls_ca": "/etc/telegraf/ca.pem", + "tls_cert": "/etc/telegraf/cert.pem", + "tls_key": "/etc/telegraf/key.pem", + "insecure_skip_verify": true, + "sasl_mechanism": "SCRAM-SHA-256", + "sasl_version": 1, + "compression_codec": 1, + "sasl_username": "Some-Username", + "data_format": "influx", + } + i := creator() + err := setFieldConfig(cfg, i) + require.NoError(t, err) + expect := &kafka_consumer.KafkaConsumer{ + Brokers: []string{"localhost:9092"}, + Topics: []string{"foo"}, + TopicTag: "foo", + ReadConfig: kafka.ReadConfig{ + Config: kafka.Config{ + ClientID: "tg123", + CompressionCodec: 1, + SASLAuth: kafka.SASLAuth{ + SASLUsername: "Some-Username", + SASLMechanism: "SCRAM-SHA-256", + SASLVersion: intptr(1), + }, + ClientConfig: tls.ClientConfig{ + TLSCA: "/etc/telegraf/ca.pem", + TLSCert: "/etc/telegraf/cert.pem", + TLSKey: "/etc/telegraf/key.pem", + InsecureSkipVerify: true, + }, + }, + }, + } + + require.Equal(t, expect, i) + + icfg := &models.InputConfig{} + err = setFieldConfig(cfg, icfg) + require.NoError(t, err) + expected := &models.InputConfig{ + Name: "alias", + Alias: "bar", + Interval: 30 * time.Second, + CollectionJitter: 5 * time.Second, + Precision: 1 * time.Millisecond, + NameOverride: "my", + MeasurementPrefix: "prefix_", + MeasurementSuffix: "_suffix", + Tags: map[string]string{ + "tag1": "value", + }, + Filter: models.Filter{ + NameDrop: []string{"namedrop"}, + NamePass: []string{"namepass"}, + FieldDrop: []string{"fielddrop"}, + FieldPass: []string{"fieldpass"}, + TagDrop: []models.TagFilter{{ + Name: "tagfilter", + Filter: []string{"filter"}, + }}, + TagPass: []models.TagFilter{{ + Name: "tagpass", + Filter: []string{"tagpassfilter"}, + }}, + TagExclude: []string{"tagexclude"}, + TagInclude: []string{"taginclude"}, + }, + } + + require.Equal(t, expected, icfg) +} + +// TODO: test different plugin types + +func TestExampleWorstPlugin(t *testing.T) { + input := map[string]interface{}{ 
+ "elapsed": "3s", + "elapsed2": "4s", + "readtimeout": "5s", + "size1": "8MiB", + "size2": "9MiB", + "pointerstruct": map[string]interface{}{ + "field": "f", + }, + "b": true, + "i": 1, + "i8": 2, + "i32": 3, + "u8": 4, + "f": 5.0, + "pf": 6.0, + "ps": "I am a string pointer", + // type Header map[string][]string + "header": map[string]interface{}{ + "Content-Type": []interface{}{ + "json/application", "text/html", + }, + }, + "fields": map[string]interface{}{ + "field1": "field1", + "field2": 1, + "field3": float64(5), + }, + "reservedkeys": map[string]bool{ + "key": true, + }, + "stringtonumber": map[string][]map[string]float64{ + "s": { + { + "n": 1.0, + }, + }, + }, + "clean": []map[string]interface{}{ + { + "field": "fieldtest", + }, + }, + "templates": []map[string]interface{}{ + { + "tag": "tagtest", + }, + }, + "value": "string", + "devicetags": map[string][]map[string]string{ + "s": { + { + "n": "1.0", + }, + }, + }, + "percentiles": []interface{}{ + 1, + }, + "floatpercentiles": []interface{}{ + 1.0, + }, + "mapofstructs": map[string]interface{}{ + "src": map[string]interface{}{ + "dest": "d", + }, + }, + "command": []interface{}{ + "string", + 1, + 2.0, + }, + "tagslice": []interface{}{ + []interface{}{ + "s", + }, + }, + "address": []interface{}{ + 1, + }, + } + readTimeout := internal.Duration{Duration: 5 * time.Second} + b := true + var i int = 1 + f := float64(6) + s := "I am a string pointer" + header := http.Header{ + "Content-Type": []string{"json/application", "text/html"}, + } + expected := ExampleWorstPlugin{ + Elapsed: config.Duration(3 * time.Second), + Elapsed2: internal.Duration{Duration: 4 * time.Second}, + ReadTimeout: &readTimeout, + Size1: config.Size(8 * 1024 * 1024), + Size2: internal.Size{Size: 9 * 1024 * 1024}, + PointerStruct: &baseopts{Field: "f"}, + B: &b, + I: &i, + I8: 2, + I32: 3, + U8: 4, + F: 5, + PF: &f, + PS: &s, + Header: header, + DefaultFieldsSets: map[string]interface{}{ + "field1": "field1", + "field2": 1, + "field3": float64(5), + }, + ReservedKeys: map[string]bool{ + "key": true, + }, + StringToNumber: map[string][]map[string]float64{ + "s": { + { + "n": 1.0, + }, + }, + }, + Clean: []baseopts{ + {Field: "fieldtest"}, + }, + Templates: []*baseopts{ + {Tag: "tagtest"}, + }, + Value: "string", + DeviceTags: map[string][]map[string]string{ + "s": { + { + "n": "1.0", + }, + }, + }, + Percentiles: []internal.Number{ + {Value: 1}, + }, + FloatPercentiles: []float64{1.0}, + MapOfStructs: map[string]baseopts{ + "src": { + Dest: "d", + }, + }, + Command: []interface{}{ + "string", + 1, + 2.0, + }, + TagSlice: [][]string{ + {"s"}, + }, + Address: []uint16{ + 1, + }, + } + + actual := ExampleWorstPlugin{} + err := setFieldConfig(input, &actual) + require.NoError(t, err) + require.Equal(t, expected, actual) +} + +type ExampleWorstPlugin struct { + Elapsed config.Duration + Elapsed2 internal.Duration + ReadTimeout *internal.Duration + Size1 config.Size + Size2 internal.Size + PointerStruct *baseopts + B *bool + I *int + I8 int8 + I32 int32 + U8 uint8 + F float64 + PF *float64 + PS *string + Header http.Header + DefaultFieldsSets map[string]interface{} `toml:"fields"` + ReservedKeys map[string]bool + StringToNumber map[string][]map[string]float64 + Clean []baseopts + Templates []*baseopts + Value interface{} `json:"value"` + DeviceTags map[string][]map[string]string + Percentiles []internal.Number + FloatPercentiles []float64 + MapOfStructs map[string]baseopts + Command []interface{} + TagSlice [][]string + Address []uint16 `toml:"address"` +} + +type 
baseopts struct { + Field string + Tag string + Dest string +} + +func intptr(i int) *int { + return &i +} diff --git a/plugins/configs/api/listener.go b/plugins/configs/api/listener.go new file mode 100644 index 0000000000000..51d6a092a8014 --- /dev/null +++ b/plugins/configs/api/listener.go @@ -0,0 +1,123 @@ +package api + +import ( + "context" + "encoding/json" + "fmt" + "log" + "net/http" + "time" + + "github.com/gorilla/mux" + "github.com/influxdata/telegraf/models" +) + +type ConfigAPIService struct { + server *http.Server + api *api +} + +func newConfigAPIService(server *http.Server, api *api) *ConfigAPIService { + service := &ConfigAPIService{ + server: server, + api: api, + } + server.Handler = service.mux() + return service +} + +func (s *ConfigAPIService) mux() *mux.Router { + m := mux.NewRouter() + m.HandleFunc("/status", s.status) + m.HandleFunc("/plugins/create", s.createPlugin) + m.HandleFunc("/plugins/{id:[0-9a-f]+}/status", s.pluginStatus) + m.HandleFunc("/plugins/list", s.listPlugins) + m.HandleFunc("/plugins/running", s.runningPlugins) + return m +} + +func (s *ConfigAPIService) status(w http.ResponseWriter, req *http.Request) { + if req.Body != nil { + defer req.Body.Close() + } + _, _ = w.Write([]byte("ok")) +} + +func (s *ConfigAPIService) createPlugin(w http.ResponseWriter, req *http.Request) { + if req.Body != nil { + defer req.Body.Close() + } + cfg := PluginConfigCreate{} + + dec := json.NewDecoder(req.Body) + if err := dec.Decode(&cfg); err != nil { + log.Printf("E! [configapi] decode error %v", err) + w.WriteHeader(http.StatusBadRequest) + return + } + id, err := s.api.CreatePlugin(cfg) + if err != nil { + log.Printf("E! [configapi] error creating plugin %v", err) + w.WriteHeader(http.StatusBadRequest) + return + } + _, _ = w.Write([]byte(fmt.Sprintf(`{"id": "%s"}`, id))) +} + +func (s *ConfigAPIService) Start() { + // if s.server.TLSConfig != nil { + // s.server.ListenAndServeTLS() + // } + go func() { + _ = s.server.ListenAndServe() + }() +} + +func (s *ConfigAPIService) listPlugins(w http.ResponseWriter, req *http.Request) { + defer req.Body.Close() + typeInfo := s.api.ListPluginTypes() + + bytes, err := json.Marshal(typeInfo) + if err != nil { + log.Printf("!E [configapi] error marshalling json: %v", err) + w.WriteHeader(http.StatusInternalServerError) + return + } + _, _ = w.Write(bytes) +} + +func (s *ConfigAPIService) runningPlugins(w http.ResponseWriter, req *http.Request) { + defer req.Body.Close() + plugins := s.api.ListRunningPlugins() + + bytes, err := json.Marshal(plugins) + if err != nil { + log.Printf("!E [configapi] error marshalling json: %v", err) + w.WriteHeader(http.StatusInternalServerError) + return + } + _, _ = w.Write(bytes) +} + +func (s *ConfigAPIService) pluginStatus(w http.ResponseWriter, req *http.Request) { + defer req.Body.Close() + id := mux.Vars(req)["id"] + if len(id) == 0 { + w.WriteHeader(http.StatusBadRequest) + return + } + state := s.api.GetPluginStatus(models.PluginID(id)) + _, err := w.Write([]byte(fmt.Sprintf(`{"status": %q}`, state.String()))) + if err != nil { + log.Printf("W! error writing to connection: %v", err) + return + } +} + +func (s *ConfigAPIService) Stop() { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + if err := s.server.Shutdown(ctx); err != nil { + log.Printf("W! 
[configapi] error on shutdown: %s", err) + } +} diff --git a/plugins/configs/api/listener_test.go b/plugins/configs/api/listener_test.go new file mode 100644 index 0000000000000..abfbf2386051e --- /dev/null +++ b/plugins/configs/api/listener_test.go @@ -0,0 +1,103 @@ +package api + +import ( + "bytes" + "context" + "encoding/json" + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + + "github.com/influxdata/telegraf/agent" + "github.com/influxdata/telegraf/config" + "github.com/stretchr/testify/require" +) + +func TestStatus(t *testing.T) { + s := &ConfigAPIService{} + srv := httptest.NewServer(s.mux()) + + resp, err := http.Get(srv.URL + "/status") + require.NoError(t, err) + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + require.EqualValues(t, "ok", body) + require.EqualValues(t, 200, resp.StatusCode) +} + +func TestStartPlugin(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + c := config.NewConfig() + agent := agent.NewAgent(ctx, c) + api, outputCancel := newAPI(ctx, c, agent) + go agent.RunWithAPI(outputCancel) + + s := &ConfigAPIService{ + api: api, + } + srv := httptest.NewServer(s.mux()) + + buf := bytes.NewBufferString(`{ + "name": "inputs.file", + "config": { + "files": ["testdata.lp"], + "data_format": "influx" + } +}`) + resp, err := http.Post(srv.URL+"/plugins/create", "application/json", buf) + require.NoError(t, err) + createResp := struct { + ID string + }{} + require.EqualValues(t, 200, resp.StatusCode) + err = json.NewDecoder(resp.Body).Decode(&createResp) + require.NoError(t, err) + resp.Body.Close() + + require.Regexp(t, `^[\da-f]{8}\d{8}$`, createResp.ID) + + statusResp := struct { + Status string + Reason string + }{} + + for statusResp.Status != "running" { + resp, err = http.Get(srv.URL + "/plugins/" + createResp.ID + "/status") + require.NoError(t, err) + + require.EqualValues(t, 200, resp.StatusCode) + err = json.NewDecoder(resp.Body).Decode(&statusResp) + require.NoError(t, err) + resp.Body.Close() + } + + require.EqualValues(t, "running", statusResp.Status) + + resp, err = http.Get(srv.URL + "/plugins/list") + require.NoError(t, err) + require.EqualValues(t, 200, resp.StatusCode) + listResp := []PluginConfigTypeInfo{} + err = json.NewDecoder(resp.Body).Decode(&listResp) + require.NoError(t, err) + resp.Body.Close() + + if len(listResp) < 20 { + require.FailNow(t, "expected there to be more than 20 plugins loaded, was only", len(listResp)) + } + + resp, err = http.Get(srv.URL + "/plugins/running") + require.NoError(t, err) + require.EqualValues(t, 200, resp.StatusCode) + runningList := []Plugin{} + err = json.NewDecoder(resp.Body).Decode(&runningList) + require.NoError(t, err) + resp.Body.Close() + + if len(runningList) != 1 { + require.FailNow(t, "expected there to be 1 running plugin, was", len(runningList)) + } + +} diff --git a/plugins/configs/api/plugin.go b/plugins/configs/api/plugin.go new file mode 100644 index 0000000000000..a2315db189eed --- /dev/null +++ b/plugins/configs/api/plugin.go @@ -0,0 +1,75 @@ +package api + +import ( + "context" + "fmt" + "net/http" + + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/plugins/configs" +) + +type ConfigAPIPlugin struct { + ServiceAddress string `toml:"service_address"` + Storage config.StoragePlugin `toml:"storage"` + tls.ServerConfig + + api *api + cancel context.CancelFunc + server 
*ConfigAPIService + + plugins []PluginConfig +} + +func (a *ConfigAPIPlugin) GetName() string { + return "api" +} + +func (a *ConfigAPIPlugin) Init(ctx context.Context, cfg *config.Config, agent config.AgentController) error { + a.api, a.cancel = newAPI(ctx, cfg, agent) + + // TODO: is this needed? + if err := a.Storage.Init(); err != nil { + return nil + } + + if err := a.Storage.Load("config-api", "plugins", &a.plugins); err != nil { + return fmt.Errorf("loading plugin state: %w", err) + } + + // start listening for HTTP requests + tlsConfig, err := a.TLSConfig() + if err != nil { + return err + } + if a.ServiceAddress == "" { + a.ServiceAddress = ":7551" + } + a.server = newConfigAPIService(&http.Server{ + Addr: a.ServiceAddress, + TLSConfig: tlsConfig, + }, a.api) + + a.server.Start() + return nil +} + +func (a *ConfigAPIPlugin) Close() error { + // shut down server + // stop accepting new requests + // wait until all requests finish + a.server.Stop() + + // store state + if err := a.Storage.Save("config-api", "plugins", &a.plugins); err != nil { + return fmt.Errorf("saving plugin state: %w", err) + } + return nil +} + +func init() { + configs.Add("api", func() config.ConfigPlugin { + return &ConfigAPIPlugin{} + }) +} diff --git a/plugins/configs/api/testdata.lp b/plugins/configs/api/testdata.lp new file mode 100644 index 0000000000000..e056407e7305d --- /dev/null +++ b/plugins/configs/api/testdata.lp @@ -0,0 +1,5 @@ +sqlserver_requests,database_name=AdventureWorks2016,sql_instance=QDLP03:SQL2017,query_hash=aaa Somefield=10 +sqlserver_requests,database_name=AdventureWorks2016,sql_instance=QDLP03:SQL2017,query_hash=bbb Somefield=2 +sqlserver_requests,database_name=Northwind,sql_instance=QDLP03:SQL2017,query_hash=ccc Somefield=4 +sqlserver_requests,database_name=Northwind,sql_instance=QDLP03:SQL2017,query_hash=ddd Somefield=6 +sqlserver_cpu,sql_instance=QDLP03:SQL2017 other_process_cpu=8i,sqlserver_process_cpu=12i,system_idle_cpu=80i diff --git a/plugins/configs/registry.go b/plugins/configs/registry.go new file mode 100644 index 0000000000000..067f3743b9f6b --- /dev/null +++ b/plugins/configs/registry.go @@ -0,0 +1,7 @@ +package configs + +import "github.com/influxdata/telegraf/config" + +func Add(name string, creator config.ConfigCreator) { + config.ConfigPlugins[name] = creator +} From 71f2e573d8614a7e067d516d296d8e91cdd4c910 Mon Sep 17 00:00:00 2001 From: helenosheaa <38860767+helenosheaa@users.noreply.github.com> Date: Wed, 21 Jul 2021 12:02:00 -0400 Subject: [PATCH 07/48] processor, state and plugin models --- models/running_input.go | 60 ++++++++++++++++++++++++++------ models/running_output.go | 47 +++++++++++++++++-------- models/running_plugin.go | 38 ++++++++++++++++++++ models/running_processor.go | 31 ++++++++++++----- models/running_processor_test.go | 6 ++-- models/state.go | 42 ++++++++++++++++++++++ 6 files changed, 187 insertions(+), 37 deletions(-) create mode 100644 models/running_plugin.go create mode 100644 models/state.go diff --git a/models/running_input.go b/models/running_input.go index 70a4c2ee3a70f..e7a5affa2ae95 100644 --- a/models/running_input.go +++ b/models/running_input.go @@ -13,6 +13,7 @@ var ( ) type RunningInput struct { + ID uint64 Input telegraf.Input Config *InputConfig @@ -21,6 +22,7 @@ type RunningInput struct { MetricsGathered selfstat.Stat GatherTime selfstat.Stat + State } func NewRunningInput(input telegraf.Input, config *InputConfig) *RunningInput { @@ -38,6 +40,7 @@ func NewRunningInput(input telegraf.Input, config *InputConfig) 
*RunningInput { SetLoggerOnPlugin(input, logger) return &RunningInput{ + ID: NextPluginID(), Input: input, Config: config, MetricsGathered: selfstat.Register( @@ -56,23 +59,58 @@ func NewRunningInput(input telegraf.Input, config *InputConfig) *RunningInput { // InputConfig is the common config for all inputs. type InputConfig struct { - Name string - Alias string - Interval time.Duration - CollectionJitter time.Duration - Precision time.Duration - - NameOverride string - MeasurementPrefix string - MeasurementSuffix string - Tags map[string]string - Filter Filter + Name string `toml:"name" json:"name"` + Alias string `toml:"alias" json:"alias"` + Interval time.Duration `toml:"interval" json:"interval"` + CollectionJitter time.Duration `toml:"collection_jitter" json:"collection_jitter"` + Precision time.Duration `toml:"precision" json:"precision"` + + NameOverride string `toml:"name_override" json:"name_override"` + MeasurementPrefix string `toml:"measurement_prefix" json:"measurement_prefix"` + MeasurementSuffix string `toml:"measurement_suffix" json:"measurement_suffix"` + Tags map[string]string `toml:"tags" json:"tags"` + Filter Filter `toml:"filter" json:"filter"` } func (r *RunningInput) metricFiltered(metric telegraf.Metric) { metric.Drop() } +func (r *RunningInput) Start(acc telegraf.Accumulator) error { + r.setState(PluginStateStarting) + if si, ok := r.Input.(telegraf.ServiceInput); ok { + if err := si.Start(acc); err != nil { + return err + } + } + r.setState(PluginStateRunning) + return nil +} + +func (r *RunningInput) Stop() { + state := r.GetState() + + switch state { + case PluginStateCreated, PluginStateStarting: + // shutting down before it got started. nothing to close + r.setState(PluginStateDead) + return + } + + if state != PluginStateRunning { + panic("plugin state was not running, it was: " + state.String()) + } + r.setState(PluginStateStopping) + if si, ok := r.Input.(telegraf.ServiceInput); ok { + si.Stop() + } + r.setState(PluginStateDead) +} + +func (r *RunningInput) GetID() uint64 { + return r.ID +} + func (r *RunningInput) LogName() string { return logName("inputs", r.Config.Name, r.Config.Alias) } diff --git a/models/running_output.go b/models/running_output.go index 6f5f8c0a84bad..e879a93797eaf 100644 --- a/models/running_output.go +++ b/models/running_output.go @@ -35,6 +35,8 @@ type OutputConfig struct { // RunningOutput contains the output configuration type RunningOutput struct { + ID uint64 + // Must be 64-bit aligned newMetricsCount int64 droppedMetrics int64 @@ -53,6 +55,7 @@ type RunningOutput struct { log telegraf.Logger aggMutex sync.Mutex + State } func NewRunningOutput( @@ -87,6 +90,7 @@ func NewRunningOutput( } ro := &RunningOutput{ + ID: NextPluginID(), buffer: NewBuffer(config.Name, config.Alias, bufferLimit), BatchReady: make(chan time.Time, 1), Output: output, @@ -105,6 +109,7 @@ func NewRunningOutput( ), log: logger, } + ro.setState(PluginStateCreated) return ro } @@ -143,13 +148,6 @@ func (r *RunningOutput) AddMetric(metric telegraf.Metric) { return } - if output, ok := r.Output.(telegraf.AggregatingOutput); ok { - r.aggMutex.Lock() - output.Add(metric) - r.aggMutex.Unlock() - return - } - if len(r.Config.NameOverride) > 0 { metric.SetName(r.Config.NameOverride) } @@ -178,14 +176,6 @@ func (r *RunningOutput) AddMetric(metric telegraf.Metric) { // Write writes all metrics to the output, stopping when all have been sent on // or error. 
func (r *RunningOutput) Write() error { - if output, ok := r.Output.(telegraf.AggregatingOutput); ok { - r.aggMutex.Lock() - metrics := output.Push() - r.buffer.Add(metrics...) - output.Reset() - r.aggMutex.Unlock() - } - atomic.StoreInt64(&r.newMetricsCount, 0) // Only process the metrics in the buffer now. Metrics added while we are @@ -227,10 +217,23 @@ func (r *RunningOutput) WriteBatch() error { // Close closes the output func (r *RunningOutput) Close() { + state := r.GetState() + + switch state { + case PluginStateCreated, PluginStateStarting: + r.setState(PluginStateDead) + return + } + + if state != PluginStateRunning { + panic("expected output plugin to be running, but was: " + state.String()) + } + r.setState(PluginStateStopping) err := r.Output.Close() if err != nil { r.log.Errorf("Error closing output: %v", err) } + r.setState(PluginStateDead) } func (r *RunningOutput) write(metrics []telegraf.Metric) error { @@ -251,6 +254,20 @@ func (r *RunningOutput) write(metrics []telegraf.Metric) error { return err } +func (r *RunningOutput) Connect() error { + r.setState(PluginStateStarting) + err := r.Output.Connect() + if err != nil { + return err + } + r.setState(PluginStateRunning) + return nil +} + +func (r *RunningOutput) GetID() uint64 { + return r.ID +} + func (r *RunningOutput) LogBufferStatus() { nBuffer := r.buffer.Len() r.log.Debugf("Buffer fullness: %d / %d metrics", nBuffer, r.MetricBufferLimit) diff --git a/models/running_plugin.go b/models/running_plugin.go new file mode 100644 index 0000000000000..226cccf0ea899 --- /dev/null +++ b/models/running_plugin.go @@ -0,0 +1,38 @@ +package models + +import ( + "strconv" + + "github.com/influxdata/telegraf" +) + +// PluginID is the random id assigned to the plugin so it can be referenced later +type PluginID string + +func (id PluginID) Uint64() uint64 { + result, _ := strconv.ParseUint(string(id), 16, 64) + return result +} + +type RunningPlugin interface { + Init() error + GetID() uint64 + GetState() PluginState +} + +// ProcessorRunner is an interface common to processors and aggregators so that aggregators can act like processors, including being ordered with and between processors +type ProcessorRunner interface { + RunningPlugin + Start(acc telegraf.Accumulator) error + Add(m telegraf.Metric, acc telegraf.Accumulator) error + Stop() + Order() int64 + LogName() string +} + +// ProcessorRunners add sorting +type ProcessorRunners []ProcessorRunner + +func (rp ProcessorRunners) Len() int { return len(rp) } +func (rp ProcessorRunners) Swap(i, j int) { rp[i], rp[j] = rp[j], rp[i] } +func (rp ProcessorRunners) Less(i, j int) bool { return rp[i].Order() < rp[j].Order() } diff --git a/models/running_processor.go b/models/running_processor.go index 5201fb27f19c0..9544364016d10 100644 --- a/models/running_processor.go +++ b/models/running_processor.go @@ -8,18 +8,13 @@ import ( ) type RunningProcessor struct { + ID uint64 sync.Mutex log telegraf.Logger Processor telegraf.StreamingProcessor Config *ProcessorConfig } -type RunningProcessors []*RunningProcessor - -func (rp RunningProcessors) Len() int { return len(rp) } -func (rp RunningProcessors) Swap(i, j int) { rp[i], rp[j] = rp[j], rp[i] } -func (rp RunningProcessors) Less(i, j int) bool { return rp[i].Config.Order < rp[j].Config.Order } - // FilterConfig containing a name and filter type ProcessorConfig struct { Name string @@ -41,14 +36,18 @@ func NewRunningProcessor(processor telegraf.StreamingProcessor, config *Processo }) SetLoggerOnPlugin(processor, logger) - return 
&RunningProcessor{ + p := &RunningProcessor{ + ID: NextPluginID(), Processor: processor, Config: config, log: logger, } + p.setState(PluginStateCreated) + return p } func (rp *RunningProcessor) metricFiltered(metric telegraf.Metric) { + //TODO(steve): rp.MetricsFiltered.Incr(1) metric.Drop() } @@ -75,7 +74,13 @@ func (rp *RunningProcessor) MakeMetric(metric telegraf.Metric) telegraf.Metric { } func (rp *RunningProcessor) Start(acc telegraf.Accumulator) error { - return rp.Processor.Start(acc) + r.setState(PluginStateStarting) + err := r.Processor.Start(acc) + if err != nil { + return err + } + r.setState(PluginStateRunning) + return nil } func (rp *RunningProcessor) Add(m telegraf.Metric, acc telegraf.Accumulator) error { @@ -96,5 +101,15 @@ func (rp *RunningProcessor) Add(m telegraf.Metric, acc telegraf.Accumulator) err } func (rp *RunningProcessor) Stop() { + r.setState(PluginStateStopping) rp.Processor.Stop() + r.setState(PluginStateDead) +} + +func (r *RunningProcessor) Order() int64 { + return r.Config.Order +} + +func (r *RunningProcessor) GetID() uint64 { + return r.ID } diff --git a/models/running_processor_test.go b/models/running_processor_test.go index 14df03253bd38..4116a025361ed 100644 --- a/models/running_processor_test.go +++ b/models/running_processor_test.go @@ -221,9 +221,9 @@ func TestRunningProcessor_Order(t *testing.T) { }, } - procs := models.RunningProcessors{rp2, rp3, rp1} + procs := models.ProcessorRunners{rp2, rp3, rp1} sort.Sort(procs) - require.Equal(t, - models.RunningProcessors{rp1, rp2, rp3}, + require.EqualValues(t, + models.ProcessorRunners{rp1, rp2, rp3}, procs) } diff --git a/models/state.go b/models/state.go new file mode 100644 index 0000000000000..7cd25ea5628f8 --- /dev/null +++ b/models/state.go @@ -0,0 +1,42 @@ +package models + +import "sync/atomic" + +// PluginState describes what the instantiated plugin is currently doing +// needs to stay int32 for use with atomic +type PluginState int32 + +const ( + PluginStateCreated PluginState = iota + PluginStateStarting + PluginStateRunning + PluginStateStopping + PluginStateDead +) + +func (p PluginState) String() string { + switch p { + case PluginStateCreated: + return "created" + case PluginStateStarting: + return "starting" + case PluginStateRunning: + return "running" + case PluginStateStopping: + return "stopping" + default: + return "dead" + } +} + +type State struct { + state PluginState +} + +func (s *State) setState(newState PluginState) { + atomic.StoreInt32((*int32)(&s.state), int32(newState)) +} + +func (s *State) GetState() PluginState { + return PluginState(atomic.LoadInt32((*int32)(&s.state))) +} From 8595580f9faa44e2215a73a9fbf91e2ea7c8f251 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Wed, 21 Jul 2021 11:01:29 -0500 Subject: [PATCH 08/48] plugins and stuff --- plugins/inputs/execd/execd_test.go | 47 ++++++++- .../kafka_consumer_legacy.go | 3 +- plugins/inputs/opcua/opcua_client_test.go | 56 ++++++++++- plugins/outputs/sumologic/sumologic_test.go | 47 ++++++++- plugins/parsers/registry.go | 98 +++++++++---------- .../processors/reverse_dns/reversedns_test.go | 9 +- plugins/processors/starlark/starlark_test.go | 2 +- plugins/storage/all/all.go | 5 + plugins/storage/boltdb/boltdb.go | 85 ++++++++++++++++ plugins/storage/boltdb/boltdb_test.go | 48 +++++++++ plugins/storage/registry.go | 7 ++ testutil/accumulator.go | 7 ++ 12 files changed, 351 insertions(+), 63 deletions(-) create mode 100644 plugins/storage/all/all.go create mode 100644 plugins/storage/boltdb/boltdb.go create mode 
100644 plugins/storage/boltdb/boltdb_test.go create mode 100644 plugins/storage/registry.go diff --git a/plugins/inputs/execd/execd_test.go b/plugins/inputs/execd/execd_test.go index a8c8364394480..92cfe2b80b717 100644 --- a/plugins/inputs/execd/execd_test.go +++ b/plugins/inputs/execd/execd_test.go @@ -29,10 +29,11 @@ func TestSettingConfigWorks(t *testing.T) { signal = "SIGHUP" ` conf := config.NewConfig() - require.NoError(t, conf.LoadConfigData([]byte(cfg))) + conf.SetAgent(&testAgentController{}) + require.NoError(t, conf.LoadConfigData(context.Background(), context.Background(), []byte(cfg))) - require.Len(t, conf.Inputs, 1) - inp, ok := conf.Inputs[0].Input.(*Execd) + require.Len(t, conf.Inputs(), 1) + inp, ok := conf.Inputs()[0].Input.(*Execd) require.True(t, ok) require.EqualValues(t, []string{"a", "b", "c"}, inp.Command) require.EqualValues(t, 1*time.Minute, inp.RestartDelay) @@ -194,3 +195,43 @@ func runCounterProgram() error { } return nil } + +type testAgentController struct { + inputs []*models.RunningInput + processors []models.ProcessorRunner + outputs []*models.RunningOutput + // configs []*config.RunningConfigPlugin +} + +func (a *testAgentController) reset() { + a.inputs = nil + a.processors = nil + a.outputs = nil + // a.configs = nil +} + +func (a *testAgentController) RunningInputs() []*models.RunningInput { + return a.inputs +} +func (a *testAgentController) RunningProcessors() []models.ProcessorRunner { + return a.processors +} +func (a *testAgentController) RunningOutputs() []*models.RunningOutput { + return a.outputs +} +func (a *testAgentController) AddInput(input *models.RunningInput) { + a.inputs = append(a.inputs, input) +} +func (a *testAgentController) AddProcessor(processor models.ProcessorRunner) { + a.processors = append(a.processors, processor) +} +func (a *testAgentController) AddOutput(output *models.RunningOutput) { + a.outputs = append(a.outputs, output) +} +func (a *testAgentController) RunInput(input *models.RunningInput, startTime time.Time) {} +func (a *testAgentController) RunProcessor(p models.ProcessorRunner) {} +func (a *testAgentController) RunOutput(ctx context.Context, output *models.RunningOutput) {} +func (a *testAgentController) RunConfigPlugin(ctx context.Context, plugin config.ConfigPlugin) {} +func (a *testAgentController) StopInput(i *models.RunningInput) {} +func (a *testAgentController) StopProcessor(p models.ProcessorRunner) {} +func (a *testAgentController) StopOutput(p *models.RunningOutput) {} diff --git a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go index ab19e0875820a..5429ae2f294ca 100644 --- a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go +++ b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go @@ -91,6 +91,7 @@ func (k *Kafka) Start(acc telegraf.Accumulator) error { defer k.Unlock() var consumerErr error + k.done = make(chan struct{}) k.acc = acc config := consumergroup.NewConfig() @@ -122,8 +123,6 @@ func (k *Kafka) Start(acc telegraf.Accumulator) error { k.errs = k.Consumer.Errors() } - k.done = make(chan struct{}) - // Start the kafka message reader go k.receiver() k.Log.Infof("Started the kafka consumer service, peers: %v, topics: %v\n", diff --git a/plugins/inputs/opcua/opcua_client_test.go b/plugins/inputs/opcua/opcua_client_test.go index ffa8521dd05a8..05fa916a34107 100644 --- a/plugins/inputs/opcua/opcua_client_test.go +++ b/plugins/inputs/opcua/opcua_client_test.go @@ -75,6 +75,10 @@ func MapOPCTag(tags 
OPCTags) (out NodeSettings) {
 }
 
 func TestConfig(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Skipping integration test in short mode. You can remove this skip after Init() doesn't try to connect")
+ }
+
 toml := `
 [[inputs.opcua]]
 name = "localhost"
@@ -107,12 +111,16 @@ nodes = [{name="name4", identifier="4000", tags=[["tag1", "override"]]}]
 `
 
 c := config.NewConfig()
- err := c.LoadConfigData([]byte(toml))
- require.NoError(t, err)
+ c.SetAgent(&testAgentController{})
+ err := c.LoadConfigData(context.Background(), context.Background(), []byte(toml))
+ // opcua shouldn't be trying to connect in init.
+ if err != nil && !strings.Contains(err.Error(), "connection refused") {
+ require.NoError(t, err)
+ }
 
- require.Len(t, c.Inputs, 1)
+ require.Len(t, c.Inputs(), 1)
 
- o, ok := c.Inputs[0].Input.(*OpcUA)
+ o, ok := c.Inputs()[0].Input.(*OpcUA)
 require.True(t, ok)
 
 require.Len(t, o.RootNodes, 2)
@@ -255,3 +263,43 @@ func TestValidateOPCTags(t *testing.T) {
 })
 }
 }
+
+type testAgentController struct {
+ inputs []*models.RunningInput
+ processors []models.ProcessorRunner
+ outputs []*models.RunningOutput
+ // configs []*config.RunningConfigPlugin
+}
+
+func (a *testAgentController) reset() {
+ a.inputs = nil
+ a.processors = nil
+ a.outputs = nil
+ // a.configs = nil
+}
+
+func (a *testAgentController) RunningInputs() []*models.RunningInput {
+ return a.inputs
+}
+func (a *testAgentController) RunningProcessors() []models.ProcessorRunner {
+ return a.processors
+}
+func (a *testAgentController) RunningOutputs() []*models.RunningOutput {
+ return a.outputs
+}
+func (a *testAgentController) AddInput(input *models.RunningInput) {
+ a.inputs = append(a.inputs, input)
+}
+func (a *testAgentController) AddProcessor(processor models.ProcessorRunner) {
+ a.processors = append(a.processors, processor)
+}
+func (a *testAgentController) AddOutput(output *models.RunningOutput) {
+ a.outputs = append(a.outputs, output)
+}
+func (a *testAgentController) RunInput(input *models.RunningInput, startTime time.Time) {}
+func (a *testAgentController) RunProcessor(p models.ProcessorRunner) {}
+func (a *testAgentController) RunOutput(ctx context.Context, output *models.RunningOutput) {}
+func (a *testAgentController) RunConfigPlugin(ctx context.Context, plugin config.ConfigPlugin) {}
+func (a *testAgentController) StopInput(i *models.RunningInput) {}
+func (a *testAgentController) StopProcessor(p models.ProcessorRunner) {}
+func (a *testAgentController) StopOutput(p *models.RunningOutput) {}
diff --git a/plugins/outputs/sumologic/sumologic_test.go b/plugins/outputs/sumologic/sumologic_test.go
index 5ce502bab2c0e..09bf41c9b294b 100644
--- a/plugins/outputs/sumologic/sumologic_test.go
+++ b/plugins/outputs/sumologic/sumologic_test.go
@@ -4,6 +4,7 @@ import (
 "bufio"
 "bytes"
 "compress/gzip"
+ "context"
 "fmt"
 "io"
 "io/ioutil"
@@ -21,6 +22,7 @@ import (
 "github.com/influxdata/telegraf/config"
 "github.com/influxdata/telegraf/internal"
 "github.com/influxdata/telegraf/metric"
+ "github.com/influxdata/telegraf/models"
 "github.com/influxdata/telegraf/plugins/serializers/carbon2"
 "github.com/influxdata/telegraf/plugins/serializers/graphite"
 "github.com/influxdata/telegraf/plugins/serializers/prometheus"
@@ -432,11 +434,12 @@ func TestTOMLConfig(t *testing.T) {
 for _, tt := range testcases {
 t.Run(tt.name, func(t *testing.T) {
 c := config.NewConfig()
+ c.SetAgent(&testAgentController{})
 
 if tt.expectedError {
- require.Error(t, c.LoadConfigData(tt.configBytes))
+ require.Error(t, 
c.LoadConfigData(context.Background(), context.Background(), tt.configBytes)) } else { - require.NoError(t, c.LoadConfigData(tt.configBytes)) + require.NoError(t, c.LoadConfigData(context.Background(), context.Background(), tt.configBytes)) } }) } @@ -647,3 +650,43 @@ func countLines(t *testing.T, body io.Reader) int { return linesCount } + +type testAgentController struct { + inputs []*models.RunningInput + processors []models.ProcessorRunner + outputs []*models.RunningOutput + // configs []*config.RunningConfigPlugin +} + +func (a *testAgentController) reset() { + a.inputs = nil + a.processors = nil + a.outputs = nil + // a.configs = nil +} + +func (a *testAgentController) RunningInputs() []*models.RunningInput { + return a.inputs +} +func (a *testAgentController) RunningProcessors() []models.ProcessorRunner { + return a.processors +} +func (a *testAgentController) RunningOutputs() []*models.RunningOutput { + return a.outputs +} +func (a *testAgentController) AddInput(input *models.RunningInput) { + a.inputs = append(a.inputs, input) +} +func (a *testAgentController) AddProcessor(processor models.ProcessorRunner) { + a.processors = append(a.processors, processor) +} +func (a *testAgentController) AddOutput(output *models.RunningOutput) { + a.outputs = append(a.outputs, output) +} +func (a *testAgentController) RunInput(input *models.RunningInput, startTime time.Time) {} +func (a *testAgentController) RunProcessor(p models.ProcessorRunner) {} +func (a *testAgentController) RunOutput(ctx context.Context, output *models.RunningOutput) {} +func (a *testAgentController) RunConfigPlugin(ctx context.Context, plugin config.ConfigPlugin) {} +func (a *testAgentController) StopInput(i *models.RunningInput) {} +func (a *testAgentController) StopProcessor(p models.ProcessorRunner) {} +func (a *testAgentController) StopOutput(p *models.RunningOutput) {} diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go index cc2102c9532d2..6c35a3971972f 100644 --- a/plugins/parsers/registry.go +++ b/plugins/parsers/registry.go @@ -66,107 +66,107 @@ type Parser interface { // and can be used to instantiate _any_ of the parsers. type Config struct { // Dataformat can be one of: json, influx, graphite, value, nagios - DataFormat string `toml:"data_format"` + DataFormat string `toml:"data_format" json:"data_format"` // Separator only applied to Graphite data. - Separator string `toml:"separator"` + Separator string `toml:"separator" json:"separator"` // Templates only apply to Graphite data. - Templates []string `toml:"templates"` + Templates []string `toml:"templates" json:"templates"` // TagKeys only apply to JSON data - TagKeys []string `toml:"tag_keys"` + TagKeys []string `toml:"tag_keys" json:"tag_keys"` // Array of glob pattern strings keys that should be added as string fields. - JSONStringFields []string `toml:"json_string_fields"` + JSONStringFields []string `toml:"json_string_fields" json:"json_string_fields"` - JSONNameKey string `toml:"json_name_key"` + JSONNameKey string `toml:"json_name_key" json:"json_name_key"` // MetricName applies to JSON & value. This will be the name of the measurement. 
- MetricName string `toml:"metric_name"` + MetricName string `toml:"metric_name" json:"metric_name"` // holds a gjson path for json parser - JSONQuery string `toml:"json_query"` + JSONQuery string `toml:"json_query" json:"json_query"` // key of time - JSONTimeKey string `toml:"json_time_key"` + JSONTimeKey string `toml:"json_time_key" json:"json_time_key"` // time format - JSONTimeFormat string `toml:"json_time_format"` + JSONTimeFormat string `toml:"json_time_format" json:"json_time_format"` // default timezone - JSONTimezone string `toml:"json_timezone"` + JSONTimezone string `toml:"json_timezone" json:"json_timezone"` // Whether to continue if a JSON object can't be coerced - JSONStrict bool `toml:"json_strict"` + JSONStrict bool `toml:"json_strict" json:"json_strict"` // Authentication file for collectd - CollectdAuthFile string `toml:"collectd_auth_file"` + CollectdAuthFile string `toml:"collectd_auth_file" json:"collectd_auth_file"` // One of none (default), sign, or encrypt - CollectdSecurityLevel string `toml:"collectd_security_level"` + CollectdSecurityLevel string `toml:"collectd_security_level" json:"collectd_security_level"` // Dataset specification for collectd - CollectdTypesDB []string `toml:"collectd_types_db"` + CollectdTypesDB []string `toml:"collectd_types_db" json:"collectd_types_db"` // whether to split or join multivalue metrics - CollectdSplit string `toml:"collectd_split"` + CollectdSplit string `toml:"collectd_split" json:"collectd_split"` // DataType only applies to value, this will be the type to parse value to - DataType string `toml:"data_type"` + DataType string `toml:"data_type" json:"data_type"` // DefaultTags are the default tags that will be added to all parsed metrics. - DefaultTags map[string]string `toml:"default_tags"` + DefaultTags map[string]string `toml:"default_tags" json:"default_tags"` // an optional json path containing the metric registry object // if left empty, the whole json object is parsed as a metric registry - DropwizardMetricRegistryPath string `toml:"dropwizard_metric_registry_path"` + DropwizardMetricRegistryPath string `toml:"dropwizard_metric_registry_path" json:"dropwizard_metric_registry_path"` // an optional json path containing the default time of the metrics // if left empty, the processing time is used - DropwizardTimePath string `toml:"dropwizard_time_path"` + DropwizardTimePath string `toml:"dropwizard_time_path" json:"dropwizard_time_path"` // time format to use for parsing the time field // defaults to time.RFC3339 - DropwizardTimeFormat string `toml:"dropwizard_time_format"` + DropwizardTimeFormat string `toml:"dropwizard_time_format" json:"dropwizard_time_format"` // an optional json path pointing to a json object with tag key/value pairs // takes precedence over DropwizardTagPathsMap - DropwizardTagsPath string `toml:"dropwizard_tags_path"` + DropwizardTagsPath string `toml:"dropwizard_tags_path" json:"dropwizard_tags_path"` // an optional map containing tag names as keys and json paths to retrieve the tag values from as values // used if TagsPath is empty or doesn't return any tags - DropwizardTagPathsMap map[string]string `toml:"dropwizard_tag_paths_map"` + DropwizardTagPathsMap map[string]string `toml:"dropwizard_tag_paths_map" json:"dropwizard_tag_paths_map"` //grok patterns - GrokPatterns []string `toml:"grok_patterns"` - GrokNamedPatterns []string `toml:"grok_named_patterns"` - GrokCustomPatterns string `toml:"grok_custom_patterns"` - GrokCustomPatternFiles []string `toml:"grok_custom_pattern_files"` - 
GrokTimezone string `toml:"grok_timezone"` - GrokUniqueTimestamp string `toml:"grok_unique_timestamp"` + GrokPatterns []string `toml:"grok_patterns" json:"grok_patterns"` + GrokNamedPatterns []string `toml:"grok_named_patterns" json:"grok_named_patterns"` + GrokCustomPatterns string `toml:"grok_custom_patterns" json:"grok_custom_patterns"` + GrokCustomPatternFiles []string `toml:"grok_custom_pattern_files" json:"grok_custom_pattern_files"` + GrokTimezone string `toml:"grok_timezone" json:"grok_timezone"` + GrokUniqueTimestamp string `toml:"grok_unique_timestamp" json:"grok_unique_timestamp"` //csv configuration - CSVColumnNames []string `toml:"csv_column_names"` - CSVColumnTypes []string `toml:"csv_column_types"` - CSVComment string `toml:"csv_comment"` - CSVDelimiter string `toml:"csv_delimiter"` - CSVHeaderRowCount int `toml:"csv_header_row_count"` - CSVMeasurementColumn string `toml:"csv_measurement_column"` - CSVSkipColumns int `toml:"csv_skip_columns"` - CSVSkipRows int `toml:"csv_skip_rows"` - CSVTagColumns []string `toml:"csv_tag_columns"` - CSVTimestampColumn string `toml:"csv_timestamp_column"` - CSVTimestampFormat string `toml:"csv_timestamp_format"` - CSVTimezone string `toml:"csv_timezone"` - CSVTrimSpace bool `toml:"csv_trim_space"` - CSVSkipValues []string `toml:"csv_skip_values"` + CSVColumnNames []string `toml:"csv_column_names" json:"csv_column_names"` + CSVColumnTypes []string `toml:"csv_column_types" json:"csv_column_types"` + CSVComment string `toml:"csv_comment" json:"csv_comment"` + CSVDelimiter string `toml:"csv_delimiter" json:"csv_delimiter"` + CSVHeaderRowCount int `toml:"csv_header_row_count" json:"csv_header_row_count"` + CSVMeasurementColumn string `toml:"csv_measurement_column" json:"csv_measurement_column"` + CSVSkipColumns int `toml:"csv_skip_columns" json:"csv_skip_columns"` + CSVSkipRows int `toml:"csv_skip_rows" json:"csv_skip_rows"` + CSVTagColumns []string `toml:"csv_tag_columns" json:"csv_tag_columns"` + CSVTimestampColumn string `toml:"csv_timestamp_column" json:"csv_timestamp_column"` + CSVTimestampFormat string `toml:"csv_timestamp_format" json:"csv_timestamp_format"` + CSVTimezone string `toml:"csv_timezone" json:"csv_timezone"` + CSVTrimSpace bool `toml:"csv_trim_space" json:"csv_trim_space"` + CSVSkipValues []string `toml:"csv_skip_values" json:"csv_skip_values"` // FormData configuration - FormUrlencodedTagKeys []string `toml:"form_urlencoded_tag_keys"` + FormUrlencodedTagKeys []string `toml:"form_urlencoded_tag_keys" json:"form_urlencoded_tag_keys"` // Value configuration - ValueFieldName string `toml:"value_field_name"` + ValueFieldName string `toml:"value_field_name" json:"value_field_name"` // XPath configuration - XPathPrintDocument bool `toml:"xpath_print_document"` - XPathProtobufFile string `toml:"xpath_protobuf_file"` - XPathProtobufType string `toml:"xpath_protobuf_type"` + XPathPrintDocument bool `toml:"xpath_print_document" json:"xpath_print_document"` + XPathProtobufFile string `toml:"xpath_protobuf_file" json:"xpath_protobuf_file"` + XPathProtobufType string `toml:"xpath_protobuf_type" json:"xpath_protobuf_type"` XPathConfig []XPathConfig // JSONPath configuration - JSONV2Config []JSONV2Config `toml:"json_v2"` + JSONV2Config []JSONV2Config `toml:"json_v2" json:"json_v2"` } type XPathConfig xpath.Config diff --git a/plugins/processors/reverse_dns/reversedns_test.go b/plugins/processors/reverse_dns/reversedns_test.go index 5fcce5fb4725a..eab2853251b1e 100644 --- a/plugins/processors/reverse_dns/reversedns_test.go +++ 
b/plugins/processors/reverse_dns/reversedns_test.go @@ -1,12 +1,14 @@ package reverse_dns import ( + "context" "runtime" "testing" "time" "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/agent" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" @@ -54,8 +56,11 @@ func TestSimpleReverseLookup(t *testing.T) { func TestLoadingConfig(t *testing.T) { c := config.NewConfig() - err := c.LoadConfigData([]byte("[[processors.reverse_dns]]\n" + sampleConfig)) + ctx := context.Background() + a := agent.NewAgent(ctx, c) + c.SetAgent(a) + err := c.LoadConfigData(ctx, ctx, []byte("[[processors.reverse_dns]]\n"+sampleConfig)) require.NoError(t, err) - require.Len(t, c.Processors, 1) + require.Len(t, c.Processors(), 1) } diff --git a/plugins/processors/starlark/starlark_test.go b/plugins/processors/starlark/starlark_test.go index 15152a2f349c3..4868bfca5b627 100644 --- a/plugins/processors/starlark/starlark_test.go +++ b/plugins/processors/starlark/starlark_test.go @@ -3189,7 +3189,7 @@ func TestAllScriptTestData(t *testing.T) { paths := []string{"testdata", "plugins/processors/starlark/testdata"} for _, testdataPath := range paths { filepath.Walk(testdataPath, func(path string, info os.FileInfo, err error) error { - if info == nil || info.IsDir() { + if info == nil || info.IsDir() || !strings.HasSuffix(info.Name(), ".star") { return nil } fn := path diff --git a/plugins/storage/all/all.go b/plugins/storage/all/all.go new file mode 100644 index 0000000000000..40709549cb9bf --- /dev/null +++ b/plugins/storage/all/all.go @@ -0,0 +1,5 @@ +package all + +import ( + _ "github.com/influxdata/telegraf/plugins/storage/boltdb" +) diff --git a/plugins/storage/boltdb/boltdb.go b/plugins/storage/boltdb/boltdb.go new file mode 100644 index 0000000000000..397c0879d9d14 --- /dev/null +++ b/plugins/storage/boltdb/boltdb.go @@ -0,0 +1,85 @@ +package boltdb + +import ( + "fmt" + + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/storage" + "github.com/ugorji/go/codec" + bolt "go.etcd.io/bbolt" +) + +var ( + codecHandle = &codec.MsgpackHandle{} +) + +type BoltDBStorage struct { + Filename string `toml:"file"` + + db *bolt.DB +} + +func (s *BoltDBStorage) Init() error { + if len(s.Filename) == 0 { + return fmt.Errorf("Storage service requires filename of db") + } + db, err := bolt.Open(s.Filename, 0600, nil) + if err != nil { + return fmt.Errorf("couldn't open file %q: %w", s.Filename, err) + } + s.db = db + return nil +} + +func (s *BoltDBStorage) Close() error { + fmt.Println("boltdb closing") + return s.db.Close() +} + +func (s *BoltDBStorage) Load(namespace, key string, obj interface{}) error { + err := s.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(namespace)) + if b == nil { + // don't error on not found + return nil + } + v := b.Get([]byte(key)) + decoder := codec.NewDecoderBytes(v, codecHandle) + + if err := decoder.Decode(obj); err != nil { + return fmt.Errorf("decoding: %w", err) + } + + return nil + }) + return err +} + +func (s *BoltDBStorage) Save(namespace, key string, value interface{}) error { + return s.db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(namespace)) + if b == nil { + bucket, err := tx.CreateBucket([]byte(namespace)) + if err != nil { + return err + } + b = bucket + } + var byt []byte + enc := codec.NewEncoderBytes(&byt, codecHandle) + if err := enc.Encode(value); err != nil { 
+ return fmt.Errorf("encoding: %w", err) + } + return b.Put([]byte(key), byt) + }) +} + +func (s *BoltDBStorage) GetName() string { + return "internal" +} + +func init() { + storage.Add("internal", func() config.StoragePlugin { + return &BoltDBStorage{} + }) +} diff --git a/plugins/storage/boltdb/boltdb_test.go b/plugins/storage/boltdb/boltdb_test.go new file mode 100644 index 0000000000000..c1a77605fb5d6 --- /dev/null +++ b/plugins/storage/boltdb/boltdb_test.go @@ -0,0 +1,48 @@ +package boltdb + +import ( + "os" + "testing" + + "github.com/stretchr/testify/require" +) + +type Foo struct { + Shoes int + Carrots string + Aardvark bool + Neon float64 +} + +func TestSaveLoadCycle(t *testing.T) { + p := &BoltDBStorage{ + Filename: "testdb.db", + } + defer func() { + os.Remove("testdb.db") + }() + err := p.Init() + require.NoError(t, err) + + // check that loading a missing key doesn't fail + err = p.Load("testing", "foo", &Foo{}) + require.NoError(t, err) + + foo := Foo{ + Shoes: 3, + Carrots: "blue", + Aardvark: true, + Neon: 3.1415, + } + err = p.Save("testing", "foo", foo) + require.NoError(t, err) + + obj := &Foo{} + err = p.Load("testing", "foo", obj) + require.NoError(t, err) + + require.EqualValues(t, foo, *obj) + + err = p.Close() + require.NoError(t, err) +} diff --git a/plugins/storage/registry.go b/plugins/storage/registry.go new file mode 100644 index 0000000000000..b8401a886932f --- /dev/null +++ b/plugins/storage/registry.go @@ -0,0 +1,7 @@ +package storage + +import "github.com/influxdata/telegraf/config" + +func Add(name string, creator config.StorageCreator) { + config.StoragePlugins[name] = creator +} diff --git a/testutil/accumulator.go b/testutil/accumulator.go index 4da3a76fcc8ca..16e412ed1b665 100644 --- a/testutil/accumulator.go +++ b/testutil/accumulator.go @@ -200,6 +200,10 @@ func (a *Accumulator) WithTracking(_ int) telegraf.TrackingAccumulator { return a } +func (a *Accumulator) WithNewMetricMaker(logName string, logger telegraf.Logger, f func(metric telegraf.Metric) telegraf.Metric) telegraf.Accumulator { + return a +} + func (a *Accumulator) AddTrackingMetric(m telegraf.Metric) telegraf.TrackingID { a.AddMetric(m) return newTrackingID() @@ -739,3 +743,6 @@ func (n *NopAccumulator) AddMetric(telegraf.Metric) {} func (n *NopAccumulator) SetPrecision(_ time.Duration) {} func (n *NopAccumulator) AddError(_ error) {} func (n *NopAccumulator) WithTracking(_ int) telegraf.TrackingAccumulator { return nil } +func (n *NopAccumulator) WithNewMetricMaker(logName string, logger telegraf.Logger, f func(metric telegraf.Metric) telegraf.Metric) telegraf.Accumulator { + return nil +} From 714a809847aeedb46004cd42e2fd7df783ec3a3e Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Wed, 21 Jul 2021 11:12:02 -0500 Subject: [PATCH 09/48] update go mod --- go.mod | 4 +++- go.sum | 10 +++++++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index f0f36e2df8717..6a147c6773a79 100644 --- a/go.mod +++ b/go.mod @@ -67,7 +67,7 @@ require ( github.com/google/go-cmp v0.5.5 github.com/google/go-github/v32 v32.1.0 github.com/gopcua/opcua v0.1.13 - github.com/gorilla/mux v1.7.3 + github.com/gorilla/mux v1.8.0 github.com/gorilla/websocket v1.4.2 github.com/gosnmp/gosnmp v1.32.0 github.com/grid-x/modbus v0.0.0-20210224155242-c4a3d042e99b @@ -128,6 +128,7 @@ require ( github.com/tidwall/gjson v1.8.0 github.com/tinylib/msgp v1.1.5 github.com/tklauser/go-sysconf 
v0.3.5 // indirect + github.com/ugorji/go/codec v1.2.6 github.com/vapourismo/knx-go v0.0.0-20201122213738-75fe09ace330 github.com/vjeantet/grok v1.0.1 github.com/vmware/govmomi v0.19.0 @@ -137,6 +138,7 @@ require ( github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a // indirect github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 // indirect + go.etcd.io/bbolt v1.3.6 go.mongodb.org/mongo-driver v1.5.3 go.starlark.net v0.0.0-20210406145628-7a1108eaa012 go.uber.org/multierr v1.6.0 // indirect diff --git a/go.sum b/go.sum index 20b7759feef97..0dfccc49e3a73 100644 --- a/go.sum +++ b/go.sum @@ -782,8 +782,9 @@ github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51 github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -1484,6 +1485,10 @@ github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+l github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/ugorji/go v1.2.6 h1:tGiWC9HENWE2tqYycIqFTNorMmFRVhNwCpDOpWqnk8E= +github.com/ugorji/go v1.2.6/go.mod h1:anCg0y61KIhDlPZmnH+so+RQbysYVyDko0IMgJv0Nn0= +github.com/ugorji/go/codec v1.2.6 h1:7kbGefxLoDBuYXOms4yD7223OpNMMPNPZxXk5TvFcyQ= +github.com/ugorji/go/codec v1.2.6/go.mod h1:V6TCNZ4PHqoHGFZuSG1W8nrCzzdgA2DozYxWFFpvxTw= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= @@ -1544,6 +1549,8 @@ github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxt go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod 
h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= @@ -1811,6 +1818,7 @@ golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= From fd12e02002fc51af35730b02d9c147e3229d8eae Mon Sep 17 00:00:00 2001 From: helenosheaa <38860767+helenosheaa@users.noreply.github.com> Date: Wed, 21 Jul 2021 12:24:51 -0400 Subject: [PATCH 10/48] running processor state --- models/running_processor.go | 1 + 1 file changed, 1 insertion(+) diff --git a/models/running_processor.go b/models/running_processor.go index 9544364016d10..26ff79a3d754b 100644 --- a/models/running_processor.go +++ b/models/running_processor.go @@ -13,6 +13,7 @@ type RunningProcessor struct { log telegraf.Logger Processor telegraf.StreamingProcessor Config *ProcessorConfig + State } // FilterConfig containing a name and filter From cb198c10c18114394f871d6f8fea592ea6f4a0e6 Mon Sep 17 00:00:00 2001 From: helenosheaa <38860767+helenosheaa@users.noreply.github.com> Date: Wed, 21 Jul 2021 12:29:16 -0400 Subject: [PATCH 11/48] config.Duration, auto imports, missing bracket --- agent/accumulator.go | 1 + agent/agent.go | 34 ++++++++++++++++------------------ agent/agent_test.go | 3 ++- cmd/telegraf/telegraf.go | 1 - config/config_test.go | 20 ++++++++++---------- 5 files changed, 29 insertions(+), 30 deletions(-) diff --git a/agent/accumulator.go b/agent/accumulator.go index 705f38b49b116..a8a403750702a 100644 --- a/agent/accumulator.go +++ b/agent/accumulator.go @@ -1,6 +1,7 @@ package agent import ( + "sync" "time" "github.com/influxdata/telegraf" diff --git a/agent/agent.go b/agent/agent.go index a7e21e07a0a44..9e4a9f3909db5 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -2,19 +2,20 @@ package agent import ( "context" + "errors" "fmt" "log" "os" + "reflect" "runtime" - "sort" "sync" "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/channel" "github.com/influxdata/telegraf/models" - "github.com/influxdata/telegraf/plugins/serializers/influx" ) // Agent runs a set of plugins. @@ -522,7 +523,7 @@ func (a *Agent) RunConfigPlugin(ctx context.Context, plugin config.ConfigPlugin) log.Printf("E! 
[agent] Storage plugin failed to close: %v", err) } } - } + } a.configPluginUnit.Lock() pos := -1 @@ -552,7 +553,6 @@ func (a *Agent) StopOutput(output *models.RunningOutput) { } } - func updateWindow(start time.Time, roundInterval bool, period time.Duration) (time.Time, time.Time) { var until time.Time if roundInterval { @@ -573,15 +573,15 @@ func (a *Agent) AddOutput(output *models.RunningOutput) { a.outputGroupUnit.Lock() a.outputGroupUnit.outputs = append(a.outputGroupUnit.outputs, outputUnit{output: output}) a.outputGroupUnit.Unlock() - } +} func (a *Agent) startOutput(output *models.RunningOutput) error { if err := a.connectOutput(a.ctx, output); err != nil { return fmt.Errorf("connecting output %s: %w", output.LogName(), err) - } + } return nil - } +} // connectOutputs connects to all outputs. func (a *Agent) connectOutput(ctx context.Context, output *models.RunningOutput) error { @@ -622,8 +622,8 @@ func (a *Agent) RunOutput(ctx context.Context, output *models.RunningOutput) { } a.outputGroupUnit.Unlock() - interval := a.Config.Agent.FlushInterval.Duration - jitter := a.Config.Agent.FlushJitter.Duration + interval := a.Config.Agent.FlushInterval.Duration + jitter := a.Config.Agent.FlushJitter.Duration // Overwrite agent flush_interval if this plugin has its own. if output.Config.FlushInterval != 0 { interval = output.Config.FlushInterval @@ -642,7 +642,7 @@ func (a *Agent) RunOutput(ctx context.Context, output *models.RunningOutput) { if err = a.startOutput(output); err != nil { log.Printf("E! [agent] failed to start output %q: %v", output.LogName(), err) time.Sleep(10 * time.Second) - } + } } a.flushLoop(ctx, output, ticker) @@ -655,7 +655,7 @@ func (a *Agent) RunOutput(ctx context.Context, output *models.RunningOutput) { // disconnect it from the output broadcaster and remove it from the list a.outputGroupUnit.outputs = append(a.outputGroupUnit.outputs[:i], a.outputGroupUnit.outputs[i+1:]...) } - } + } a.outputGroupUnit.Unlock() a.flushOnce(output, ticker, output.Write) @@ -673,12 +673,12 @@ func (a *Agent) runOutputFanout() { for i, output := range outs { if i == len(outs)-1 { output.output.AddMetric(metric) - } else { + } else { output.output.AddMetric(metric.Copy()) - } - } - } - + } + } + } +} // flushLoop runs an output's flush function periodically until the context is done. func (a *Agent) flushLoop( @@ -747,8 +747,6 @@ func (a *Agent) flushOnce( } } - - // Returns the rounding precision for metrics. 
func getPrecision(precision, interval time.Duration) time.Duration { if precision > 0 { diff --git a/agent/agent_test.go b/agent/agent_test.go index ea04f4dda2640..7aa3a118d76de 100644 --- a/agent/agent_test.go +++ b/agent/agent_test.go @@ -1,13 +1,14 @@ package agent import ( + "context" + "strings" "testing" "time" "github.com/influxdata/telegraf/config" _ "github.com/influxdata/telegraf/plugins/inputs/all" _ "github.com/influxdata/telegraf/plugins/outputs/all" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index b67603acad3cb..2d6dca4c6b65d 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -2,7 +2,6 @@ package main import ( "context" - "errors" "flag" "fmt" "log" diff --git a/config/config_test.go b/config/config_test.go index 8c066047d6242..39bf116c401cb 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -240,8 +240,8 @@ func TestConfig_LoadSpecialTypes(t *testing.T) { input, ok := c.Inputs()[0].Input.(*MockupInputPlugin) require.True(t, ok) // Tests telegraf duration parsing. - require.Equal(t, Duration(time.Second), input.WriteTimeout) // Tests telegraf size parsing. - require.Equal(t, Size(1024*1024), input.MaxBodySize) + require.Equal(t, config.Duration(time.Second), input.WriteTimeout) // Tests telegraf size parsing. + require.Equal(t, config.Size(1024*1024), input.MaxBodySize) // Tests toml multiline basic strings. require.Equal(t, "/path/to/my/cert", strings.TrimRight(input.TLSCert, "\r\n")) } @@ -499,13 +499,13 @@ func (a *testAgentController) StopOutput(p *models.RunningOutput) /*** Mockup INPUT plugin for testing to avoid cyclic dependencies ***/ type MockupInputPlugin struct { - Servers []string `toml:"servers"` - Methods []string `toml:"methods"` - Timeout Duration `toml:"timeout"` - ReadTimeout Duration `toml:"read_timeout"` - WriteTimeout Duration `toml:"write_timeout"` - MaxBodySize Size `toml:"max_body_size"` - Port int `toml:"port"` + Servers []string `toml:"servers"` + Methods []string `toml:"methods"` + Timeout config.Duration `toml:"timeout"` + ReadTimeout config.Duration `toml:"read_timeout"` + WriteTimeout config.Duration `toml:"write_timeout"` + MaxBodySize config.Size `toml:"max_body_size"` + Port int `toml:"port"` Command string PidFile string Log telegraf.Logger `toml:"-"` @@ -538,7 +538,7 @@ func (m *MockupOuputPlugin) Write(metrics []telegraf.Metric) error { return nil // Register the mockup plugin on loading func init() { // Register the mockup input plugin for the required names - inputs.Add("exec", func() telegraf.Input { return &MockupInputPlugin{Timeout: Duration(time.Second * 5)} }) + inputs.Add("exec", func() telegraf.Input { return &MockupInputPlugin{Timeout: config.Duration(time.Second * 5)} }) inputs.Add("http_listener_v2", func() telegraf.Input { return &MockupInputPlugin{} }) inputs.Add("memcached", func() telegraf.Input { return &MockupInputPlugin{} }) inputs.Add("procstat", func() telegraf.Input { return &MockupInputPlugin{} }) From 7c9fa8a6941644b61c18a35f819ffc8627b1d246 Mon Sep 17 00:00:00 2001 From: helenosheaa <38860767+helenosheaa@users.noreply.github.com> Date: Wed, 21 Jul 2021 12:31:56 -0400 Subject: [PATCH 12/48] fix naming in runningprocessor --- models/running_processor.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/models/running_processor.go b/models/running_processor.go index 26ff79a3d754b..9435126b52a1c 100644 --- 
a/models/running_processor.go +++ b/models/running_processor.go @@ -75,12 +75,12 @@ func (rp *RunningProcessor) MakeMetric(metric telegraf.Metric) telegraf.Metric { } func (rp *RunningProcessor) Start(acc telegraf.Accumulator) error { - r.setState(PluginStateStarting) - err := r.Processor.Start(acc) + rp.setState(PluginStateStarting) + err := rp.Processor.Start(acc) if err != nil { return err } - r.setState(PluginStateRunning) + rp.setState(PluginStateRunning) return nil } @@ -102,9 +102,9 @@ func (rp *RunningProcessor) Add(m telegraf.Metric, acc telegraf.Accumulator) err } func (rp *RunningProcessor) Stop() { - r.setState(PluginStateStopping) + rp.setState(PluginStateStopping) rp.Processor.Stop() - r.setState(PluginStateDead) + rp.setState(PluginStateDead) } func (r *RunningProcessor) Order() int64 { From 372422588dbd9d3d3ec87f5a8297023d2d5e1c5b Mon Sep 17 00:00:00 2001 From: helenosheaa <38860767+helenosheaa@users.noreply.github.com> Date: Wed, 21 Jul 2021 12:37:11 -0400 Subject: [PATCH 13/48] more consistency with rp --- models/running_processor.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/models/running_processor.go b/models/running_processor.go index 9435126b52a1c..f07822c3d69b7 100644 --- a/models/running_processor.go +++ b/models/running_processor.go @@ -107,10 +107,10 @@ func (rp *RunningProcessor) Stop() { rp.setState(PluginStateDead) } -func (r *RunningProcessor) Order() int64 { - return r.Config.Order +func (rp *RunningProcessor) Order() int64 { + return rp.Config.Order } -func (r *RunningProcessor) GetID() uint64 { - return r.ID +func (rp *RunningProcessor) GetID() uint64 { + return rp.ID } From 99cf9f4be5c80097d8a31081ce5e81eff1af64f7 Mon Sep 17 00:00:00 2001 From: helenosheaa <38860767+helenosheaa@users.noreply.github.com> Date: Wed, 21 Jul 2021 12:42:00 -0400 Subject: [PATCH 14/48] package imports --- plugins/inputs/execd/execd_test.go | 1 + plugins/inputs/opcua/opcua_client_test.go | 3 +++ 2 files changed, 4 insertions(+) diff --git a/plugins/inputs/execd/execd_test.go b/plugins/inputs/execd/execd_test.go index 92cfe2b80b717..bd4e56d5390d9 100644 --- a/plugins/inputs/execd/execd_test.go +++ b/plugins/inputs/execd/execd_test.go @@ -2,6 +2,7 @@ package execd import ( "bufio" + "context" "flag" "fmt" "os" diff --git a/plugins/inputs/opcua/opcua_client_test.go b/plugins/inputs/opcua/opcua_client_test.go index 05fa916a34107..3a678fc385520 100644 --- a/plugins/inputs/opcua/opcua_client_test.go +++ b/plugins/inputs/opcua/opcua_client_test.go @@ -1,12 +1,15 @@ package opcua_client import ( + "context" "fmt" "reflect" + "strings" "testing" "time" "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/models" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) From a749373453955b28607e61deb7c37427923ce576 Mon Sep 17 00:00:00 2001 From: helenosheaa <38860767+helenosheaa@users.noreply.github.com> Date: Wed, 21 Jul 2021 15:04:56 -0400 Subject: [PATCH 15/48] starlark test fix --- plugins/processors/starlark/starlark_test.go | 49 ++++++++++++++++++-- 1 file changed, 46 insertions(+), 3 deletions(-) diff --git a/plugins/processors/starlark/starlark_test.go b/plugins/processors/starlark/starlark_test.go index 4868bfca5b627..a3756ab418fe9 100644 --- a/plugins/processors/starlark/starlark_test.go +++ b/plugins/processors/starlark/starlark_test.go @@ -1,6 +1,7 @@ package starlark import ( + "context" "errors" "fmt" "io/ioutil" @@ -12,6 +13,7 @@ import ( 
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" @@ -2604,14 +2606,15 @@ def apply(metric): // Build a Starlark plugin from the provided configuration. func buildPlugin(configContent string) (*Starlark, error) { c := config.NewConfig() - err := c.LoadConfigData([]byte(configContent)) + c.SetAgent(&testAgentController{}) + err := c.LoadConfigData(context.Background(), context.Background(), []byte(configContent)) if err != nil { return nil, err } - if len(c.Processors) != 1 { + if len(c.Processors()) != 1 { return nil, errors.New("Only one processor was expected") } - plugin, ok := (c.Processors[0].Processor).(*Starlark) + plugin, ok := (c.Processors()[0].(*models.RunningProcessor).Processor).(*Starlark) if !ok { return nil, errors.New("Only a Starlark processor was expected") } @@ -3298,3 +3301,43 @@ func testLoadFunc(module string, logger telegraf.Logger) (starlark.StringDict, e func testNow(thread *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { return starlarktime.Time(time.Date(2021, 4, 15, 12, 0, 0, 999, time.UTC)), nil } + +type testAgentController struct { + inputs []*models.RunningInput + processors []models.ProcessorRunner + outputs []*models.RunningOutput + // configs []*config.RunningConfigPlugin +} + +func (a *testAgentController) reset() { + a.inputs = nil + a.processors = nil + a.outputs = nil + // a.configs = nil +} + +func (a *testAgentController) RunningInputs() []*models.RunningInput { + return a.inputs +} +func (a *testAgentController) RunningProcessors() []models.ProcessorRunner { + return a.processors +} +func (a *testAgentController) RunningOutputs() []*models.RunningOutput { + return a.outputs +} +func (a *testAgentController) AddInput(input *models.RunningInput) { + a.inputs = append(a.inputs, input) +} +func (a *testAgentController) AddProcessor(processor models.ProcessorRunner) { + a.processors = append(a.processors, processor) +} +func (a *testAgentController) AddOutput(output *models.RunningOutput) { + a.outputs = append(a.outputs, output) +} +func (a *testAgentController) RunInput(input *models.RunningInput, startTime time.Time) {} +func (a *testAgentController) RunProcessor(p models.ProcessorRunner) {} +func (a *testAgentController) RunOutput(ctx context.Context, output *models.RunningOutput) {} +func (a *testAgentController) RunConfigPlugin(ctx context.Context, plugin config.ConfigPlugin) {} +func (a *testAgentController) StopInput(i *models.RunningInput) {} +func (a *testAgentController) StopProcessor(p models.ProcessorRunner) {} +func (a *testAgentController) StopOutput(p *models.RunningOutput) {} From 3bd6a95f580b4db4eb25198c4ef310c5f7ac4933 Mon Sep 17 00:00:00 2001 From: helenosheaa <38860767+helenosheaa@users.noreply.github.com> Date: Wed, 21 Jul 2021 15:07:14 -0400 Subject: [PATCH 16/48] json v2 and starlark fixes --- plugins/parsers/json_v2/parser_test.go | 48 ++++++++++++++++++++++++-- 1 file changed, 46 insertions(+), 2 deletions(-) diff --git a/plugins/parsers/json_v2/parser_test.go b/plugins/parsers/json_v2/parser_test.go index f0f018034dc5b..3218ca6d1c942 100644 --- a/plugins/parsers/json_v2/parser_test.go +++ b/plugins/parsers/json_v2/parser_test.go @@ -2,13 +2,16 @@ package json_v2_test import ( "bufio" + "context" "fmt" "io/ioutil" "os" "testing" 
+ "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs/file" "github.com/influxdata/telegraf/plugins/parsers/influx" @@ -96,12 +99,13 @@ func TestData(t *testing.T) { return &file.File{} }) cfg := config.NewConfig() - err = cfg.LoadConfigData(buf) + cfg.SetAgent(&testAgentController{}) + err = cfg.LoadConfigData(context.Background(), context.Background(), []byte(buf)) require.NoError(t, err) // Gather the metrics from the input file configure acc := testutil.Accumulator{} - for _, i := range cfg.Inputs { + for _, i := range cfg.Inputs() { err = i.Init() require.NoError(t, err) err = i.Gather(&acc) @@ -144,3 +148,43 @@ func readMetricFile(path string) ([]telegraf.Metric, error) { return metrics, nil } + +type testAgentController struct { + inputs []*models.RunningInput + processors []models.ProcessorRunner + outputs []*models.RunningOutput + // configs []*config.RunningConfigPlugin +} + +func (a *testAgentController) reset() { + a.inputs = nil + a.processors = nil + a.outputs = nil + // a.configs = nil +} + +func (a *testAgentController) RunningInputs() []*models.RunningInput { + return a.inputs +} +func (a *testAgentController) RunningProcessors() []models.ProcessorRunner { + return a.processors +} +func (a *testAgentController) RunningOutputs() []*models.RunningOutput { + return a.outputs +} +func (a *testAgentController) AddInput(input *models.RunningInput) { + a.inputs = append(a.inputs, input) +} +func (a *testAgentController) AddProcessor(processor models.ProcessorRunner) { + a.processors = append(a.processors, processor) +} +func (a *testAgentController) AddOutput(output *models.RunningOutput) { + a.outputs = append(a.outputs, output) +} +func (a *testAgentController) RunInput(input *models.RunningInput, startTime time.Time) {} +func (a *testAgentController) RunProcessor(p models.ProcessorRunner) {} +func (a *testAgentController) RunOutput(ctx context.Context, output *models.RunningOutput) {} +func (a *testAgentController) RunConfigPlugin(ctx context.Context, plugin config.ConfigPlugin) {} +func (a *testAgentController) StopInput(i *models.RunningInput) {} +func (a *testAgentController) StopProcessor(p models.ProcessorRunner) {} +func (a *testAgentController) StopOutput(p *models.RunningOutput) {} From 5a1bf4155b30f8a057fcd1fde25cbc0b5d35512c Mon Sep 17 00:00:00 2001 From: helenosheaa <38860767+helenosheaa@users.noreply.github.com> Date: Wed, 21 Jul 2021 15:16:34 -0400 Subject: [PATCH 17/48] fix agent, agent controller --- agent/agent.go | 14 +++++++------- agent/agentcontrol_test.go | 4 ++-- plugins/configs/api/controller.go | 8 ++++---- plugins/configs/api/controller_test.go | 20 ++++++++++---------- 4 files changed, 23 insertions(+), 23 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index 9e4a9f3909db5..caa58e410716a 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -154,8 +154,8 @@ func (a *Agent) RunWithAPI(outputCancel context.CancelFunc) { log.Printf("I! 
[agent] Config: Interval:%s, Quiet:%#v, Hostname:%#v, "+ "Flush Interval:%s", - a.Config.Agent.Interval.Duration, a.Config.Agent.Quiet, - a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration) + time.Duration(a.Config.Agent.Interval), a.Config.Agent.Quiet, + a.Config.Agent.Hostname, time.Duration(a.Config.Agent.FlushInterval)) a.inputGroupUnit.relay.Start() @@ -222,17 +222,17 @@ func (a *Agent) startInput(input *models.RunningInput) error { // RunInput is a blocking call that runs an input forever func (a *Agent) RunInput(input *models.RunningInput, startTime time.Time) { // default to agent interval but check for override - interval := a.Config.Agent.Interval.Duration + interval := time.Duration(a.Config.Agent.Interval) if input.Config.Interval != 0 { interval = input.Config.Interval } // default to agent precision but check for override - precision := a.Config.Agent.Precision.Duration + precision := time.Duration(a.Config.Agent.Precision) if input.Config.Precision != 0 { precision = input.Config.Precision } // default to agent collection_jitter but check for override - jitter := a.Config.Agent.CollectionJitter.Duration + jitter := time.Duration(a.Config.Agent.CollectionJitter) if input.Config.CollectionJitter != 0 { jitter = input.Config.CollectionJitter } @@ -622,8 +622,8 @@ func (a *Agent) RunOutput(ctx context.Context, output *models.RunningOutput) { } a.outputGroupUnit.Unlock() - interval := a.Config.Agent.FlushInterval.Duration - jitter := a.Config.Agent.FlushJitter.Duration + interval := time.Duration(a.Config.Agent.FlushInterval) + jitter := time.Duration(a.Config.Agent.FlushJitter) // Overwrite agent flush_interval if this plugin has its own. if output.Config.FlushInterval != 0 { interval = output.Config.FlushInterval diff --git a/agent/agentcontrol_test.go b/agent/agentcontrol_test.go index 395ee0b1d4c14..d915eb498b89d 100644 --- a/agent/agentcontrol_test.go +++ b/agent/agentcontrol_test.go @@ -27,7 +27,7 @@ func TestAgentPluginControllerLifecycle(t *testing.T) { a.AddInput(ri) go a.RunInput(ri, time.Now()) - m, _ := metric.New("testing", + m := metric.New("testing", map[string]string{ "country": "canada", }, @@ -104,7 +104,7 @@ func TestAgentPluginConnectionsAfterAddAndRemoveProcessor(t *testing.T) { waitForStatus(t, ro, "running", 1*time.Second) // inject a metric into the input plugin as if it collected it - m, _ := metric.New("mojo", nil, map[string]interface{}{"jenkins": "leroy"}, now) + m := metric.New("mojo", nil, map[string]interface{}{"jenkins": "leroy"}, now) inp.injectMetric(m) // wait for the output to get it diff --git a/plugins/configs/api/controller.go b/plugins/configs/api/controller.go index 4fcbf046ca996..c73903d717b15 100644 --- a/plugins/configs/api/controller.go +++ b/plugins/configs/api/controller.go @@ -555,11 +555,11 @@ func getFieldConfig(p interface{}, cfg map[string]FieldConfig) { if f.IsValid() && !f.IsZero() { fc.Default = f.Interface() // special handling for internal struct types so the struct doesn't serialize to an object. 
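			// For example (illustrative only; the field name is hypothetical, not part of this change):
			// a plugin field declared as
			//     Timeout config.Duration `toml:"timeout"`
			// with a default of config.Duration(5 * time.Second) should surface in the FieldConfig
			// as the plain integer 5000000000 (nanoseconds), because config.Duration is an integer
			// type, whereas the old internal.Duration wrapped a time.Duration inside a struct and
			// would otherwise marshal to a nested object.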
- if d, ok := fc.Default.(internal.Duration); ok { - fc.Default = d.Duration + if d, ok := fc.Default.(config.Duration); ok { + fc.Default = d } - if s, ok := fc.Default.(internal.Size); ok { - fc.Default = s.Size + if s, ok := fc.Default.(config.Size); ok { + fc.Default = s } } diff --git a/plugins/configs/api/controller_test.go b/plugins/configs/api/controller_test.go index 24dfe65d8d461..4df7aa87ed4f0 100644 --- a/plugins/configs/api/controller_test.go +++ b/plugins/configs/api/controller_test.go @@ -2,6 +2,7 @@ package api import ( "context" + "encoding/json" "net/http" "testing" "time" @@ -10,7 +11,6 @@ import ( "github.com/influxdata/telegraf/agent" "github.com/influxdata/telegraf/config" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/models" _ "github.com/influxdata/telegraf/plugins/aggregators/all" "github.com/influxdata/telegraf/plugins/common/kafka" @@ -431,7 +431,7 @@ func TestExampleWorstPlugin(t *testing.T) { 1, }, } - readTimeout := internal.Duration{Duration: 5 * time.Second} + readTimeout := config.Duration(5 * time.Second) b := true var i int = 1 f := float64(6) @@ -441,10 +441,10 @@ func TestExampleWorstPlugin(t *testing.T) { } expected := ExampleWorstPlugin{ Elapsed: config.Duration(3 * time.Second), - Elapsed2: internal.Duration{Duration: 4 * time.Second}, + Elapsed2: config.Duration(4 * time.Second), ReadTimeout: &readTimeout, Size1: config.Size(8 * 1024 * 1024), - Size2: internal.Size{Size: 9 * 1024 * 1024}, + Size2: config.Size(9 * 1024 * 1024), PointerStruct: &baseopts{Field: "f"}, B: &b, I: &i, @@ -484,8 +484,8 @@ func TestExampleWorstPlugin(t *testing.T) { }, }, }, - Percentiles: []internal.Number{ - {Value: 1}, + Percentiles: []json.Number{ + "1", }, FloatPercentiles: []float64{1.0}, MapOfStructs: map[string]baseopts{ @@ -514,10 +514,10 @@ func TestExampleWorstPlugin(t *testing.T) { type ExampleWorstPlugin struct { Elapsed config.Duration - Elapsed2 internal.Duration - ReadTimeout *internal.Duration + Elapsed2 config.Duration + ReadTimeout *config.Duration Size1 config.Size - Size2 internal.Size + Size2 config.Size PointerStruct *baseopts B *bool I *int @@ -535,7 +535,7 @@ type ExampleWorstPlugin struct { Templates []*baseopts Value interface{} `json:"value"` DeviceTags map[string][]map[string]string - Percentiles []internal.Number + Percentiles []json.Number FloatPercentiles []float64 MapOfStructs map[string]baseopts Command []interface{} From a667daf1200df87a986bc371b6dbf252b8e0effe Mon Sep 17 00:00:00 2001 From: helenosheaa <38860767+helenosheaa@users.noreply.github.com> Date: Wed, 21 Jul 2021 15:26:25 -0400 Subject: [PATCH 18/48] fix controller and ec2 --- plugins/configs/api/controller.go | 7 +++---- plugins/configs/api/controller_test.go | 7 +++---- plugins/processors/aws/ec2/ec2_test.go | 7 ++++++- 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/plugins/configs/api/controller.go b/plugins/configs/api/controller.go index c73903d717b15..650fdc731a75e 100644 --- a/plugins/configs/api/controller.go +++ b/plugins/configs/api/controller.go @@ -14,7 +14,6 @@ import ( "github.com/alecthomas/units" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/aggregators" "github.com/influxdata/telegraf/plugins/inputs" @@ -647,7 +646,7 @@ func setObject(from, to reflect.Value, destType 
reflect.Type) error { } if destType.String() == "internal.Number" { - n := internal.Number{Value: float64(from.Int())} + n := float64(from.Int()) to.Set(reflect.ValueOf(n)) return nil } @@ -673,7 +672,7 @@ func setObject(from, to reflect.Value, destType reflect.Type) error { } if destType.String() == "internal.Number" { - n := internal.Number{Value: float64(from.Uint())} + n := float64(from.Uint()) to.Set(reflect.ValueOf(n)) return nil } @@ -691,7 +690,7 @@ func setObject(from, to reflect.Value, destType reflect.Type) error { to = ptr.Elem() } if destType.String() == "internal.Number" { - n := internal.Number{Value: from.Float()} + n := from.Float() to.Set(reflect.ValueOf(n)) return nil } diff --git a/plugins/configs/api/controller_test.go b/plugins/configs/api/controller_test.go index 4df7aa87ed4f0..6ed8596f76121 100644 --- a/plugins/configs/api/controller_test.go +++ b/plugins/configs/api/controller_test.go @@ -2,7 +2,6 @@ package api import ( "context" - "encoding/json" "net/http" "testing" "time" @@ -484,8 +483,8 @@ func TestExampleWorstPlugin(t *testing.T) { }, }, }, - Percentiles: []json.Number{ - "1", + Percentiles: []int64{ + 1, }, FloatPercentiles: []float64{1.0}, MapOfStructs: map[string]baseopts{ @@ -535,7 +534,7 @@ type ExampleWorstPlugin struct { Templates []*baseopts Value interface{} `json:"value"` DeviceTags map[string][]map[string]string - Percentiles []json.Number + Percentiles []int64 FloatPercentiles []float64 MapOfStructs map[string]baseopts Command []interface{} diff --git a/plugins/processors/aws/ec2/ec2_test.go b/plugins/processors/aws/ec2/ec2_test.go index 8eb599206ff99..f740022161f17 100644 --- a/plugins/processors/aws/ec2/ec2_test.go +++ b/plugins/processors/aws/ec2/ec2_test.go @@ -1,8 +1,10 @@ package ec2 import ( + "context" "testing" + "github.com/influxdata/telegraf/agent" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" @@ -51,8 +53,11 @@ func TestBasicInitInvalidTagsReturnAnError(t *testing.T) { func TestLoadingConfig(t *testing.T) { confFile := []byte("[[processors.aws_ec2]]" + "\n" + sampleConfig) + ctx := context.Background() c := config.NewConfig() - err := c.LoadConfigData(confFile) + a := agent.NewAgent(ctx, c) + c.SetAgent(a) + err := c.LoadConfigData(ctx, ctx, []byte(confFile)) require.NoError(t, err) require.Len(t, c.Processors, 1) From 58c966c1bacf62eec9de220355ebdaf7af9733b6 Mon Sep 17 00:00:00 2001 From: helenosheaa <38860767+helenosheaa@users.noreply.github.com> Date: Wed, 21 Jul 2021 16:56:36 -0400 Subject: [PATCH 19/48] fix config tests --- config/config_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/config/config_test.go b/config/config_test.go index 39bf116c401cb..98d72cb0a7986 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -263,13 +263,13 @@ func TestConfig_WrongFieldType(t *testing.T) { defer agentController.reset() err := c.LoadConfig(context.Background(), context.Background(), "./testdata/wrong_field_type.toml") require.Error(t, err, "invalid field type") - require.Equal(t, "Error loading config file ./testdata/wrong_field_type.toml: error parsing http_listener_v2, line 2: (config.MockupInputPlugin.Port) cannot unmarshal TOML string into int", err.Error()) + require.Equal(t, "Error loading config file ./testdata/wrong_field_type.toml: error parsing http_listener_v2, line 2: (config_test.MockupInputPlugin.Port) cannot unmarshal TOML string into int", err.Error()) c = 
config.NewConfig() c.SetAgent(agentController) err = c.LoadConfig(context.Background(), context.Background(), "./testdata/wrong_field_type2.toml") require.Error(t, err, "invalid field type2") - require.Equal(t, "Error loading config file ./testdata/wrong_field_type2.toml: error parsing http_listener_v2, line 2: (config.MockupInputPlugin.Methods) cannot unmarshal TOML string into []string", err.Error()) + require.Equal(t, "Error loading config file ./testdata/wrong_field_type2.toml: error parsing http_listener_v2, line 2: (config_test.MockupInputPlugin.Methods) cannot unmarshal TOML string into []string", err.Error()) } func TestConfig_InlineTables(t *testing.T) { @@ -279,7 +279,7 @@ func TestConfig_InlineTables(t *testing.T) { c.SetAgent(agentController) defer agentController.reset() require.NoError(t, c.LoadConfig(context.Background(), context.Background(), "./testdata/inline_table.toml")) - require.Len(t, c.Outputs, 2) + require.Len(t, c.Outputs(), 2) output, ok := c.Outputs()[1].Output.(*MockupOuputPlugin) require.True(t, ok) @@ -295,7 +295,7 @@ func TestConfig_SliceComment(t *testing.T) { c.SetAgent(agentController) defer agentController.reset() require.NoError(t, c.LoadConfig(context.Background(), context.Background(), "./testdata/slice_comment.toml")) - require.Len(t, c.Outputs, 1) + require.Len(t, c.Outputs(), 1) output, ok := c.Outputs()[0].Output.(*MockupOuputPlugin) require.True(t, ok) @@ -321,7 +321,7 @@ func TestConfig_AzureMonitorNamespacePrefix(t *testing.T) { c.SetAgent(agentController) defer agentController.reset() require.NoError(t, c.LoadConfig(context.Background(), context.Background(), "./testdata/azure_monitor.toml")) - require.Len(t, c.Outputs, 2) + require.Len(t, c.Outputs(), 2) expectedPrefix := []string{"Telegraf/", ""} for i, plugin := range c.Outputs() { From b33d5a01cdefc0cb495ef8ebfa7cc064863cae65 Mon Sep 17 00:00:00 2001 From: helenosheaa <38860767+helenosheaa@users.noreply.github.com> Date: Wed, 21 Jul 2021 17:16:20 -0400 Subject: [PATCH 20/48] fix panic test --- config/types_test.go | 2 +- plugins/processors/aws/ec2/ec2_test.go | 61 +++++++++++++++++++++++--- 2 files changed, 55 insertions(+), 8 deletions(-) diff --git a/config/types_test.go b/config/types_test.go index 17aa8bf8df8b6..a8a10c26c263c 100644 --- a/config/types_test.go +++ b/config/types_test.go @@ -13,6 +13,7 @@ import ( func TestConfigDuration(t *testing.T) { c := config.NewConfig() + c.SetAgent(&testAgentController{}) err := c.LoadConfigData(context.Background(), context.Background(), []byte(` [[processors.reverse_dns]] cache_ttl = "3h" @@ -23,7 +24,6 @@ func TestConfigDuration(t *testing.T) { field = "source_ip" dest = "source_name" `)) - c.SetAgent(&testAgentController{}) require.NoError(t, err) require.Len(t, c.Processors(), 1) p := c.Processors()[0].(*models.RunningProcessor).Processor.(*reverse_dns.ReverseDNS) diff --git a/plugins/processors/aws/ec2/ec2_test.go b/plugins/processors/aws/ec2/ec2_test.go index f740022161f17..7afee70bbda02 100644 --- a/plugins/processors/aws/ec2/ec2_test.go +++ b/plugins/processors/aws/ec2/ec2_test.go @@ -3,9 +3,10 @@ package ec2 import ( "context" "testing" + "time" - "github.com/influxdata/telegraf/agent" "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -52,13 +53,59 @@ func TestBasicInitInvalidTagsReturnAnError(t *testing.T) { } func TestLoadingConfig(t *testing.T) { - confFile := 
[]byte("[[processors.aws_ec2]]" + "\n" + sampleConfig) - ctx := context.Background() + // confFile := []byte("[[processors.aws_ec2]]" + "\n" + sampleConfig) c := config.NewConfig() - a := agent.NewAgent(ctx, c) - c.SetAgent(a) - err := c.LoadConfigData(ctx, ctx, []byte(confFile)) - require.NoError(t, err) + c.SetAgent(&testAgentController{}) + err := c.LoadConfigData(context.Background(), context.Background(), []byte( + ` + [[processors.aws_ec2]] + imds_tags = ["availabilityZone"] + ec2_tags = ["availabilityZone"] + timeout = "30s" + max_parallel_calls = 10 + `, + )) + require.NoError(t, err) require.Len(t, c.Processors, 1) } + +type testAgentController struct { + inputs []*models.RunningInput + processors []models.ProcessorRunner + outputs []*models.RunningOutput + // configs []*config.RunningConfigPlugin +} + +func (a *testAgentController) reset() { + a.inputs = nil + a.processors = nil + a.outputs = nil + // a.configs = nil +} + +func (a *testAgentController) RunningInputs() []*models.RunningInput { + return a.inputs +} +func (a *testAgentController) RunningProcessors() []models.ProcessorRunner { + return a.processors +} +func (a *testAgentController) RunningOutputs() []*models.RunningOutput { + return a.outputs +} +func (a *testAgentController) AddInput(input *models.RunningInput) { + a.inputs = append(a.inputs, input) +} +func (a *testAgentController) AddProcessor(processor models.ProcessorRunner) { + a.processors = append(a.processors, processor) +} +func (a *testAgentController) AddOutput(output *models.RunningOutput) { + a.outputs = append(a.outputs, output) +} +func (a *testAgentController) RunInput(input *models.RunningInput, startTime time.Time) {} +func (a *testAgentController) RunProcessor(p models.ProcessorRunner) {} +func (a *testAgentController) RunOutput(ctx context.Context, output *models.RunningOutput) {} +func (a *testAgentController) RunConfigPlugin(ctx context.Context, plugin config.ConfigPlugin) {} +func (a *testAgentController) StopInput(i *models.RunningInput) {} +func (a *testAgentController) StopProcessor(p models.ProcessorRunner) {} +func (a *testAgentController) StopOutput(p *models.RunningOutput) {} From 6997228847532889dede5e0bbe485ff5af47bfb0 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Thu, 22 Jul 2021 14:27:23 -0500 Subject: [PATCH 21/48] test fix --- plugins/inputs/phpfpm/phpfpm_test.go | 4 ++-- plugins/processors/aws/ec2/ec2.go | 30 +++++++++++++------------- plugins/processors/aws/ec2/ec2_test.go | 8 ++++++- 3 files changed, 24 insertions(+), 18 deletions(-) diff --git a/plugins/inputs/phpfpm/phpfpm_test.go b/plugins/inputs/phpfpm/phpfpm_test.go index c3a3f29f570f5..50d8d604efb5b 100644 --- a/plugins/inputs/phpfpm/phpfpm_test.go +++ b/plugins/inputs/phpfpm/phpfpm_test.go @@ -274,7 +274,7 @@ func TestPhpFpmGeneratesMetrics_From_Socket_Custom_Status_Path(t *testing.T) { //When not passing server config, we default to localhost //We just want to make sure we did request stat from localhost func TestPhpFpmDefaultGetFromLocalhost(t *testing.T) { - r := &phpfpm{} + r := &phpfpm{Urls: []string{"http://bad.localhost:62001/status"}} require.NoError(t, r.Init()) @@ -282,7 +282,7 @@ func TestPhpFpmDefaultGetFromLocalhost(t *testing.T) { err := acc.GatherError(r.Gather) require.Error(t, err) - assert.Contains(t, err.Error(), "127.0.0.1/status") + assert.Contains(t, err.Error(), "/status") } func TestPhpFpmGeneratesMetrics_Throw_Error_When_Fpm_Status_Is_Not_Responding(t *testing.T) { diff --git a/plugins/processors/aws/ec2/ec2.go 
b/plugins/processors/aws/ec2/ec2.go index 7126214152a51..7f0a8aafad82d 100644 --- a/plugins/processors/aws/ec2/ec2.go +++ b/plugins/processors/aws/ec2/ec2.go @@ -124,6 +124,21 @@ func (r *AwsEc2Processor) Init() error { return errors.New("no tags specified in configuration") } + for _, tag := range r.ImdsTags { + if len(tag) > 0 && isImdsTagAllowed(tag) { + r.imdsTags[tag] = struct{}{} + } else { + return fmt.Errorf("not allowed metadata tag specified in configuration: %s", tag) + } + } + if len(r.imdsTags) == 0 && len(r.EC2Tags) == 0 { + return errors.New("no allowed metadata tags specified in configuration") + } + + return nil +} + +func (r *AwsEc2Processor) Start(acc telegraf.Accumulator) error { ctx := context.Background() cfg, err := awsconfig.LoadDefaultConfig(ctx) if err != nil { @@ -161,21 +176,6 @@ func (r *AwsEc2Processor) Init() error { } } - for _, tag := range r.ImdsTags { - if len(tag) > 0 && isImdsTagAllowed(tag) { - r.imdsTags[tag] = struct{}{} - } else { - return fmt.Errorf("not allowed metadata tag specified in configuration: %s", tag) - } - } - if len(r.imdsTags) == 0 && len(r.EC2Tags) == 0 { - return errors.New("no allowed metadata tags specified in configuration") - } - - return nil -} - -func (r *AwsEc2Processor) Start(acc telegraf.Accumulator) error { if r.Ordered { r.parallel = parallel.NewOrdered(acc, r.asyncAdd, DefaultMaxOrderedQueueSize, r.MaxParallelCalls) } else { diff --git a/plugins/processors/aws/ec2/ec2_test.go b/plugins/processors/aws/ec2/ec2_test.go index 7afee70bbda02..f69d684e848e7 100644 --- a/plugins/processors/aws/ec2/ec2_test.go +++ b/plugins/processors/aws/ec2/ec2_test.go @@ -12,6 +12,9 @@ import ( ) func TestBasicStartup(t *testing.T) { + if testing.Short() { + t.Skip("skipped test that connects to external service") + } p := newAwsEc2Processor() p.Log = &testutil.Logger{} p.ImdsTags = []string{"accountId", "instanceId"} @@ -24,6 +27,9 @@ func TestBasicStartup(t *testing.T) { } func TestBasicStartupWithEC2Tags(t *testing.T) { + if testing.Short() { + t.Skip("skipped test that connects to external service") + } p := newAwsEc2Processor() p.Log = &testutil.Logger{} p.ImdsTags = []string{"accountId", "instanceId"} @@ -67,7 +73,7 @@ func TestLoadingConfig(t *testing.T) { )) require.NoError(t, err) - require.Len(t, c.Processors, 1) + require.Len(t, c.Processors(), 1) } type testAgentController struct { From cc43b8357049cccb855a4647e9aa841e8cecf8a6 Mon Sep 17 00:00:00 2001 From: helenosheaa <38860767+helenosheaa@users.noreply.github.com> Date: Thu, 22 Jul 2021 15:30:58 -0400 Subject: [PATCH 22/48] remove commented out config --- plugins/processors/aws/ec2/ec2_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/plugins/processors/aws/ec2/ec2_test.go b/plugins/processors/aws/ec2/ec2_test.go index f69d684e848e7..754786d70384b 100644 --- a/plugins/processors/aws/ec2/ec2_test.go +++ b/plugins/processors/aws/ec2/ec2_test.go @@ -59,7 +59,6 @@ func TestBasicInitInvalidTagsReturnAnError(t *testing.T) { } func TestLoadingConfig(t *testing.T) { - // confFile := []byte("[[processors.aws_ec2]]" + "\n" + sampleConfig) c := config.NewConfig() c.SetAgent(&testAgentController{}) err := c.LoadConfigData(context.Background(), context.Background(), []byte( From 7f77230c319416618083354a1884832892a25f14 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Tue, 27 Jul 2021 10:29:27 -0500 Subject: [PATCH 23/48] update dep list --- docs/LICENSE_OF_DEPENDENCIES.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md 
b/docs/LICENSE_OF_DEPENDENCIES.md index 7ae13c1143db4..fba8828f7c3f5 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -210,6 +210,7 @@ following works: - github.com/tinylib/msgp [MIT License](https://github.com/tinylib/msgp/blob/master/LICENSE) - github.com/tklauser/go-sysconf [BSD 3-Clause "New" or "Revised" License](https://github.com/tklauser/go-sysconf/blob/master/LICENSE) - github.com/tklauser/numcpus [Apache License 2.0](https://github.com/tklauser/numcpus/blob/master/LICENSE) +- github.com/ugorji/go/codec [MIT License](https://github.com/sleepinggenius2/gosmi/blob/master/LICENSE) - github.com/vapourismo/knx-go [MIT License](https://github.com/vapourismo/knx-go/blob/master/LICENSE) - github.com/vishvananda/netlink [Apache License 2.0](https://github.com/vishvananda/netlink/blob/master/LICENSE) - github.com/vishvananda/netns [Apache License 2.0](https://github.com/vishvananda/netns/blob/master/LICENSE) @@ -225,6 +226,7 @@ following works: - github.com/xdg/stringprep [Apache License 2.0](https://github.com/xdg-go/stringprep/blob/master/LICENSE) - github.com/youmark/pkcs8 [MIT License](https://github.com/youmark/pkcs8/blob/master/LICENSE) - github.com/yuin/gopher-lua [MIT License](https://github.com/yuin/gopher-lua/blob/master/LICENSE) +- go.etcd.io/bbolt [MIT License](https://github.com/sleepinggenius2/gosmi/blob/master/LICENSE) - go.mongodb.org/mongo-driver [Apache License 2.0](https://github.com/mongodb/mongo-go-driver/blob/master/LICENSE) - go.opencensus.io [Apache License 2.0](https://github.com/census-instrumentation/opencensus-go/blob/master/LICENSE) - go.starlark.net [BSD 3-Clause "New" or "Revised" License](https://github.com/google/starlark-go/blob/master/LICENSE) From 392bd963856994e8c74aa6b96be4d5e1b05b001c Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Tue, 27 Jul 2021 13:33:40 -0500 Subject: [PATCH 24/48] fix linter issues --- agent/accumulator.go | 2 +- agent/agent.go | 11 ++-- agent/agentcontrol_test.go | 11 +--- config/config.go | 30 ++++++---- config/config_test.go | 4 +- config/testdata/telegraf-agent.toml | 16 +++--- internal/rotate/file_writer.go | 2 +- models/id.go | 4 +- models/running_aggregator.go | 4 +- models/running_aggregator_test.go | 3 +- plugins/configs/all/all.go | 1 + plugins/configs/api/controller.go | 79 ++++++++++++-------------- plugins/configs/api/controller_test.go | 5 +- plugins/configs/api/listener.go | 12 ++-- plugins/configs/api/listener_test.go | 15 +++-- plugins/configs/api/plugin.go | 7 ++- plugins/inputs/mongodb/mongodb.go | 1 + plugins/parsers/json_v2/parser_test.go | 2 +- plugins/processors/aws/ec2/ec2.go | 5 +- plugins/storage/all/all.go | 1 + plugins/storage/boltdb/boltdb.go | 1 - plugins/storage/boltdb/boltdb_test.go | 2 +- 22 files changed, 110 insertions(+), 108 deletions(-) diff --git a/agent/accumulator.go b/agent/accumulator.go index a8a403750702a..6c843ab9f47a9 100644 --- a/agent/accumulator.go +++ b/agent/accumulator.go @@ -149,7 +149,7 @@ func (ac *accumulator) WithTracking(maxTracked int) telegraf.TrackingAccumulator } } -func (ac *accumulator) WithNewMetricMaker(logName string, logger telegraf.Logger, f func(metric telegraf.Metric) telegraf.Metric) telegraf.Accumulator { +func (ac *accumulator) WithNewMetricMaker(logName string, logger telegraf.Logger, f func(m telegraf.Metric) telegraf.Metric) 
telegraf.Accumulator { return &metricMakerAccumulator{ Accumulator: ac, makerFunc: f, diff --git a/agent/agent.go b/agent/agent.go index caa58e410716a..489407a77b8ee 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -36,7 +36,7 @@ type Agent struct { } // NewAgent returns an Agent for the given Config. -func NewAgent(ctx context.Context, config *config.Config) *Agent { +func NewAgent(ctx context.Context, cfg *config.Config) *Agent { inputDestCh := make(chan telegraf.Metric) outputSrcCh := make(chan telegraf.Metric) @@ -44,7 +44,7 @@ func NewAgent(ctx context.Context, config *config.Config) *Agent { // as processors are added, they will be inserted between these two. return &Agent{ - Config: config, + Config: cfg, ctx: ctx, started: sync.NewCond(&sync.Mutex{}), inputGroupUnit: inputGroupUnit{ @@ -103,7 +103,6 @@ type outputUnit struct { type processorGroupUnit struct { sync.Mutex - accumulator telegraf.Accumulator processorUnits []*processorUnit } @@ -658,7 +657,9 @@ func (a *Agent) RunOutput(ctx context.Context, output *models.RunningOutput) { } a.outputGroupUnit.Unlock() - a.flushOnce(output, ticker, output.Write) + if err = a.flushOnce(output, ticker, output.Write); err != nil { + log.Printf("E! [agent] Error writing to %s: %v", output.LogName(), err) + } output.Close() } @@ -866,6 +867,4 @@ func (a *Agent) waitForPluginsToStop() { a.configPluginUnit.Unlock() // everything closed; shut down - return - } diff --git a/agent/agentcontrol_test.go b/agent/agentcontrol_test.go index d915eb498b89d..1a2b166057e08 100644 --- a/agent/agentcontrol_test.go +++ b/agent/agentcontrol_test.go @@ -72,7 +72,6 @@ func TestAgentPluginControllerLifecycle(t *testing.T) { }, now) testutil.RequireMetricEqual(t, expected, o.receivedMetrics[0]) - } func TestAgentPluginConnectionsAfterAddAndRemoveProcessor(t *testing.T) { @@ -183,20 +182,16 @@ func (p *testInputPlugin) SampleConfig() string { return "" } func (p *testInputPlugin) Description() string { return "testInputPlugin" } func (p *testInputPlugin) Gather(a telegraf.Accumulator) error { return nil } func (p *testInputPlugin) Start(a telegraf.Accumulator) error { - println("locking") p.Lock() defer p.Unlock() - println("started") p.acc = a p.started = true p.Cond.Broadcast() return nil } func (p *testInputPlugin) Stop() { - println("stopping input (waiting for lock)") p.Lock() defer p.Unlock() - println("stopped input") } func (p *testInputPlugin) injectMetric(m telegraf.Metric) { p.Lock() @@ -214,9 +209,9 @@ func (p *testProcessorPlugin) Init() error { return nil func (p *testProcessorPlugin) SampleConfig() string { return "" } func (p *testProcessorPlugin) Description() string { return "testProcessorPlugin" } func (p *testProcessorPlugin) Start(acc telegraf.Accumulator) error { return nil } -func (p *testProcessorPlugin) Add(metric telegraf.Metric, acc telegraf.Accumulator) error { - metric.AddField("capital", "Ottawa") - acc.AddMetric(metric) +func (p *testProcessorPlugin) Add(m telegraf.Metric, acc telegraf.Accumulator) error { + m.AddField("capital", "Ottawa") + acc.AddMetric(m) return nil } func (p *testProcessorPlugin) Stop() error { return nil } diff --git a/config/config.go b/config/config.go index cac9d8793a743..9c33b0a103bf8 100644 --- a/config/config.go +++ b/config/config.go @@ -52,7 +52,7 @@ var ( `"`, `\"`, `\`, `\\`, ) - HttpLoadConfigRetryInterval = 10 * time.Second + HTTPLoadConfigRetryInterval = 10 * time.Second // fetchURLRe is a regex to determine whether the requested file should // be fetched from a remote or read from the filesystem. 
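	// For example (illustrative paths, not taken from this change): a config path such as
	// "https://example.com/telegraf.conf" matches fetchURLRe and is fetched over HTTP,
	// retried with HTTPLoadConfigRetryInterval between attempts, while "./telegraf.conf"
	// or "/etc/telegraf/telegraf.conf" is read directly from the filesystem.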
@@ -412,7 +412,7 @@ func PrintSampleConfig( aggregatorFilters []string, processorFilters []string, ) { - // print headers + // nolint:revive // print headers fmt.Println(header) if len(sectionFilters) == 0 { @@ -424,10 +424,12 @@ func PrintSampleConfig( if sliceContains("outputs", sectionFilters) { if len(outputFilters) != 0 { if len(outputFilters) >= 3 && outputFilters[1] != "none" { + // nolint:revive fmt.Println(outputHeader) } printFilteredOutputs(outputFilters, false) } else { + // nolint:revive fmt.Println(outputHeader) printFilteredOutputs(outputDefaults, false) // Print non-default outputs, commented @@ -446,10 +448,12 @@ func PrintSampleConfig( if sliceContains("processors", sectionFilters) { if len(processorFilters) != 0 { if len(processorFilters) >= 3 && processorFilters[1] != "none" { + // nolint:revive fmt.Println(processorHeader) } printFilteredProcessors(processorFilters, false) } else { + // nolint:revive fmt.Println(processorHeader) pnames := []string{} for pname := range processors.Processors { @@ -464,10 +468,12 @@ func PrintSampleConfig( if sliceContains("aggregators", sectionFilters) { if len(aggregatorFilters) != 0 { if len(aggregatorFilters) >= 3 && aggregatorFilters[1] != "none" { + // nolint:revive fmt.Println(aggregatorHeader) } printFilteredAggregators(aggregatorFilters, false) } else { + // nolint:revive fmt.Println(aggregatorHeader) pnames := []string{} for pname := range aggregators.Aggregators { @@ -482,10 +488,12 @@ func PrintSampleConfig( if sliceContains("inputs", sectionFilters) { if len(inputFilters) != 0 { if len(inputFilters) >= 3 && inputFilters[1] != "none" { + // nolint:revive fmt.Println(inputHeader) } printFilteredInputs(inputFilters, false) } else { + // nolint:revive fmt.Println(inputHeader) printFilteredInputs(inputDefaults, false) // Print non-default inputs, commented @@ -576,6 +584,7 @@ func printFilteredInputs(inputFilters []string, commented bool) { } sort.Strings(servInputNames) + // nolint:revive fmt.Println(serviceInputHeader) for _, name := range servInputNames { printConfig(name, servInputs[name], "inputs", commented) @@ -602,10 +611,12 @@ func printFilteredOutputs(outputFilters []string, commented bool) { func printFilteredGlobalSections(sectionFilters []string) { if sliceContains("global_tags", sectionFilters) { + // nolint:revive fmt.Println(globalTagsConfig) } if sliceContains("agent", sectionFilters) { + // nolint:revive fmt.Println(agentConfig) } } @@ -663,7 +674,7 @@ func PrintOutputConfig(name string) error { } // LoadDirectory loads all toml config files found in the specified path, recursively. -func (c *Config) LoadDirectory(ctx context.Context, outputCtx context.Context, path string) error { +func (c *Config) LoadDirectory(ctx context.Context, outputCtx context.Context, path string) error { // nolint:revive walkfn := func(thispath string, info os.FileInfo, _ error) error { if info == nil { log.Printf("W! 
Telegraf is not permitted to read %s", thispath) @@ -730,7 +741,7 @@ func isURL(str string) bool { } // LoadConfig loads the given config file and applies it to c -func (c *Config) LoadConfig(ctx context.Context, outputCtx context.Context, path string) error { +func (c *Config) LoadConfig(ctx context.Context, outputCtx context.Context, path string) error { // nolint:revive var err error if path == "" { if path, err = GetDefaultConfigPath(); err != nil { @@ -749,7 +760,7 @@ func (c *Config) LoadConfig(ctx context.Context, outputCtx context.Context, path } // LoadConfigData loads TOML-formatted config data -func (c *Config) LoadConfigData(ctx context.Context, outputCtx context.Context, data []byte) error { +func (c *Config) LoadConfigData(ctx context.Context, outputCtx context.Context, data []byte) error { // nolint:revive tbl, err := parseConfig(data) if err != nil { return fmt.Errorf("Error parsing data: %s", err) @@ -964,8 +975,8 @@ func fetchConfig(u *url.URL) ([]byte, error) { if resp.StatusCode != http.StatusOK { if i < retries { - log.Printf("Error getting HTTP config. Retry %d of %d in %s. Status=%d", i, retries, HttpLoadConfigRetryInterval, resp.StatusCode) - time.Sleep(HttpLoadConfigRetryInterval) + log.Printf("Error getting HTTP config. Retry %d of %d in %s. Status=%d", i, retries, HTTPLoadConfigRetryInterval, resp.StatusCode) + time.Sleep(HTTPLoadConfigRetryInterval) continue } return nil, fmt.Errorf("Retry %d of %d failed to retrieve remote config: %s", i, retries, resp.Status) @@ -1033,10 +1044,9 @@ func (c *Config) addConfigPlugin(ctx context.Context, name string, table *ast.Ta return err } - // TODO: set storage + logger := models.NewLogger("config", name, name) + models.SetLoggerOnPlugin(configPlugin, logger) - // TODO: this init is expecting to see all of the config, I think? - // what it's really getting is a reference to this config object that isn't doing much. if err := configPlugin.Init(ctx, c, c.controller); err != nil { return err } diff --git a/config/config_test.go b/config/config_test.go index 98d72cb0a7986..a6c71e6a7664b 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -332,7 +332,7 @@ func TestConfig_AzureMonitorNamespacePrefix(t *testing.T) { } func TestConfig_URLRetries3Fails(t *testing.T) { - config.HttpLoadConfigRetryInterval = 0 * time.Second + config.HTTPLoadConfigRetryInterval = 0 * time.Second responseCounter := 0 ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusNotFound) @@ -353,7 +353,7 @@ func TestConfig_URLRetries3Fails(t *testing.T) { } func TestConfig_URLRetries3FailsThenPasses(t *testing.T) { - config.HttpLoadConfigRetryInterval = 0 * time.Second + config.HTTPLoadConfigRetryInterval = 0 * time.Second responseCounter := 0 ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if responseCounter <= 2 { diff --git a/config/testdata/telegraf-agent.toml b/config/testdata/telegraf-agent.toml index a0add155b77e7..4905415cf7770 100644 --- a/config/testdata/telegraf-agent.toml +++ b/config/testdata/telegraf-agent.toml @@ -183,14 +183,14 @@ # Metrics groups to be collected, by default, all enabled. master_collections = ["resources","master","system","slaves","frameworks","messages","evqueue","registrar"] -# Read metrics from one or many MongoDB servers -[[inputs.mongodb]] - # An array of URI to gather stats about. Specify an ip or hostname - # with optional port add password. 
ie mongodb://user:auth_key@10.10.3.30:27017, - # mongodb://10.10.3.33:18832, 10.0.0.1:10000, etc. - # - # If no servers are specified, then 127.0.0.1 is used as the host and 27107 as the port. - servers = ["127.0.0.1:27017"] +# # Read metrics from one or many MongoDB servers +# [[inputs.mongodb]] +# # An array of URI to gather stats about. Specify an ip or hostname +# # with optional port add password. ie mongodb://user:auth_key@10.10.3.30:27017, +# # mongodb://10.10.3.33:18832, 10.0.0.1:10000, etc. +# # +# # If no servers are specified, then 127.0.0.1 is used as the host and 27107 as the port. +# servers = ["127.0.0.1:27017"] # Read metrics from one or many mysql servers [[inputs.mysql]] diff --git a/internal/rotate/file_writer.go b/internal/rotate/file_writer.go index 8fabe2136ca29..6f3e29993ec59 100644 --- a/internal/rotate/file_writer.go +++ b/internal/rotate/file_writer.go @@ -4,7 +4,7 @@ package rotate import ( "fmt" "io" - "log" + "log" // nolint:revive "os" "path/filepath" "sort" diff --git a/models/id.go b/models/id.go index 05d3eeb9b19c2..2e9dbd14c1f98 100644 --- a/models/id.go +++ b/models/id.go @@ -6,8 +6,8 @@ import ( ) var ( - globalPluginIDIncrement uint32 = 0 - globalInstanceID uint32 = rand.Uint32() // set a new instance ID on every app load. + globalPluginIDIncrement uint32 + globalInstanceID = rand.Uint32() // set a new instance ID on every app load. ) // NextPluginID generates a globally unique plugin ID for use referencing the plugin within the lifetime of Telegraf. diff --git a/models/running_aggregator.go b/models/running_aggregator.go index bbe9fbe24ef8c..15ae20a14ded2 100644 --- a/models/running_aggregator.go +++ b/models/running_aggregator.go @@ -254,9 +254,9 @@ func (r *RunningAggregator) pushLoop(acc telegraf.Accumulator) { } } -func (r *RunningAggregator) pushMetricMaker(metric telegraf.Metric) telegraf.Metric { +func (r *RunningAggregator) pushMetricMaker(inMetric telegraf.Metric) telegraf.Metric { m := makemetric( - metric, + inMetric, r.Config.NameOverride, r.Config.MeasurementPrefix, r.Config.MeasurementSuffix, diff --git a/models/running_aggregator_test.go b/models/running_aggregator_test.go index 398c9d0946da0..c080816d5118c 100644 --- a/models/running_aggregator_test.go +++ b/models/running_aggregator_test.go @@ -282,7 +282,8 @@ func TestAddDoesNotModifyMetric(t *testing.T) { }, now) expected := m.Copy() - ra.Add(m, &testutil.Accumulator{}) + err := ra.Add(m, &testutil.Accumulator{}) + require.NoError(t, err) testutil.RequireMetricEqual(t, expected, m) } diff --git a/plugins/configs/all/all.go b/plugins/configs/all/all.go index ffc4394d7f537..3376811f52633 100644 --- a/plugins/configs/all/all.go +++ b/plugins/configs/all/all.go @@ -1,5 +1,6 @@ package all import ( + // Blank imports for plugins to register themselves _ "github.com/influxdata/telegraf/plugins/configs/api" ) diff --git a/plugins/configs/api/controller.go b/plugins/configs/api/controller.go index 650fdc731a75e..a7198d1eb3453 100644 --- a/plugins/configs/api/controller.go +++ b/plugins/configs/api/controller.go @@ -4,7 +4,7 @@ import ( "context" "errors" "fmt" - "log" + "log" // nolint:revive "reflect" "sort" "strings" @@ -154,21 +154,20 @@ func (a *api) ListRunningPlugins() (runningPlugins []Plugin) { } getFieldConfig(v.Config, p.Config) runningPlugins = append(runningPlugins, p) - } // TODO: add more types? 
return runningPlugins } -func (a *api) UpdatePlugin(ID models.PluginID, config PluginConfigCreate) error { +func (a *api) UpdatePlugin(id models.PluginID, cfg PluginConfigCreate) error { // TODO: shut down plugin and start a new plugin with the same id. return nil } -func (a *api) CreatePlugin(config PluginConfigCreate) (models.PluginID, error) { - log.Printf("I! [configapi] creating plugin %q", config.Name) +func (a *api) CreatePlugin(cfg PluginConfigCreate) (models.PluginID, error) { + log.Printf("I! [configapi] creating plugin %q", cfg.Name) - parts := strings.Split(config.Name, ".") + parts := strings.Split(cfg.Name, ".") pluginType, name := parts[0], parts[1] switch pluginType { case "inputs": @@ -180,7 +179,7 @@ func (a *api) CreatePlugin(config PluginConfigCreate) (models.PluginID, error) { // create a copy i := input() // set the config - if err := setFieldConfig(config.Config, i); err != nil { + if err := setFieldConfig(cfg.Config, i); err != nil { return "", err } @@ -191,7 +190,7 @@ func (a *api) CreatePlugin(config PluginConfigCreate) (models.PluginID, error) { JSONStrict: true, DataFormat: "influx", } - if err := setFieldConfig(config.Config, pc); err != nil { + if err := setFieldConfig(cfg.Config, pc); err != nil { return "", err } parser, err := parsers.NewParser(pc) @@ -207,7 +206,7 @@ func (a *api) CreatePlugin(config PluginConfigCreate) (models.PluginID, error) { JSONStrict: true, DataFormat: "influx", } - if err := setFieldConfig(config.Config, pc); err != nil { + if err := setFieldConfig(cfg.Config, pc); err != nil { return "", err } @@ -218,11 +217,11 @@ func (a *api) CreatePlugin(config PluginConfigCreate) (models.PluginID, error) { // start it and put it into the agent manager? pluginConfig := &models.InputConfig{Name: name} - if err := setFieldConfig(config.Config, pluginConfig); err != nil { + if err := setFieldConfig(cfg.Config, pluginConfig); err != nil { return "", err } - if err := setFieldConfig(config.Config, &pluginConfig.Filter); err != nil { + if err := setFieldConfig(cfg.Config, &pluginConfig.Filter); err != nil { return "", err } @@ -247,25 +246,25 @@ func (a *api) CreatePlugin(config PluginConfigCreate) (models.PluginID, error) { // create a copy o := output() // set the config - if err := setFieldConfig(config.Config, o); err != nil { + if err := setFieldConfig(cfg.Config, o); err != nil { return "", err } // start it and put it into the agent manager? 
pluginConfig := &models.OutputConfig{Name: name} - if err := setFieldConfig(config.Config, pluginConfig); err != nil { + if err := setFieldConfig(cfg.Config, pluginConfig); err != nil { return "", err } - if err := setFieldConfig(config.Config, &pluginConfig.Filter); err != nil { + if err := setFieldConfig(cfg.Config, &pluginConfig.Filter); err != nil { return "", err } if t, ok := o.(serializers.SerializerOutput); ok { sc := &serializers.Config{ - TimestampUnits: time.Duration(1 * time.Second), + TimestampUnits: 1 * time.Second, DataFormat: "influx", } - if err := setFieldConfig(config.Config, sc); err != nil { + if err := setFieldConfig(cfg.Config, sc); err != nil { return "", err } serializer, err := serializers.NewSerializer(sc) @@ -294,7 +293,7 @@ func (a *api) CreatePlugin(config PluginConfigCreate) (models.PluginID, error) { agg := aggregator() // set the config - if err := setFieldConfig(config.Config, agg); err != nil { + if err := setFieldConfig(cfg.Config, agg); err != nil { return "", err } aggCfg := &models.AggregatorConfig{ @@ -303,11 +302,11 @@ func (a *api) CreatePlugin(config PluginConfigCreate) (models.PluginID, error) { Period: time.Second * 30, Grace: time.Second * 0, } - if err := setFieldConfig(config.Config, aggCfg); err != nil { + if err := setFieldConfig(cfg.Config, aggCfg); err != nil { return "", err } - if err := setFieldConfig(config.Config, &aggCfg.Filter); err != nil { + if err := setFieldConfig(cfg.Config, &aggCfg.Filter); err != nil { return "", err } @@ -332,15 +331,15 @@ func (a *api) CreatePlugin(config PluginConfigCreate) (models.PluginID, error) { rootp = unwrapme.Unwrap() } // set the config - if err := setFieldConfig(config.Config, rootp); err != nil { + if err := setFieldConfig(cfg.Config, rootp); err != nil { return "", err } // start it and put it into the agent manager? pluginConfig := &models.ProcessorConfig{Name: name} - if err := setFieldConfig(config.Config, pluginConfig); err != nil { + if err := setFieldConfig(cfg.Config, pluginConfig); err != nil { return "", err } - if err := setFieldConfig(config.Config, &pluginConfig.Filter); err != nil { + if err := setFieldConfig(cfg.Config, &pluginConfig.Filter); err != nil { return "", err } @@ -360,44 +359,43 @@ func (a *api) CreatePlugin(config PluginConfigCreate) (models.PluginID, error) { } } -func (a *api) GetPluginStatus(ID models.PluginID) models.PluginState { +func (a *api) GetPluginStatus(id models.PluginID) models.PluginState { for _, v := range a.agent.RunningInputs() { - if v.ID == ID.Uint64() { + if v.ID == id.Uint64() { return v.GetState() } } for _, v := range a.agent.RunningProcessors() { - if v.GetID() == ID.Uint64() { + if v.GetID() == id.Uint64() { return v.GetState() } } for _, v := range a.agent.RunningOutputs() { - if v.ID == ID.Uint64() { + if v.ID == id.Uint64() { return v.GetState() } } return models.PluginStateDead } -func (a *api) getPluginByID(ID models.PluginID) { - -} - -func (a *api) DeletePlugin(ID models.PluginID) error { +func (a *api) DeletePlugin(id models.PluginID) error { for _, v := range a.agent.RunningInputs() { - if v.ID == ID.Uint64() { + if v.ID == id.Uint64() { + log.Printf("I! [configapi] stopping plugin %q", v.LogName()) a.agent.StopInput(v) return nil } } for _, v := range a.agent.RunningProcessors() { - if v.GetID() == ID.Uint64() { + if v.GetID() == id.Uint64() { + log.Printf("I! 
[configapi] stopping plugin %q", v.LogName()) a.agent.StopProcessor(v) return nil } } for _, v := range a.agent.RunningOutputs() { - if v.ID == ID.Uint64() { + if v.ID == id.Uint64() { + log.Printf("I! [configapi] stopping plugin %q", v.LogName()) a.agent.StopOutput(v) return nil } @@ -405,11 +403,11 @@ func (a *api) DeletePlugin(ID models.PluginID) error { return nil } -// func (a *API) PausePlugin(ID models.PluginID) { +// func (a *API) PausePlugin(id models.PluginID) { // } -// func (a *API) ResumePlugin(ID models.PluginID) { +// func (a *API) ResumePlugin(id models.PluginID) { // } @@ -433,15 +431,12 @@ func setFieldConfig(cfg map[string]interface{}, p interface{}) error { } if !destField.CanSet() { destField.Addr() - // TODO: error? - fmt.Println("cannot set", k, destFieldType.Name()) - continue + return fmt.Errorf("cannot set %s (%s)", k, destFieldType.Name()) } val := reflect.ValueOf(v) if err := setObject(val, destField, destFieldType); err != nil { return fmt.Errorf("Could not set field %s: %w", k, err) } - } return nil } @@ -631,7 +626,7 @@ func setObject(from, to reflect.Value, destType reflect.Type) error { if err != nil { return fmt.Errorf("Couldn't parse size %q: %w", from.Interface().(string), err) } - to.SetInt(int64(size)) + to.SetInt(size) // TODO: handle slice types? default: // to.SetString(from.Interface().(string)) @@ -657,7 +652,7 @@ func setObject(from, to reflect.Value, destType reflect.Type) error { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: to.SetInt(from.Int()) case reflect.Float32, reflect.Float64: - to.SetFloat(float64(from.Float())) + to.SetFloat(from.Float()) case reflect.Interface: to.Set(from) default: diff --git a/plugins/configs/api/controller_test.go b/plugins/configs/api/controller_test.go index 6ed8596f76121..9c312d871eeb9 100644 --- a/plugins/configs/api/controller_test.go +++ b/plugins/configs/api/controller_test.go @@ -79,9 +79,6 @@ func TestListPluginTypes(t *testing.T) { // check anonymous composed fields require.EqualValues(t, "bool", gnmi.Config["InsecureSkipVerify"].Type) - - // TODO: check named composed fields - } func TestInputPluginLifecycle(t *testing.T) { @@ -432,7 +429,7 @@ func TestExampleWorstPlugin(t *testing.T) { } readTimeout := config.Duration(5 * time.Second) b := true - var i int = 1 + i := 1 f := float64(6) s := "I am a string pointer" header := http.Header{ diff --git a/plugins/configs/api/listener.go b/plugins/configs/api/listener.go index 51d6a092a8014..fa8400702636f 100644 --- a/plugins/configs/api/listener.go +++ b/plugins/configs/api/listener.go @@ -4,28 +4,32 @@ import ( "context" "encoding/json" "fmt" - "log" + "log" // nolint:revive "net/http" "time" "github.com/gorilla/mux" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/models" ) type ConfigAPIService struct { server *http.Server api *api + Log telegraf.Logger } -func newConfigAPIService(server *http.Server, api *api) *ConfigAPIService { +func newConfigAPIService(server *http.Server, api *api, logger telegraf.Logger) *ConfigAPIService { service := &ConfigAPIService{ server: server, api: api, + Log: logger, } server.Handler = service.mux() return service } +// nolint:revive func (s *ConfigAPIService) mux() *mux.Router { m := mux.NewRouter() m.HandleFunc("/status", s.status) @@ -51,13 +55,13 @@ func (s *ConfigAPIService) createPlugin(w http.ResponseWriter, req *http.Request dec := json.NewDecoder(req.Body) if err := dec.Decode(&cfg); err != nil { - log.Printf("E! 
[configapi] decode error %v", err) + s.Log.Error("decode error %v", err) w.WriteHeader(http.StatusBadRequest) return } id, err := s.api.CreatePlugin(cfg) if err != nil { - log.Printf("E! [configapi] error creating plugin %v", err) + s.Log.Error("error creating plugin %v", err) w.WriteHeader(http.StatusBadRequest) return } diff --git a/plugins/configs/api/listener_test.go b/plugins/configs/api/listener_test.go index abfbf2386051e..54ce7ca3dc365 100644 --- a/plugins/configs/api/listener_test.go +++ b/plugins/configs/api/listener_test.go @@ -31,9 +31,9 @@ func TestStartPlugin(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() c := config.NewConfig() - agent := agent.NewAgent(ctx, c) - api, outputCancel := newAPI(ctx, c, agent) - go agent.RunWithAPI(outputCancel) + a := agent.NewAgent(ctx, c) + api, outputCancel := newAPI(ctx, c, a) + go a.RunWithAPI(outputCancel) s := &ConfigAPIService{ api: api, @@ -55,7 +55,7 @@ func TestStartPlugin(t *testing.T) { require.EqualValues(t, 200, resp.StatusCode) err = json.NewDecoder(resp.Body).Decode(&createResp) require.NoError(t, err) - resp.Body.Close() + _ = resp.Body.Close() require.Regexp(t, `^[\da-f]{8}\d{8}$`, createResp.ID) @@ -71,7 +71,7 @@ func TestStartPlugin(t *testing.T) { require.EqualValues(t, 200, resp.StatusCode) err = json.NewDecoder(resp.Body).Decode(&statusResp) require.NoError(t, err) - resp.Body.Close() + _ = resp.Body.Close() } require.EqualValues(t, "running", statusResp.Status) @@ -82,7 +82,7 @@ func TestStartPlugin(t *testing.T) { listResp := []PluginConfigTypeInfo{} err = json.NewDecoder(resp.Body).Decode(&listResp) require.NoError(t, err) - resp.Body.Close() + _ = resp.Body.Close() if len(listResp) < 20 { require.FailNow(t, "expected there to be more than 20 plugins loaded, was only", len(listResp)) @@ -94,10 +94,9 @@ func TestStartPlugin(t *testing.T) { runningList := []Plugin{} err = json.NewDecoder(resp.Body).Decode(&runningList) require.NoError(t, err) - resp.Body.Close() + _ = resp.Body.Close() if len(runningList) != 1 { require.FailNow(t, "expected there to be 1 running plugin, was", len(runningList)) } - } diff --git a/plugins/configs/api/plugin.go b/plugins/configs/api/plugin.go index a2315db189eed..e63166e2219fb 100644 --- a/plugins/configs/api/plugin.go +++ b/plugins/configs/api/plugin.go @@ -5,6 +5,7 @@ import ( "fmt" "net/http" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/configs" @@ -19,6 +20,7 @@ type ConfigAPIPlugin struct { cancel context.CancelFunc server *ConfigAPIService + Log telegraf.Logger `toml:"-"` plugins []PluginConfig } @@ -29,9 +31,8 @@ func (a *ConfigAPIPlugin) GetName() string { func (a *ConfigAPIPlugin) Init(ctx context.Context, cfg *config.Config, agent config.AgentController) error { a.api, a.cancel = newAPI(ctx, cfg, agent) - // TODO: is this needed? 
if err := a.Storage.Init(); err != nil { - return nil + return fmt.Errorf("initializing storage: %w", err) } if err := a.Storage.Load("config-api", "plugins", &a.plugins); err != nil { @@ -49,7 +50,7 @@ func (a *ConfigAPIPlugin) Init(ctx context.Context, cfg *config.Config, agent co a.server = newConfigAPIService(&http.Server{ Addr: a.ServiceAddress, TLSConfig: tlsConfig, - }, a.api) + }, a.api, a.Log) a.server.Start() return nil diff --git a/plugins/inputs/mongodb/mongodb.go b/plugins/inputs/mongodb/mongodb.go index 0366636200064..196249b03418f 100644 --- a/plugins/inputs/mongodb/mongodb.go +++ b/plugins/inputs/mongodb/mongodb.go @@ -135,6 +135,7 @@ func (m *MongoDB) Init() error { opts.ReadPreference = readpref.Nearest() } + // TODO: should not connect in Init() function! client, err := mongo.Connect(ctx, opts) if err != nil { return fmt.Errorf("unable to connect to MongoDB: %q", err) diff --git a/plugins/parsers/json_v2/parser_test.go b/plugins/parsers/json_v2/parser_test.go index 3218ca6d1c942..aaf7d0ff01592 100644 --- a/plugins/parsers/json_v2/parser_test.go +++ b/plugins/parsers/json_v2/parser_test.go @@ -100,7 +100,7 @@ func TestData(t *testing.T) { }) cfg := config.NewConfig() cfg.SetAgent(&testAgentController{}) - err = cfg.LoadConfigData(context.Background(), context.Background(), []byte(buf)) + err = cfg.LoadConfigData(context.Background(), context.Background(), buf) require.NoError(t, err) // Gather the metrics from the input file configure diff --git a/plugins/processors/aws/ec2/ec2.go b/plugins/processors/aws/ec2/ec2.go index 7f0a8aafad82d..088ec09c83f5f 100644 --- a/plugins/processors/aws/ec2/ec2.go +++ b/plugins/processors/aws/ec2/ec2.go @@ -125,11 +125,10 @@ func (r *AwsEc2Processor) Init() error { } for _, tag := range r.ImdsTags { - if len(tag) > 0 && isImdsTagAllowed(tag) { - r.imdsTags[tag] = struct{}{} - } else { + if len(tag) == 0 || !isImdsTagAllowed(tag) { return fmt.Errorf("not allowed metadata tag specified in configuration: %s", tag) } + r.imdsTags[tag] = struct{}{} } if len(r.imdsTags) == 0 && len(r.EC2Tags) == 0 { return errors.New("no allowed metadata tags specified in configuration") diff --git a/plugins/storage/all/all.go b/plugins/storage/all/all.go index 40709549cb9bf..15e437f9bdbad 100644 --- a/plugins/storage/all/all.go +++ b/plugins/storage/all/all.go @@ -1,5 +1,6 @@ package all import ( + // Blank imports for plugins to register themselves _ "github.com/influxdata/telegraf/plugins/storage/boltdb" ) diff --git a/plugins/storage/boltdb/boltdb.go b/plugins/storage/boltdb/boltdb.go index 397c0879d9d14..a0240b0e411d5 100644 --- a/plugins/storage/boltdb/boltdb.go +++ b/plugins/storage/boltdb/boltdb.go @@ -32,7 +32,6 @@ func (s *BoltDBStorage) Init() error { } func (s *BoltDBStorage) Close() error { - fmt.Println("boltdb closing") return s.db.Close() } diff --git a/plugins/storage/boltdb/boltdb_test.go b/plugins/storage/boltdb/boltdb_test.go index c1a77605fb5d6..0d02deac7af03 100644 --- a/plugins/storage/boltdb/boltdb_test.go +++ b/plugins/storage/boltdb/boltdb_test.go @@ -19,7 +19,7 @@ func TestSaveLoadCycle(t *testing.T) { Filename: "testdb.db", } defer func() { - os.Remove("testdb.db") + _ = os.Remove("testdb.db") }() err := p.Init() require.NoError(t, err) From 6b9079a39dc95065fcf6a7ae0953dc4f13f76e24 Mon Sep 17 00:00:00 2001 From: helenosheaa <38860767+helenosheaa@users.noreply.github.com> Date: Tue, 27 Jul 2021 14:46:07 -0400 Subject: [PATCH 25/48] test agent controller pulled out and refactored --- config/config_test.go | 105 
++++++------------- config/types_test.go | 3 +- plugins/inputs/execd/execd_test.go | 43 +------- plugins/inputs/opcua/opcua_client_test.go | 44 +------- plugins/outputs/sumologic/sumologic_test.go | 44 +------- plugins/parsers/json_v2/parser_test.go | 45 +------- plugins/processors/aws/ec2/ec2_test.go | 45 +------- plugins/processors/starlark/starlark_test.go | 43 +------- testhelper/testAgentController.go | 50 +++++++++ 9 files changed, 97 insertions(+), 325 deletions(-) create mode 100644 testhelper/testAgentController.go diff --git a/config/config_test.go b/config/config_test.go index a6c71e6a7664b..8f02a5a67f0d1 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -22,14 +22,15 @@ import ( _ "github.com/influxdata/telegraf/plugins/outputs/file" "github.com/influxdata/telegraf/plugins/parsers" _ "github.com/influxdata/telegraf/plugins/processors/rename" + "github.com/influxdata/telegraf/testhelper" "github.com/stretchr/testify/require" ) func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) { c := config.NewConfig() - agentController := &testAgentController{} + agentController := &testhelper.TestAgentController{} c.SetAgent(agentController) - defer agentController.reset() + defer agentController.Reset() require.NoError(t, os.Setenv("MY_TEST_SERVER", "192.168.1.1")) require.NoError(t, os.Setenv("TEST_INTERVAL", "10s")) err := c.LoadConfig(context.Background(), context.Background(), "./testdata/single_plugin_env_vars.toml") @@ -75,9 +76,9 @@ func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) { func TestConfig_LoadSingleInput(t *testing.T) { c := config.NewConfig() - agentController := &testAgentController{} + agentController := &testhelper.TestAgentController{} c.SetAgent(agentController) - defer agentController.reset() + defer agentController.Reset() err := c.LoadConfig(context.Background(), context.Background(), "./testdata/single_plugin.toml") require.NoError(t, err) @@ -121,9 +122,9 @@ func TestConfig_LoadSingleInput(t *testing.T) { func TestConfig_LoadDirectory(t *testing.T) { c := config.NewConfig() - agentController := &testAgentController{} + agentController := &testhelper.TestAgentController{} c.SetAgent(agentController) - defer agentController.reset() + defer agentController.Reset() require.NoError(t, c.LoadConfig(context.Background(), context.Background(), "./testdata/single_plugin.toml")) require.NoError(t, c.LoadDirectory(context.Background(), context.Background(), "./testdata/subconfig")) @@ -230,9 +231,9 @@ func TestConfig_LoadDirectory(t *testing.T) { func TestConfig_LoadSpecialTypes(t *testing.T) { c := config.NewConfig() - agentController := &testAgentController{} + agentController := &testhelper.TestAgentController{} c.SetAgent(agentController) - defer agentController.reset() + defer agentController.Reset() err := c.LoadConfig(context.Background(), context.Background(), "./testdata/special_types.toml") require.NoError(t, err) require.Equal(t, 1, len(c.Inputs())) @@ -248,9 +249,9 @@ func TestConfig_LoadSpecialTypes(t *testing.T) { func TestConfig_FieldNotDefined(t *testing.T) { c := config.NewConfig() - agentController := &testAgentController{} + agentController := &testhelper.TestAgentController{} c.SetAgent(agentController) - defer agentController.reset() + defer agentController.Reset() err := c.LoadConfig(context.Background(), context.Background(), "./testdata/invalid_field.toml") require.Error(t, err, "invalid field name") require.Equal(t, "Error loading config file ./testdata/invalid_field.toml: plugin 
inputs.http_listener_v2: line 1: configuration specified the fields [\"not_a_field\"], but they weren't used", err.Error()) @@ -258,9 +259,9 @@ func TestConfig_FieldNotDefined(t *testing.T) { func TestConfig_WrongFieldType(t *testing.T) { c := config.NewConfig() - agentController := &testAgentController{} + agentController := &testhelper.TestAgentController{} c.SetAgent(agentController) - defer agentController.reset() + defer agentController.Reset() err := c.LoadConfig(context.Background(), context.Background(), "./testdata/wrong_field_type.toml") require.Error(t, err, "invalid field type") require.Equal(t, "Error loading config file ./testdata/wrong_field_type.toml: error parsing http_listener_v2, line 2: (config_test.MockupInputPlugin.Port) cannot unmarshal TOML string into int", err.Error()) @@ -275,9 +276,9 @@ func TestConfig_WrongFieldType(t *testing.T) { func TestConfig_InlineTables(t *testing.T) { // #4098 c := config.NewConfig() - agentController := &testAgentController{} + agentController := &testhelper.TestAgentController{} c.SetAgent(agentController) - defer agentController.reset() + defer agentController.Reset() require.NoError(t, c.LoadConfig(context.Background(), context.Background(), "./testdata/inline_table.toml")) require.Len(t, c.Outputs(), 2) @@ -291,9 +292,9 @@ func TestConfig_SliceComment(t *testing.T) { t.Skipf("Skipping until #3642 is resolved") c := config.NewConfig() - agentController := &testAgentController{} + agentController := &testhelper.TestAgentController{} c.SetAgent(agentController) - defer agentController.reset() + defer agentController.Reset() require.NoError(t, c.LoadConfig(context.Background(), context.Background(), "./testdata/slice_comment.toml")) require.Len(t, c.Outputs(), 1) @@ -306,9 +307,9 @@ func TestConfig_BadOrdering(t *testing.T) { // #3444: when not using inline tables, care has to be taken so subsequent configuration // doesn't become part of the table. This is not a bug, but TOML syntax. 
c := config.NewConfig() - agentController := &testAgentController{} + agentController := &testhelper.TestAgentController{} c.SetAgent(agentController) - defer agentController.reset() + defer agentController.Reset() err := c.LoadConfig(context.Background(), context.Background(), "./testdata/non_slice_slice.toml") require.Error(t, err, "bad ordering") require.Equal(t, "Error loading config file ./testdata/non_slice_slice.toml: error parsing http array, line 4: cannot unmarshal TOML array into string (need slice)", err.Error()) @@ -317,9 +318,9 @@ func TestConfig_BadOrdering(t *testing.T) { func TestConfig_AzureMonitorNamespacePrefix(t *testing.T) { // #8256 Cannot use empty string as the namespace prefix c := config.NewConfig() - agentController := &testAgentController{} + agentController := &testhelper.TestAgentController{} c.SetAgent(agentController) - defer agentController.reset() + defer agentController.Reset() require.NoError(t, c.LoadConfig(context.Background(), context.Background(), "./testdata/azure_monitor.toml")) require.Len(t, c.Outputs(), 2) @@ -343,9 +344,9 @@ func TestConfig_URLRetries3Fails(t *testing.T) { expected := fmt.Sprintf("Error loading config file %s: Retry 3 of 3 failed to retrieve remote config: 404 Not Found", ts.URL) c := config.NewConfig() - agentController := &testAgentController{} + agentController := &testhelper.TestAgentController{} c.SetAgent(agentController) - defer agentController.reset() + defer agentController.Reset() err := c.LoadConfig(context.Background(), context.Background(), ts.URL) require.Error(t, err) require.Equal(t, expected, err.Error()) @@ -366,9 +367,9 @@ func TestConfig_URLRetries3FailsThenPasses(t *testing.T) { defer ts.Close() c := config.NewConfig() - agentController := &testAgentController{} + agentController := &testhelper.TestAgentController{} c.SetAgent(agentController) - defer agentController.reset() + defer agentController.Reset() require.NoError(t, c.LoadConfig(context.Background(), context.Background(), ts.URL)) require.Equal(t, 4, responseCounter) } @@ -380,9 +381,9 @@ func TestConfig_getDefaultConfigPathFromEnvURL(t *testing.T) { defer ts.Close() c := config.NewConfig() - agentController := &testAgentController{} + agentController := &testhelper.TestAgentController{} c.SetAgent(agentController) - defer agentController.reset() + defer agentController.Reset() err := os.Setenv("TELEGRAF_CONFIG_PATH", ts.URL) require.NoError(t, err) configPath, err := config.GetDefaultConfigPath() @@ -394,9 +395,9 @@ func TestConfig_getDefaultConfigPathFromEnvURL(t *testing.T) { func TestConfig_URLLikeFileName(t *testing.T) { c := config.NewConfig() - agentController := &testAgentController{} + agentController := &testhelper.TestAgentController{} c.SetAgent(agentController) - defer agentController.reset() + defer agentController.Reset() err := c.LoadConfig(context.Background(), context.Background(), "http:##www.example.com.conf") require.Error(t, err) @@ -410,9 +411,9 @@ func TestConfig_URLLikeFileName(t *testing.T) { func TestConfig_OrderingProcessorsWithAggregators(t *testing.T) { c := config.NewConfig() - agentController := &testAgentController{} + agentController := &testhelper.TestAgentController{} c.SetAgent(agentController) - defer agentController.reset() + defer agentController.Reset() err := c.LoadConfig(context.Background(), context.Background(), "./testdata/processor_and_aggregator_order.toml") require.NoError(t, err) require.Equal(t, 1, len(c.Inputs())) @@ -434,9 +435,9 @@ func TestConfig_OrderingProcessorsWithAggregators(t *testing.T) 
{ func TestConfig_DefaultOrderingProcessorsWithAggregators(t *testing.T) { c := config.NewConfig() - agentController := &testAgentController{} + agentController := &testhelper.TestAgentController{} c.SetAgent(agentController) - defer agentController.reset() + defer agentController.Reset() err := c.LoadConfig(context.Background(), context.Background(), "./testdata/processor_and_aggregator_unordered.toml") require.NoError(t, err) require.Equal(t, 1, len(c.Inputs())) @@ -457,46 +458,6 @@ func TestConfig_DefaultOrderingProcessorsWithAggregators(t *testing.T) { require.EqualValues(t, expected, actual) } -type testAgentController struct { - inputs []*models.RunningInput - processors []models.ProcessorRunner - outputs []*models.RunningOutput - // configs []*config.RunningConfigPlugin -} - -func (a *testAgentController) reset() { - a.inputs = nil - a.processors = nil - a.outputs = nil - // a.configs = nil -} - -func (a *testAgentController) RunningInputs() []*models.RunningInput { - return a.inputs -} -func (a *testAgentController) RunningProcessors() []models.ProcessorRunner { - return a.processors -} -func (a *testAgentController) RunningOutputs() []*models.RunningOutput { - return a.outputs -} -func (a *testAgentController) AddInput(input *models.RunningInput) { - a.inputs = append(a.inputs, input) -} -func (a *testAgentController) AddProcessor(processor models.ProcessorRunner) { - a.processors = append(a.processors, processor) -} -func (a *testAgentController) AddOutput(output *models.RunningOutput) { - a.outputs = append(a.outputs, output) -} -func (a *testAgentController) RunInput(input *models.RunningInput, startTime time.Time) {} -func (a *testAgentController) RunProcessor(p models.ProcessorRunner) {} -func (a *testAgentController) RunOutput(ctx context.Context, output *models.RunningOutput) {} -func (a *testAgentController) RunConfigPlugin(ctx context.Context, plugin config.ConfigPlugin) {} -func (a *testAgentController) StopInput(i *models.RunningInput) {} -func (a *testAgentController) StopProcessor(p models.ProcessorRunner) {} -func (a *testAgentController) StopOutput(p *models.RunningOutput) {} - /*** Mockup INPUT plugin for testing to avoid cyclic dependencies ***/ type MockupInputPlugin struct { Servers []string `toml:"servers"` diff --git a/config/types_test.go b/config/types_test.go index a8a10c26c263c..1ecaf13a63ad5 100644 --- a/config/types_test.go +++ b/config/types_test.go @@ -8,12 +8,13 @@ import ( "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/processors/reverse_dns" + "github.com/influxdata/telegraf/testhelper" "github.com/stretchr/testify/require" ) func TestConfigDuration(t *testing.T) { c := config.NewConfig() - c.SetAgent(&testAgentController{}) + c.SetAgent(&testhelper.TestAgentController{}) err := c.LoadConfigData(context.Background(), context.Background(), []byte(` [[processors.reverse_dns]] cache_ttl = "3h" diff --git a/plugins/inputs/execd/execd_test.go b/plugins/inputs/execd/execd_test.go index bd4e56d5390d9..4bbc9b587996d 100644 --- a/plugins/inputs/execd/execd_test.go +++ b/plugins/inputs/execd/execd_test.go @@ -19,6 +19,7 @@ import ( "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/plugins/serializers" + "github.com/influxdata/telegraf/testhelper" "github.com/influxdata/telegraf/testutil" ) @@ -30,7 +31,7 @@ func TestSettingConfigWorks(t 
*testing.T) { signal = "SIGHUP" ` conf := config.NewConfig() - conf.SetAgent(&testAgentController{}) + conf.SetAgent(&testhelper.TestAgentController{}) require.NoError(t, conf.LoadConfigData(context.Background(), context.Background(), []byte(cfg))) require.Len(t, conf.Inputs(), 1) @@ -196,43 +197,3 @@ func runCounterProgram() error { } return nil } - -type testAgentController struct { - inputs []*models.RunningInput - processors []models.ProcessorRunner - outputs []*models.RunningOutput - // configs []*config.RunningConfigPlugin -} - -func (a *testAgentController) reset() { - a.inputs = nil - a.processors = nil - a.outputs = nil - // a.configs = nil -} - -func (a *testAgentController) RunningInputs() []*models.RunningInput { - return a.inputs -} -func (a *testAgentController) RunningProcessors() []models.ProcessorRunner { - return a.processors -} -func (a *testAgentController) RunningOutputs() []*models.RunningOutput { - return a.outputs -} -func (a *testAgentController) AddInput(input *models.RunningInput) { - a.inputs = append(a.inputs, input) -} -func (a *testAgentController) AddProcessor(processor models.ProcessorRunner) { - a.processors = append(a.processors, processor) -} -func (a *testAgentController) AddOutput(output *models.RunningOutput) { - a.outputs = append(a.outputs, output) -} -func (a *testAgentController) RunInput(input *models.RunningInput, startTime time.Time) {} -func (a *testAgentController) RunProcessor(p models.ProcessorRunner) {} -func (a *testAgentController) RunOutput(ctx context.Context, output *models.RunningOutput) {} -func (a *testAgentController) RunConfigPlugin(ctx context.Context, plugin config.ConfigPlugin) {} -func (a *testAgentController) StopInput(i *models.RunningInput) {} -func (a *testAgentController) StopProcessor(p models.ProcessorRunner) {} -func (a *testAgentController) StopOutput(p *models.RunningOutput) {} diff --git a/plugins/inputs/opcua/opcua_client_test.go b/plugins/inputs/opcua/opcua_client_test.go index 3a678fc385520..6669844769971 100644 --- a/plugins/inputs/opcua/opcua_client_test.go +++ b/plugins/inputs/opcua/opcua_client_test.go @@ -9,7 +9,7 @@ import ( "time" "github.com/influxdata/telegraf/config" - "github.com/influxdata/telegraf/models" + "github.com/influxdata/telegraf/testhelper" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -114,7 +114,7 @@ nodes = [{name="name4", identifier="4000", tags=[["tag1", "override"]]}] ` c := config.NewConfig() - c.SetAgent(&testAgentController{}) + c.SetAgent(&testhelper.TestAgentController{}) err := c.LoadConfigData(context.Background(), context.Background(), []byte(toml)) // opcua shouldn't be trying to connect in init. 
if !strings.Contains(err.Error(), "connection refused") { @@ -266,43 +266,3 @@ func TestValidateOPCTags(t *testing.T) { }) } } - -type testAgentController struct { - inputs []*models.RunningInput - processors []models.ProcessorRunner - outputs []*models.RunningOutput - // configs []*config.RunningConfigPlugin -} - -func (a *testAgentController) reset() { - a.inputs = nil - a.processors = nil - a.outputs = nil - // a.configs = nil -} - -func (a *testAgentController) RunningInputs() []*models.RunningInput { - return a.inputs -} -func (a *testAgentController) RunningProcessors() []models.ProcessorRunner { - return a.processors -} -func (a *testAgentController) RunningOutputs() []*models.RunningOutput { - return a.outputs -} -func (a *testAgentController) AddInput(input *models.RunningInput) { - a.inputs = append(a.inputs, input) -} -func (a *testAgentController) AddProcessor(processor models.ProcessorRunner) { - a.processors = append(a.processors, processor) -} -func (a *testAgentController) AddOutput(output *models.RunningOutput) { - a.outputs = append(a.outputs, output) -} -func (a *testAgentController) RunInput(input *models.RunningInput, startTime time.Time) {} -func (a *testAgentController) RunProcessor(p models.ProcessorRunner) {} -func (a *testAgentController) RunOutput(ctx context.Context, output *models.RunningOutput) {} -func (a *testAgentController) RunConfigPlugin(ctx context.Context, plugin config.ConfigPlugin) {} -func (a *testAgentController) StopInput(i *models.RunningInput) {} -func (a *testAgentController) StopProcessor(p models.ProcessorRunner) {} -func (a *testAgentController) StopOutput(p *models.RunningOutput) {} diff --git a/plugins/outputs/sumologic/sumologic_test.go b/plugins/outputs/sumologic/sumologic_test.go index 09bf41c9b294b..1879c98eba9bb 100644 --- a/plugins/outputs/sumologic/sumologic_test.go +++ b/plugins/outputs/sumologic/sumologic_test.go @@ -22,10 +22,10 @@ import ( "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/metric" - "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/serializers/carbon2" "github.com/influxdata/telegraf/plugins/serializers/graphite" "github.com/influxdata/telegraf/plugins/serializers/prometheus" + "github.com/influxdata/telegraf/testhelper" ) func getMetric(t *testing.T) telegraf.Metric { @@ -434,7 +434,7 @@ func TestTOMLConfig(t *testing.T) { for _, tt := range testcases { t.Run(tt.name, func(t *testing.T) { c := config.NewConfig() - c.SetAgent(&testAgentController{}) + c.SetAgent(&testhelper.TestAgentController{}) if tt.expectedError { require.Error(t, c.LoadConfigData(context.Background(), context.Background(), tt.configBytes)) @@ -650,43 +650,3 @@ func countLines(t *testing.T, body io.Reader) int { return linesCount } - -type testAgentController struct { - inputs []*models.RunningInput - processors []models.ProcessorRunner - outputs []*models.RunningOutput - // configs []*config.RunningConfigPlugin -} - -func (a *testAgentController) reset() { - a.inputs = nil - a.processors = nil - a.outputs = nil - // a.configs = nil -} - -func (a *testAgentController) RunningInputs() []*models.RunningInput { - return a.inputs -} -func (a *testAgentController) RunningProcessors() []models.ProcessorRunner { - return a.processors -} -func (a *testAgentController) RunningOutputs() []*models.RunningOutput { - return a.outputs -} -func (a *testAgentController) AddInput(input 
*models.RunningInput) { - a.inputs = append(a.inputs, input) -} -func (a *testAgentController) AddProcessor(processor models.ProcessorRunner) { - a.processors = append(a.processors, processor) -} -func (a *testAgentController) AddOutput(output *models.RunningOutput) { - a.outputs = append(a.outputs, output) -} -func (a *testAgentController) RunInput(input *models.RunningInput, startTime time.Time) {} -func (a *testAgentController) RunProcessor(p models.ProcessorRunner) {} -func (a *testAgentController) RunOutput(ctx context.Context, output *models.RunningOutput) {} -func (a *testAgentController) RunConfigPlugin(ctx context.Context, plugin config.ConfigPlugin) {} -func (a *testAgentController) StopInput(i *models.RunningInput) {} -func (a *testAgentController) StopProcessor(p models.ProcessorRunner) {} -func (a *testAgentController) StopOutput(p *models.RunningOutput) {} diff --git a/plugins/parsers/json_v2/parser_test.go b/plugins/parsers/json_v2/parser_test.go index aaf7d0ff01592..c03ffcc1811cc 100644 --- a/plugins/parsers/json_v2/parser_test.go +++ b/plugins/parsers/json_v2/parser_test.go @@ -7,14 +7,13 @@ import ( "io/ioutil" "os" "testing" - "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" - "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs/file" "github.com/influxdata/telegraf/plugins/parsers/influx" + "github.com/influxdata/telegraf/testhelper" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -99,7 +98,7 @@ func TestData(t *testing.T) { return &file.File{} }) cfg := config.NewConfig() - cfg.SetAgent(&testAgentController{}) + cfg.SetAgent(&testhelper.TestAgentController{}) err = cfg.LoadConfigData(context.Background(), context.Background(), buf) require.NoError(t, err) @@ -148,43 +147,3 @@ func readMetricFile(path string) ([]telegraf.Metric, error) { return metrics, nil } - -type testAgentController struct { - inputs []*models.RunningInput - processors []models.ProcessorRunner - outputs []*models.RunningOutput - // configs []*config.RunningConfigPlugin -} - -func (a *testAgentController) reset() { - a.inputs = nil - a.processors = nil - a.outputs = nil - // a.configs = nil -} - -func (a *testAgentController) RunningInputs() []*models.RunningInput { - return a.inputs -} -func (a *testAgentController) RunningProcessors() []models.ProcessorRunner { - return a.processors -} -func (a *testAgentController) RunningOutputs() []*models.RunningOutput { - return a.outputs -} -func (a *testAgentController) AddInput(input *models.RunningInput) { - a.inputs = append(a.inputs, input) -} -func (a *testAgentController) AddProcessor(processor models.ProcessorRunner) { - a.processors = append(a.processors, processor) -} -func (a *testAgentController) AddOutput(output *models.RunningOutput) { - a.outputs = append(a.outputs, output) -} -func (a *testAgentController) RunInput(input *models.RunningInput, startTime time.Time) {} -func (a *testAgentController) RunProcessor(p models.ProcessorRunner) {} -func (a *testAgentController) RunOutput(ctx context.Context, output *models.RunningOutput) {} -func (a *testAgentController) RunConfigPlugin(ctx context.Context, plugin config.ConfigPlugin) {} -func (a *testAgentController) StopInput(i *models.RunningInput) {} -func (a *testAgentController) StopProcessor(p models.ProcessorRunner) {} -func (a *testAgentController) StopOutput(p 
*models.RunningOutput) {} diff --git a/plugins/processors/aws/ec2/ec2_test.go b/plugins/processors/aws/ec2/ec2_test.go index 754786d70384b..f63d3537f0689 100644 --- a/plugins/processors/aws/ec2/ec2_test.go +++ b/plugins/processors/aws/ec2/ec2_test.go @@ -3,10 +3,9 @@ package ec2 import ( "context" "testing" - "time" "github.com/influxdata/telegraf/config" - "github.com/influxdata/telegraf/models" + "github.com/influxdata/telegraf/testhelper" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -60,7 +59,7 @@ func TestBasicInitInvalidTagsReturnAnError(t *testing.T) { func TestLoadingConfig(t *testing.T) { c := config.NewConfig() - c.SetAgent(&testAgentController{}) + c.SetAgent(&testhelper.TestAgentController{}) err := c.LoadConfigData(context.Background(), context.Background(), []byte( ` [[processors.aws_ec2]] @@ -74,43 +73,3 @@ func TestLoadingConfig(t *testing.T) { require.NoError(t, err) require.Len(t, c.Processors(), 1) } - -type testAgentController struct { - inputs []*models.RunningInput - processors []models.ProcessorRunner - outputs []*models.RunningOutput - // configs []*config.RunningConfigPlugin -} - -func (a *testAgentController) reset() { - a.inputs = nil - a.processors = nil - a.outputs = nil - // a.configs = nil -} - -func (a *testAgentController) RunningInputs() []*models.RunningInput { - return a.inputs -} -func (a *testAgentController) RunningProcessors() []models.ProcessorRunner { - return a.processors -} -func (a *testAgentController) RunningOutputs() []*models.RunningOutput { - return a.outputs -} -func (a *testAgentController) AddInput(input *models.RunningInput) { - a.inputs = append(a.inputs, input) -} -func (a *testAgentController) AddProcessor(processor models.ProcessorRunner) { - a.processors = append(a.processors, processor) -} -func (a *testAgentController) AddOutput(output *models.RunningOutput) { - a.outputs = append(a.outputs, output) -} -func (a *testAgentController) RunInput(input *models.RunningInput, startTime time.Time) {} -func (a *testAgentController) RunProcessor(p models.ProcessorRunner) {} -func (a *testAgentController) RunOutput(ctx context.Context, output *models.RunningOutput) {} -func (a *testAgentController) RunConfigPlugin(ctx context.Context, plugin config.ConfigPlugin) {} -func (a *testAgentController) StopInput(i *models.RunningInput) {} -func (a *testAgentController) StopProcessor(p models.ProcessorRunner) {} -func (a *testAgentController) StopOutput(p *models.RunningOutput) {} diff --git a/plugins/processors/starlark/starlark_test.go b/plugins/processors/starlark/starlark_test.go index a3756ab418fe9..64ed5fda4df44 100644 --- a/plugins/processors/starlark/starlark_test.go +++ b/plugins/processors/starlark/starlark_test.go @@ -15,6 +15,7 @@ import ( "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/testhelper" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" starlarktime "go.starlark.net/lib/time" @@ -2606,7 +2607,7 @@ def apply(metric): // Build a Starlark plugin from the provided configuration. 
func buildPlugin(configContent string) (*Starlark, error) { c := config.NewConfig() - c.SetAgent(&testAgentController{}) + c.SetAgent(&testhelper.TestAgentController{}) err := c.LoadConfigData(context.Background(), context.Background(), []byte(configContent)) if err != nil { return nil, err @@ -3301,43 +3302,3 @@ func testLoadFunc(module string, logger telegraf.Logger) (starlark.StringDict, e func testNow(thread *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { return starlarktime.Time(time.Date(2021, 4, 15, 12, 0, 0, 999, time.UTC)), nil } - -type testAgentController struct { - inputs []*models.RunningInput - processors []models.ProcessorRunner - outputs []*models.RunningOutput - // configs []*config.RunningConfigPlugin -} - -func (a *testAgentController) reset() { - a.inputs = nil - a.processors = nil - a.outputs = nil - // a.configs = nil -} - -func (a *testAgentController) RunningInputs() []*models.RunningInput { - return a.inputs -} -func (a *testAgentController) RunningProcessors() []models.ProcessorRunner { - return a.processors -} -func (a *testAgentController) RunningOutputs() []*models.RunningOutput { - return a.outputs -} -func (a *testAgentController) AddInput(input *models.RunningInput) { - a.inputs = append(a.inputs, input) -} -func (a *testAgentController) AddProcessor(processor models.ProcessorRunner) { - a.processors = append(a.processors, processor) -} -func (a *testAgentController) AddOutput(output *models.RunningOutput) { - a.outputs = append(a.outputs, output) -} -func (a *testAgentController) RunInput(input *models.RunningInput, startTime time.Time) {} -func (a *testAgentController) RunProcessor(p models.ProcessorRunner) {} -func (a *testAgentController) RunOutput(ctx context.Context, output *models.RunningOutput) {} -func (a *testAgentController) RunConfigPlugin(ctx context.Context, plugin config.ConfigPlugin) {} -func (a *testAgentController) StopInput(i *models.RunningInput) {} -func (a *testAgentController) StopProcessor(p models.ProcessorRunner) {} -func (a *testAgentController) StopOutput(p *models.RunningOutput) {} diff --git a/testhelper/testAgentController.go b/testhelper/testAgentController.go new file mode 100644 index 0000000000000..cd6646855b0a0 --- /dev/null +++ b/testhelper/testAgentController.go @@ -0,0 +1,50 @@ +package testhelper + +// Do not import other Telegraf packages as it causes dependency loops +import ( + "context" + "time" + + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/models" +) + +type TestAgentController struct { + inputs []*models.RunningInput + processors []models.ProcessorRunner + outputs []*models.RunningOutput + // configs []*config.RunningConfigPlugin +} + +func (a *TestAgentController) Reset() { + a.inputs = nil + a.processors = nil + a.outputs = nil + // a.configs = nil +} + +func (a *TestAgentController) RunningInputs() []*models.RunningInput { + return a.inputs +} +func (a *TestAgentController) RunningProcessors() []models.ProcessorRunner { + return a.processors +} +func (a *TestAgentController) RunningOutputs() []*models.RunningOutput { + return a.outputs +} +func (a *TestAgentController) AddInput(input *models.RunningInput) { + a.inputs = append(a.inputs, input) +} +func (a *TestAgentController) AddProcessor(processor models.ProcessorRunner) { + a.processors = append(a.processors, processor) +} +func (a *TestAgentController) AddOutput(output *models.RunningOutput) { + a.outputs = append(a.outputs, output) +} +func (a 
*TestAgentController) RunInput(input *models.RunningInput, startTime time.Time) {} +func (a *TestAgentController) RunProcessor(p models.ProcessorRunner) {} +func (a *TestAgentController) RunOutput(ctx context.Context, output *models.RunningOutput) {} +func (a *TestAgentController) RunConfigPlugin(ctx context.Context, plugin config.ConfigPlugin) {} +func (a *TestAgentController) StopInput(i *models.RunningInput) {} +func (a *TestAgentController) StopProcessor(p models.ProcessorRunner) {} +func (a *TestAgentController) StopOutput(p *models.RunningOutput) {} From f75c7f5fd63ceaf6735dd1790f2a8a3d2cc9dad3 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 27 Jul 2021 17:24:23 -0500 Subject: [PATCH 26/48] Update docs/CONFIG_API.md --- docs/CONFIG_API.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/CONFIG_API.md b/docs/CONFIG_API.md index 3a35959473004..08da7c5486fc1 100644 --- a/docs/CONFIG_API.md +++ b/docs/CONFIG_API.md @@ -112,7 +112,7 @@ Create a new plugin. It will be started upon creation. ```json { - "name": "mqtt_consumer", + "name": "inputs.mqtt_consumer", "config": {. // .. }, @@ -192,4 +192,4 @@ An instance of a plugin running with a specific configuration name: string, config: Map[string, object], } -``` \ No newline at end of file +``` From 3a4b44e1205ddfcc8acec748a796f7a8e6a0736e Mon Sep 17 00:00:00 2001 From: helenosheaa <38860767+helenosheaa@users.noreply.github.com> Date: Fri, 30 Jul 2021 14:17:33 -0400 Subject: [PATCH 27/48] rename to agent helper, add comment, remove fanout and unused func in aggregator --- .../testAgentController.go | 4 ++- config/config_test.go | 34 +++++++++---------- config/types_test.go | 4 +-- internal/channel/fanout.go | 1 - internal/channel/fanout_test.go | 1 - models/running_aggregator.go | 14 ++------ models/running_aggregator_test.go | 12 +++---- plugins/inputs/execd/execd_test.go | 4 +-- plugins/inputs/opcua/opcua_client_test.go | 4 +-- plugins/outputs/sumologic/sumologic_test.go | 4 +-- plugins/parsers/json_v2/parser_test.go | 4 +-- plugins/processors/aws/ec2/ec2_test.go | 4 +-- plugins/processors/starlark/starlark_test.go | 4 +-- 13 files changed, 43 insertions(+), 51 deletions(-) rename {testhelper => agenthelper}/testAgentController.go (91%) delete mode 100644 internal/channel/fanout.go delete mode 100644 internal/channel/fanout_test.go diff --git a/testhelper/testAgentController.go b/agenthelper/testAgentController.go similarity index 91% rename from testhelper/testAgentController.go rename to agenthelper/testAgentController.go index cd6646855b0a0..e7087ea3be88c 100644 --- a/testhelper/testAgentController.go +++ b/agenthelper/testAgentController.go @@ -1,4 +1,4 @@ -package testhelper +package agenthelper // Do not import other Telegraf packages as it causes dependency loops import ( @@ -9,6 +9,8 @@ import ( "github.com/influxdata/telegraf/models" ) +// For use in unit tests where the test needs a config object but doesn't +// need to run its plugins. For example, when testing TOML config parsing.
type TestAgentController struct { inputs []*models.RunningInput processors []models.ProcessorRunner diff --git a/config/config_test.go b/config/config_test.go index 8f02a5a67f0d1..08166ca9abb05 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -12,6 +12,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/agenthelper" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/models" _ "github.com/influxdata/telegraf/plugins/aggregators/minmax" @@ -22,13 +23,12 @@ import ( _ "github.com/influxdata/telegraf/plugins/outputs/file" "github.com/influxdata/telegraf/plugins/parsers" _ "github.com/influxdata/telegraf/plugins/processors/rename" - "github.com/influxdata/telegraf/testhelper" "github.com/stretchr/testify/require" ) func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) { c := config.NewConfig() - agentController := &testhelper.TestAgentController{} + agentController := &agenthelper.TestAgentController{} c.SetAgent(agentController) defer agentController.Reset() require.NoError(t, os.Setenv("MY_TEST_SERVER", "192.168.1.1")) @@ -76,7 +76,7 @@ func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) { func TestConfig_LoadSingleInput(t *testing.T) { c := config.NewConfig() - agentController := &testhelper.TestAgentController{} + agentController := &agenthelper.TestAgentController{} c.SetAgent(agentController) defer agentController.Reset() err := c.LoadConfig(context.Background(), context.Background(), "./testdata/single_plugin.toml") @@ -122,7 +122,7 @@ func TestConfig_LoadSingleInput(t *testing.T) { func TestConfig_LoadDirectory(t *testing.T) { c := config.NewConfig() - agentController := &testhelper.TestAgentController{} + agentController := &agenthelper.TestAgentController{} c.SetAgent(agentController) defer agentController.Reset() require.NoError(t, c.LoadConfig(context.Background(), context.Background(), "./testdata/single_plugin.toml")) @@ -231,7 +231,7 @@ func TestConfig_LoadDirectory(t *testing.T) { func TestConfig_LoadSpecialTypes(t *testing.T) { c := config.NewConfig() - agentController := &testhelper.TestAgentController{} + agentController := &agenthelper.TestAgentController{} c.SetAgent(agentController) defer agentController.Reset() err := c.LoadConfig(context.Background(), context.Background(), "./testdata/special_types.toml") @@ -249,7 +249,7 @@ func TestConfig_LoadSpecialTypes(t *testing.T) { func TestConfig_FieldNotDefined(t *testing.T) { c := config.NewConfig() - agentController := &testhelper.TestAgentController{} + agentController := &agenthelper.TestAgentController{} c.SetAgent(agentController) defer agentController.Reset() err := c.LoadConfig(context.Background(), context.Background(), "./testdata/invalid_field.toml") @@ -259,7 +259,7 @@ func TestConfig_FieldNotDefined(t *testing.T) { func TestConfig_WrongFieldType(t *testing.T) { c := config.NewConfig() - agentController := &testhelper.TestAgentController{} + agentController := &agenthelper.TestAgentController{} c.SetAgent(agentController) defer agentController.Reset() err := c.LoadConfig(context.Background(), context.Background(), "./testdata/wrong_field_type.toml") @@ -276,7 +276,7 @@ func TestConfig_WrongFieldType(t *testing.T) { func TestConfig_InlineTables(t *testing.T) { // #4098 c := config.NewConfig() - agentController := &testhelper.TestAgentController{} + agentController := &agenthelper.TestAgentController{} c.SetAgent(agentController) defer agentController.Reset() 
require.NoError(t, c.LoadConfig(context.Background(), context.Background(), "./testdata/inline_table.toml")) @@ -292,7 +292,7 @@ func TestConfig_SliceComment(t *testing.T) { t.Skipf("Skipping until #3642 is resolved") c := config.NewConfig() - agentController := &testhelper.TestAgentController{} + agentController := &agenthelper.TestAgentController{} c.SetAgent(agentController) defer agentController.Reset() require.NoError(t, c.LoadConfig(context.Background(), context.Background(), "./testdata/slice_comment.toml")) @@ -307,7 +307,7 @@ func TestConfig_BadOrdering(t *testing.T) { // #3444: when not using inline tables, care has to be taken so subsequent configuration // doesn't become part of the table. This is not a bug, but TOML syntax. c := config.NewConfig() - agentController := &testhelper.TestAgentController{} + agentController := &agenthelper.TestAgentController{} c.SetAgent(agentController) defer agentController.Reset() err := c.LoadConfig(context.Background(), context.Background(), "./testdata/non_slice_slice.toml") @@ -318,7 +318,7 @@ func TestConfig_BadOrdering(t *testing.T) { func TestConfig_AzureMonitorNamespacePrefix(t *testing.T) { // #8256 Cannot use empty string as the namespace prefix c := config.NewConfig() - agentController := &testhelper.TestAgentController{} + agentController := &agenthelper.TestAgentController{} c.SetAgent(agentController) defer agentController.Reset() require.NoError(t, c.LoadConfig(context.Background(), context.Background(), "./testdata/azure_monitor.toml")) @@ -344,7 +344,7 @@ func TestConfig_URLRetries3Fails(t *testing.T) { expected := fmt.Sprintf("Error loading config file %s: Retry 3 of 3 failed to retrieve remote config: 404 Not Found", ts.URL) c := config.NewConfig() - agentController := &testhelper.TestAgentController{} + agentController := &agenthelper.TestAgentController{} c.SetAgent(agentController) defer agentController.Reset() err := c.LoadConfig(context.Background(), context.Background(), ts.URL) @@ -367,7 +367,7 @@ func TestConfig_URLRetries3FailsThenPasses(t *testing.T) { defer ts.Close() c := config.NewConfig() - agentController := &testhelper.TestAgentController{} + agentController := &agenthelper.TestAgentController{} c.SetAgent(agentController) defer agentController.Reset() require.NoError(t, c.LoadConfig(context.Background(), context.Background(), ts.URL)) @@ -381,7 +381,7 @@ func TestConfig_getDefaultConfigPathFromEnvURL(t *testing.T) { defer ts.Close() c := config.NewConfig() - agentController := &testhelper.TestAgentController{} + agentController := &agenthelper.TestAgentController{} c.SetAgent(agentController) defer agentController.Reset() err := os.Setenv("TELEGRAF_CONFIG_PATH", ts.URL) @@ -395,7 +395,7 @@ func TestConfig_getDefaultConfigPathFromEnvURL(t *testing.T) { func TestConfig_URLLikeFileName(t *testing.T) { c := config.NewConfig() - agentController := &testhelper.TestAgentController{} + agentController := &agenthelper.TestAgentController{} c.SetAgent(agentController) defer agentController.Reset() err := c.LoadConfig(context.Background(), context.Background(), "http:##www.example.com.conf") @@ -411,7 +411,7 @@ func TestConfig_URLLikeFileName(t *testing.T) { func TestConfig_OrderingProcessorsWithAggregators(t *testing.T) { c := config.NewConfig() - agentController := &testhelper.TestAgentController{} + agentController := &agenthelper.TestAgentController{} c.SetAgent(agentController) defer agentController.Reset() err := c.LoadConfig(context.Background(), context.Background(), 
"./testdata/processor_and_aggregator_order.toml") @@ -435,7 +435,7 @@ func TestConfig_OrderingProcessorsWithAggregators(t *testing.T) { func TestConfig_DefaultOrderingProcessorsWithAggregators(t *testing.T) { c := config.NewConfig() - agentController := &testhelper.TestAgentController{} + agentController := &agenthelper.TestAgentController{} c.SetAgent(agentController) defer agentController.Reset() err := c.LoadConfig(context.Background(), context.Background(), "./testdata/processor_and_aggregator_unordered.toml") diff --git a/config/types_test.go b/config/types_test.go index 1ecaf13a63ad5..1eaee932ba475 100644 --- a/config/types_test.go +++ b/config/types_test.go @@ -5,16 +5,16 @@ import ( "testing" "time" + "github.com/influxdata/telegraf/agenthelper" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/processors/reverse_dns" - "github.com/influxdata/telegraf/testhelper" "github.com/stretchr/testify/require" ) func TestConfigDuration(t *testing.T) { c := config.NewConfig() - c.SetAgent(&testhelper.TestAgentController{}) + c.SetAgent(&agenthelper.TestAgentController{}) err := c.LoadConfigData(context.Background(), context.Background(), []byte(` [[processors.reverse_dns]] cache_ttl = "3h" diff --git a/internal/channel/fanout.go b/internal/channel/fanout.go deleted file mode 100644 index 9c86d4ad8b45c..0000000000000 --- a/internal/channel/fanout.go +++ /dev/null @@ -1 +0,0 @@ -package channel diff --git a/internal/channel/fanout_test.go b/internal/channel/fanout_test.go deleted file mode 100644 index 9c86d4ad8b45c..0000000000000 --- a/internal/channel/fanout_test.go +++ /dev/null @@ -1 +0,0 @@ -package channel diff --git a/models/running_aggregator.go b/models/running_aggregator.go index 15ae20a14ded2..2a48bf99b4ad9 100644 --- a/models/running_aggregator.go +++ b/models/running_aggregator.go @@ -113,15 +113,7 @@ func (r *RunningAggregator) Init() error { return nil } -// func (r *RunningAggregator) Period() time.Duration { -// return r.Config.Period -// } - -// func (r *RunningAggregator) EndPeriod() time.Time { -// return r.periodEnd -// } - -func (r *RunningAggregator) updateWindow(start, until time.Time) { +func (r *RunningAggregator) UpdateWindow(start, until time.Time) { r.periodStart = start r.periodEnd = until r.log.Debugf("Updated aggregation range [%s, %s]", start, until) @@ -178,7 +170,7 @@ func (r *RunningAggregator) Push(acc telegraf.Accumulator) { since := r.periodEnd until := r.periodEnd.Add(r.Config.Period) - r.updateWindow(since, until) + r.UpdateWindow(since, until) r.push(acc.WithNewMetricMaker(r.LogName(), r.Log(), r.pushMetricMaker)) r.Aggregator.Reset() @@ -189,7 +181,7 @@ func (r *RunningAggregator) Start(acc telegraf.Accumulator) error { r.setState(PluginStateRunning) since, until := r.calculateUpdateWindow(time.Now()) - r.updateWindow(since, until) + r.UpdateWindow(since, until) r.ctx, r.cancel = context.WithCancel(context.Background()) r.wg.Add(1) diff --git a/models/running_aggregator_test.go b/models/running_aggregator_test.go index c080816d5118c..2d180b795adcc 100644 --- a/models/running_aggregator_test.go +++ b/models/running_aggregator_test.go @@ -23,7 +23,7 @@ func TestAdd(t *testing.T) { acc := &testutil.Accumulator{} now := time.Now() - ra.updateWindow(now, now.Add(ra.Config.Period)) + ra.UpdateWindow(now, now.Add(ra.Config.Period)) m := testutil.MustMetric("RITest", map[string]string{}, @@ -54,7 +54,7 @@ func TestAddWithoutDrop(t *testing.T) { 
acc := &testutil.Accumulator{} now := time.Now() - ra.updateWindow(now, now.Add(ra.Config.Period)) + ra.UpdateWindow(now, now.Add(ra.Config.Period)) m := testutil.MustMetric("RITest", map[string]string{}, @@ -88,7 +88,7 @@ func TestAddMetricsOutsideCurrentPeriod(t *testing.T) { require.NoError(t, ra.Config.Filter.Compile()) acc := &testutil.Accumulator{} now := time.Now() - ra.updateWindow(now, now.Add(ra.Config.Period)) + ra.UpdateWindow(now, now.Add(ra.Config.Period)) m := testutil.MustMetric("RITest", map[string]string{}, @@ -143,7 +143,7 @@ func TestAddMetricsOutsideCurrentPeriodWithGrace(t *testing.T) { require.NoError(t, ra.Config.Filter.Compile()) acc := &testutil.Accumulator{} now := time.Now() - ra.updateWindow(now, now.Add(ra.Config.Period)) + ra.UpdateWindow(now, now.Add(ra.Config.Period)) m := testutil.MustMetric("RITest", map[string]string{}, @@ -209,7 +209,7 @@ func TestAddAndPushOnePeriod(t *testing.T) { acc := &testutil.Accumulator{} now := time.Now() - ra.updateWindow(now, now.Add(ra.Config.Period)) + ra.UpdateWindow(now, now.Add(ra.Config.Period)) m := testutil.MustMetric("RITest", map[string]string{}, @@ -237,7 +237,7 @@ func TestAddDropOriginal(t *testing.T) { require.NoError(t, ra.Config.Filter.Compile()) now := time.Now() - ra.updateWindow(now, now.Add(ra.Config.Period)) + ra.UpdateWindow(now, now.Add(ra.Config.Period)) m := testutil.MustMetric("RITest", map[string]string{}, diff --git a/plugins/inputs/execd/execd_test.go b/plugins/inputs/execd/execd_test.go index 4bbc9b587996d..433f7d8413033 100644 --- a/plugins/inputs/execd/execd_test.go +++ b/plugins/inputs/execd/execd_test.go @@ -14,12 +14,12 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/agent" + "github.com/influxdata/telegraf/agenthelper" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/plugins/serializers" - "github.com/influxdata/telegraf/testhelper" "github.com/influxdata/telegraf/testutil" ) @@ -31,7 +31,7 @@ func TestSettingConfigWorks(t *testing.T) { signal = "SIGHUP" ` conf := config.NewConfig() - conf.SetAgent(&testhelper.TestAgentController{}) + conf.SetAgent(&agenthelper.TestAgentController{}) require.NoError(t, conf.LoadConfigData(context.Background(), context.Background(), []byte(cfg))) require.Len(t, conf.Inputs(), 1) diff --git a/plugins/inputs/opcua/opcua_client_test.go b/plugins/inputs/opcua/opcua_client_test.go index 6669844769971..4217ec2b2ee63 100644 --- a/plugins/inputs/opcua/opcua_client_test.go +++ b/plugins/inputs/opcua/opcua_client_test.go @@ -8,8 +8,8 @@ import ( "testing" "time" + "github.com/influxdata/telegraf/agenthelper" "github.com/influxdata/telegraf/config" - "github.com/influxdata/telegraf/testhelper" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -114,7 +114,7 @@ nodes = [{name="name4", identifier="4000", tags=[["tag1", "override"]]}] ` c := config.NewConfig() - c.SetAgent(&testhelper.TestAgentController{}) + c.SetAgent(&agenthelper.TestAgentController{}) err := c.LoadConfigData(context.Background(), context.Background(), []byte(toml)) // opcua shouldn't be trying to connect in init. 
if !strings.Contains(err.Error(), "connection refused") { diff --git a/plugins/outputs/sumologic/sumologic_test.go b/plugins/outputs/sumologic/sumologic_test.go index 1879c98eba9bb..b5564a74e2ff6 100644 --- a/plugins/outputs/sumologic/sumologic_test.go +++ b/plugins/outputs/sumologic/sumologic_test.go @@ -19,13 +19,13 @@ import ( "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/agenthelper" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/serializers/carbon2" "github.com/influxdata/telegraf/plugins/serializers/graphite" "github.com/influxdata/telegraf/plugins/serializers/prometheus" - "github.com/influxdata/telegraf/testhelper" ) func getMetric(t *testing.T) telegraf.Metric { @@ -434,7 +434,7 @@ func TestTOMLConfig(t *testing.T) { for _, tt := range testcases { t.Run(tt.name, func(t *testing.T) { c := config.NewConfig() - c.SetAgent(&testhelper.TestAgentController{}) + c.SetAgent(&agenthelper.TestAgentController{}) if tt.expectedError { require.Error(t, c.LoadConfigData(context.Background(), context.Background(), tt.configBytes)) diff --git a/plugins/parsers/json_v2/parser_test.go b/plugins/parsers/json_v2/parser_test.go index c03ffcc1811cc..213be5f3f6ab5 100644 --- a/plugins/parsers/json_v2/parser_test.go +++ b/plugins/parsers/json_v2/parser_test.go @@ -9,11 +9,11 @@ import ( "testing" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/agenthelper" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs/file" "github.com/influxdata/telegraf/plugins/parsers/influx" - "github.com/influxdata/telegraf/testhelper" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -98,7 +98,7 @@ func TestData(t *testing.T) { return &file.File{} }) cfg := config.NewConfig() - cfg.SetAgent(&testhelper.TestAgentController{}) + cfg.SetAgent(&agenthelper.TestAgentController{}) err = cfg.LoadConfigData(context.Background(), context.Background(), buf) require.NoError(t, err) diff --git a/plugins/processors/aws/ec2/ec2_test.go b/plugins/processors/aws/ec2/ec2_test.go index f63d3537f0689..b2e28abb16a19 100644 --- a/plugins/processors/aws/ec2/ec2_test.go +++ b/plugins/processors/aws/ec2/ec2_test.go @@ -4,8 +4,8 @@ import ( "context" "testing" + "github.com/influxdata/telegraf/agenthelper" "github.com/influxdata/telegraf/config" - "github.com/influxdata/telegraf/testhelper" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -59,7 +59,7 @@ func TestBasicInitInvalidTagsReturnAnError(t *testing.T) { func TestLoadingConfig(t *testing.T) { c := config.NewConfig() - c.SetAgent(&testhelper.TestAgentController{}) + c.SetAgent(&agenthelper.TestAgentController{}) err := c.LoadConfigData(context.Background(), context.Background(), []byte( ` [[processors.aws_ec2]] diff --git a/plugins/processors/starlark/starlark_test.go b/plugins/processors/starlark/starlark_test.go index 64ed5fda4df44..16609111fa67b 100644 --- a/plugins/processors/starlark/starlark_test.go +++ b/plugins/processors/starlark/starlark_test.go @@ -12,10 +12,10 @@ import ( "time" "github.com/influxdata/telegraf" + 
"github.com/influxdata/telegraf/agenthelper" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/parsers" - "github.com/influxdata/telegraf/testhelper" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" starlarktime "go.starlark.net/lib/time" @@ -2607,7 +2607,7 @@ def apply(metric): // Build a Starlark plugin from the provided configuration. func buildPlugin(configContent string) (*Starlark, error) { c := config.NewConfig() - c.SetAgent(&testhelper.TestAgentController{}) + c.SetAgent(&agenthelper.TestAgentController{}) err := c.LoadConfigData(context.Background(), context.Background(), []byte(configContent)) if err != nil { return nil, err From 9f03f3823ff3540e2703958196bb742362948511 Mon Sep 17 00:00:00 2001 From: helenosheaa <38860767+helenosheaa@users.noreply.github.com> Date: Mon, 2 Aug 2021 14:51:23 -0400 Subject: [PATCH 28/48] fix ec2 tests to be init --- plugins/processors/aws/ec2/ec2_test.go | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/plugins/processors/aws/ec2/ec2_test.go b/plugins/processors/aws/ec2/ec2_test.go index b2e28abb16a19..11b8ed7d77417 100644 --- a/plugins/processors/aws/ec2/ec2_test.go +++ b/plugins/processors/aws/ec2/ec2_test.go @@ -11,15 +11,11 @@ import ( ) func TestBasicStartup(t *testing.T) { - if testing.Short() { - t.Skip("skipped test that connects to external service") - } p := newAwsEc2Processor() p.Log = &testutil.Logger{} p.ImdsTags = []string{"accountId", "instanceId"} acc := &testutil.Accumulator{} - require.NoError(t, p.Start(acc)) - require.NoError(t, p.Stop()) + require.NoError(t, p.Init()) require.Len(t, acc.GetTelegrafMetrics(), 0) require.Len(t, acc.Errors, 0) @@ -34,8 +30,7 @@ func TestBasicStartupWithEC2Tags(t *testing.T) { p.ImdsTags = []string{"accountId", "instanceId"} p.EC2Tags = []string{"Name"} acc := &testutil.Accumulator{} - require.NoError(t, p.Start(acc)) - require.NoError(t, p.Stop()) + require.NoError(t, p.Init()) require.Len(t, acc.GetTelegrafMetrics(), 0) require.Len(t, acc.Errors, 0) From f32b3dd87a66197c39c9b4e8d5d81e8abd65509b Mon Sep 17 00:00:00 2001 From: helenosheaa <38860767+helenosheaa@users.noreply.github.com> Date: Mon, 2 Aug 2021 14:52:39 -0400 Subject: [PATCH 29/48] remove skip --- plugins/processors/aws/ec2/ec2_test.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/plugins/processors/aws/ec2/ec2_test.go b/plugins/processors/aws/ec2/ec2_test.go index 11b8ed7d77417..e7f16a01252e1 100644 --- a/plugins/processors/aws/ec2/ec2_test.go +++ b/plugins/processors/aws/ec2/ec2_test.go @@ -22,9 +22,6 @@ func TestBasicStartup(t *testing.T) { } func TestBasicStartupWithEC2Tags(t *testing.T) { - if testing.Short() { - t.Skip("skipped test that connects to external service") - } p := newAwsEc2Processor() p.Log = &testutil.Logger{} p.ImdsTags = []string{"accountId", "instanceId"} From 34d5dc2117ba27a5733ec2d6d806a43412463cb7 Mon Sep 17 00:00:00 2001 From: helenosheaa <38860767+helenosheaa@users.noreply.github.com> Date: Mon, 2 Aug 2021 15:03:59 -0400 Subject: [PATCH 30/48] move k.done --- plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go index 5429ae2f294ca..ab19e0875820a 100644 --- a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go +++ 
b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go @@ -91,7 +91,6 @@ func (k *Kafka) Start(acc telegraf.Accumulator) error { defer k.Unlock() var consumerErr error - k.done = make(chan struct{}) k.acc = acc config := consumergroup.NewConfig() @@ -123,6 +122,8 @@ func (k *Kafka) Start(acc telegraf.Accumulator) error { k.errs = k.Consumer.Errors() } + k.done = make(chan struct{}) + // Start the kafka message reader go k.receiver() k.Log.Infof("Started the kafka consumer service, peers: %v, topics: %v\n", From 8b593beff3585faa1cfc3fc0d54462d19e2308b6 Mon Sep 17 00:00:00 2001 From: helenosheaa <38860767+helenosheaa@users.noreply.github.com> Date: Tue, 3 Aug 2021 15:23:29 -0400 Subject: [PATCH 31/48] add delete/update plugin --- plugins/configs/api/listener.go | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/plugins/configs/api/listener.go b/plugins/configs/api/listener.go index fa8400702636f..3680d7959af68 100644 --- a/plugins/configs/api/listener.go +++ b/plugins/configs/api/listener.go @@ -37,6 +37,7 @@ func (s *ConfigAPIService) mux() *mux.Router { m.HandleFunc("/plugins/{id:[0-9a-f]+}/status", s.pluginStatus) m.HandleFunc("/plugins/list", s.listPlugins) m.HandleFunc("/plugins/running", s.runningPlugins) + m.HandleFunc("/plugins/{id:[0-9a-f]+}", s.deleteOrUpdatePlugin) return m } @@ -125,3 +126,33 @@ func (s *ConfigAPIService) Stop() { log.Printf("W! [configapi] error on shutdown: %s", err) } } + +func (s *ConfigAPIService) deleteOrUpdatePlugin(w http.ResponseWriter, req *http.Request) { + switch req.Method { + case "DELETE": + s.deletePlugin(w, req) + case "PUT": + s.updatePlugin(w, req) + default: + w.WriteHeader(http.StatusBadRequest) + } +} + +func (s *ConfigAPIService) deletePlugin(w http.ResponseWriter, req *http.Request) { + defer req.Body.Close() + id := mux.Vars(req)["id"] + if len(id) == 0 { + w.WriteHeader(http.StatusBadRequest) + return + } + if err := s.api.DeletePlugin(models.PluginID(id)); err != nil { + s.Log.Error("error deleting plugin %v", err) + // TODO: improve error handling? Would like to see status based on error type + w.WriteHeader(http.StatusInternalServerError) + } + w.WriteHeader(http.StatusOK) +} + +func (s *ConfigAPIService) updatePlugin(w http.ResponseWriter, req *http.Request) { + w.WriteHeader(http.StatusNotImplemented) +} From 9794b45c5560f468d491016cb9c9a052d9199360 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Wed, 4 Aug 2021 10:40:45 -0500 Subject: [PATCH 32/48] resolve agent feedback around potential race condition --- agent/agent.go | 78 ++++++++++++++++++++++++++++++++------- internal/channel/relay.go | 6 +++ 2 files changed, 70 insertions(+), 14 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index 489407a77b8ee..70c2de8a6422c 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -18,6 +18,19 @@ import ( "github.com/influxdata/telegraf/models" ) +// agentState describes the running state of the agent. +// Plugins can only be run once the agent is running. +// You can add plugins before the agent has started. +// You cannot remove plugins before the agent has started. +// You cannot add or remove plugins once it has started shutting down. +type agentState int8 + +const ( + agentStateStarting agentState = iota + agentStateRunning + agentStateShuttingDown +) + // Agent runs a set of plugins.
type Agent struct { Config *config.Config @@ -31,8 +44,8 @@ type Agent struct { ctx context.Context - started *sync.Cond // a condition that lets plugins know when hasStarted is true (when the agent starts) - hasStarted bool + stateChanged *sync.Cond // a condition that lets plugins know when the agent state changes + state agentState } // NewAgent returns an Agent for the given Config. @@ -44,9 +57,10 @@ func NewAgent(ctx context.Context, cfg *config.Config) *Agent { // as processors are added, they will be inserted between these two. return &Agent{ - Config: cfg, - ctx: ctx, - started: sync.NewCond(&sync.Mutex{}), + Config: cfg, + ctx: ctx, + stateChanged: sync.NewCond(&sync.Mutex{}), + state: agentStateStarting, inputGroupUnit: inputGroupUnit{ dst: inputDestCh, relay: channel.NewRelay(inputDestCh, outputSrcCh), @@ -158,20 +172,23 @@ func (a *Agent) RunWithAPI(outputCancel context.CancelFunc) { a.inputGroupUnit.relay.Start() - a.started.L.Lock() - a.hasStarted = true - a.started.Broadcast() - a.started.L.Unlock() + a.setState(agentStateRunning) <-a.Context().Done() + a.setState(agentStateShuttingDown) + // wait for all plugins to stop a.waitForPluginsToStop() log.Printf("D! [agent] Stopped Successfully") } +// AddInput adds an input to the agent to be managed func (a *Agent) AddInput(input *models.RunningInput) { + if a.isState(agentStateShuttingDown) { + return + } a.inputGroupUnit.Lock() defer a.inputGroupUnit.Unlock() @@ -184,12 +201,8 @@ func (a *Agent) AddInput(input *models.RunningInput) { func (a *Agent) startInput(input *models.RunningInput) error { // plugins can start before the agent has started; wait until it's asked to // start before collecting metrics in case other plugins are still loading. - a.started.L.Lock() - for !a.hasStarted { - a.started.Wait() - } + a.waitUntilState(agentStateRunning) - a.started.L.Unlock() a.inputGroupUnit.Lock() // Service input plugins are not normally subject to timestamp // rounding except for when precision is set on the input plugin. @@ -363,6 +376,9 @@ func (a *Agent) gatherOnce( } func (a *Agent) AddProcessor(processor models.ProcessorRunner) { + if a.isState(agentStateShuttingDown) { + return + } a.inputGroupUnit.Lock() defer a.inputGroupUnit.Unlock() a.processorGroupUnit.Lock() @@ -483,6 +499,10 @@ func (a *Agent) RunProcessor(p models.ProcessorRunner) { // StopProcessor stops processors or aggregators. // ProcessorRunner could be a *models.RunningProcessor or a *models.RunningAggregator func (a *Agent) StopProcessor(p models.ProcessorRunner) { + if a.isState(agentStateShuttingDown) { + return + } + a.processorGroupUnit.Lock() defer a.processorGroupUnit.Unlock() @@ -539,6 +559,9 @@ func (a *Agent) RunConfigPlugin(ctx context.Context, plugin config.ConfigPlugin) } func (a *Agent) StopOutput(output *models.RunningOutput) { + if a.isState(agentStateShuttingDown) { + return + } a.outputGroupUnit.Lock() defer a.outputGroupUnit.Unlock() @@ -569,6 +592,9 @@ func updateWindow(start time.Time, roundInterval bool, period time.Duration) (ti } func (a *Agent) AddOutput(output *models.RunningOutput) { + if a.isState(agentStateShuttingDown) { + return + } a.outputGroupUnit.Lock() a.outputGroupUnit.outputs = append(a.outputGroupUnit.outputs, outputUnit{output: output}) a.outputGroupUnit.Unlock() @@ -868,3 +894,27 @@ func (a *Agent) waitForPluginsToStop() { // everything closed; shut down } + +// setState sets the agent's internal state.
+func (a *Agent) setState(newState agentState) { + a.stateChanged.L.Lock() + a.state = newState + a.stateChanged.Broadcast() + a.stateChanged.L.Unlock() +} + +// isState returns true if the agent state matches the state parameter +func (a *Agent) isState(state agentState) bool { + a.stateChanged.L.Lock() + defer a.stateChanged.L.Unlock() + return a.state == state +} + +// waitUntilState waits until the agent state is the requested state. +func (a *Agent) waitUntilState(state agentState) { + a.stateChanged.L.Lock() + for a.state != state { + a.stateChanged.Wait() + } + a.stateChanged.L.Unlock() +} diff --git a/internal/channel/relay.go b/internal/channel/relay.go index c02fd726d9d26..624e673da8099 100644 --- a/internal/channel/relay.go +++ b/internal/channel/relay.go @@ -6,6 +6,9 @@ import ( "github.com/influxdata/telegraf" ) +// A Relay forwards metrics from one channel to another, connecting a source channel to a destination channel; +// think of it as connecting two pipes together. +// dst is closed when src is closed and the last message has been written out to dst. type Relay struct { sync.Mutex src <-chan telegraf.Metric @@ -28,12 +31,15 @@ func (r *Relay) Start() { }() } +// SetDest changes the destination channel. This makes channels hot-swappable, much like adding or removing items in a linked list. +// Should not be called after the source channel closes. func (r *Relay) SetDest(dst chan<- telegraf.Metric) { r.Lock() defer r.Unlock() r.dst = dst } +// GetDest returns the current dst channel. func (r *Relay) GetDest() chan<- telegraf.Metric { r.Lock() defer r.Unlock() From efa4eecd9d748dcbbb98259da26feba98d54ab72 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Wed, 4 Aug 2021 11:06:31 -0500 Subject: [PATCH 33/48] resolves feedback --- plugins/configs/api/controller.go | 3 +++ plugins/configs/api/listener.go | 4 ++++ plugins/configs/api/plugin.go | 21 +++++++++++++-------- 3 files changed, 20 insertions(+), 8 deletions(-) diff --git a/plugins/configs/api/controller.go b/plugins/configs/api/controller.go index a7198d1eb3453..e41e688e7bb96 100644 --- a/plugins/configs/api/controller.go +++ b/plugins/configs/api/controller.go @@ -156,6 +156,9 @@ func (a *api) ListRunningPlugins() (runningPlugins []Plugin) { runningPlugins = append(runningPlugins, p) } // TODO: add more types?
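+ // return an empty slice rather than nil so the HTTP handlers marshal an empty plugin list as [] instead of null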
+ if runningPlugins == nil { + return []Plugin{} + } return runningPlugins } diff --git a/plugins/configs/api/listener.go b/plugins/configs/api/listener.go index 3680d7959af68..40a0812eb7cc0 100644 --- a/plugins/configs/api/listener.go +++ b/plugins/configs/api/listener.go @@ -66,6 +66,7 @@ func (s *ConfigAPIService) createPlugin(w http.ResponseWriter, req *http.Request w.WriteHeader(http.StatusBadRequest) return } + w.Header().Set("Content-Type", "application/json") _, _ = w.Write([]byte(fmt.Sprintf(`{"id": "%s"}`, id))) } @@ -88,6 +89,7 @@ func (s *ConfigAPIService) listPlugins(w http.ResponseWriter, req *http.Request) w.WriteHeader(http.StatusInternalServerError) return } + w.Header().Set("Content-Type", "application/json") _, _ = w.Write(bytes) } @@ -101,6 +103,7 @@ func (s *ConfigAPIService) runningPlugins(w http.ResponseWriter, req *http.Reque w.WriteHeader(http.StatusInternalServerError) return } + w.Header().Set("Content-Type", "application/json") _, _ = w.Write(bytes) } @@ -112,6 +115,7 @@ func (s *ConfigAPIService) pluginStatus(w http.ResponseWriter, req *http.Request return } state := s.api.GetPluginStatus(models.PluginID(id)) + w.Header().Set("Content-Type", "application/json") _, err := w.Write([]byte(fmt.Sprintf(`{"status": %q}`, state.String()))) if err != nil { log.Printf("W! error writing to connection: %v", err) diff --git a/plugins/configs/api/plugin.go b/plugins/configs/api/plugin.go index e63166e2219fb..6a5df2eee8f22 100644 --- a/plugins/configs/api/plugin.go +++ b/plugins/configs/api/plugin.go @@ -30,13 +30,16 @@ func (a *ConfigAPIPlugin) GetName() string { func (a *ConfigAPIPlugin) Init(ctx context.Context, cfg *config.Config, agent config.AgentController) error { a.api, a.cancel = newAPI(ctx, cfg, agent) + if a.Storage == nil { + a.Log.Warn("initializing config-api without storage, changes via the api will not be persisted.") + } else { + if err := a.Storage.Init(); err != nil { + return fmt.Errorf("initializing storage: %w", err) + } - if err := a.Storage.Init(); err != nil { - return fmt.Errorf("initializing storage: %w", err) - } - - if err := a.Storage.Load("config-api", "plugins", &a.plugins); err != nil { - return fmt.Errorf("loading plugin state: %w", err) + if err := a.Storage.Load("config-api", "plugins", &a.plugins); err != nil { + return fmt.Errorf("loading plugin state: %w", err) + } } // start listening for HTTP requests @@ -63,8 +66,10 @@ func (a *ConfigAPIPlugin) Close() error { a.server.Stop() // store state - if err := a.Storage.Save("config-api", "plugins", &a.plugins); err != nil { - return fmt.Errorf("saving plugin state: %w", err) + if a.Storage != nil { + if err := a.Storage.Save("config-api", "plugins", &a.plugins); err != nil { + return fmt.Errorf("saving plugin state: %w", err) + } } return nil } From 0ed2b60546465e9c0054a42da0f8db6e911c9d91 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Wed, 4 Aug 2021 11:10:09 -0500 Subject: [PATCH 34/48] resolves feedback --- agent/agent.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/agent/agent.go b/agent/agent.go index 70c2de8a6422c..b37762d427839 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -794,6 +794,7 @@ func getPrecision(precision, interval time.Duration) time.Duration { // panicRecover displays an error if an input panics. 
func panicRecover(input *models.RunningInput) { + // nolint:revive // this function must be called with defer if err := recover(); err != nil { trace := make([]byte, 2048) runtime.Stack(trace, true) @@ -891,7 +892,6 @@ func (a *Agent) waitForPluginsToStop() { break } a.configPluginUnit.Unlock() - // everything closed; shut down } From cf7ced46288de626b7e995430b0fa6bfeb4cd113 Mon Sep 17 00:00:00 2001 From: helenosheaa <38860767+helenosheaa@users.noreply.github.com> Date: Wed, 4 Aug 2021 14:51:31 -0400 Subject: [PATCH 35/48] add allowed methods to mux router --- plugins/configs/api/listener.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/plugins/configs/api/listener.go b/plugins/configs/api/listener.go index 40a0812eb7cc0..3063bb99c959e 100644 --- a/plugins/configs/api/listener.go +++ b/plugins/configs/api/listener.go @@ -32,12 +32,12 @@ func newConfigAPIService(server *http.Server, api *api, logger telegraf.Logger) // nolint:revive func (s *ConfigAPIService) mux() *mux.Router { m := mux.NewRouter() - m.HandleFunc("/status", s.status) - m.HandleFunc("/plugins/create", s.createPlugin) - m.HandleFunc("/plugins/{id:[0-9a-f]+}/status", s.pluginStatus) - m.HandleFunc("/plugins/list", s.listPlugins) - m.HandleFunc("/plugins/running", s.runningPlugins) - m.HandleFunc("/plugins/{id:[0-9a-f]+}", s.deleteOrUpdatePlugin) + m.HandleFunc("/status", s.status).Methods("GET") + m.HandleFunc("/plugins/create", s.createPlugin).Methods("POST") + m.HandleFunc("/plugins/{id:[0-9a-f]+}/status", s.pluginStatus).Methods("GET") + m.HandleFunc("/plugins/list", s.listPlugins).Methods("GET") + m.HandleFunc("/plugins/running", s.runningPlugins).Methods("GET") + m.HandleFunc("/plugins/{id:[0-9a-f]+}", s.deleteOrUpdatePlugin).Methods("DELETE", "PUT") return m } From 06d4d3fb37292a04cab82ba990ab033143fe8576 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Wed, 4 Aug 2021 14:52:50 -0500 Subject: [PATCH 36/48] update config api docs location --- .../configs/api/README.md | 20 +++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) rename docs/CONFIG_API.md => plugins/configs/api/README.md (84%) diff --git a/docs/CONFIG_API.md b/plugins/configs/api/README.md similarity index 84% rename from docs/CONFIG_API.md rename to plugins/configs/api/README.md index 08da7c5486fc1..e081fbdbfac41 100644 --- a/docs/CONFIG_API.md +++ b/plugins/configs/api/README.md @@ -1,5 +1,20 @@ # Config API +The Config API allows you to use HTTP requests to make changes to the set of running +plugins, starting or stopping plugins as needed without restarting Telegraf. When +configured with storage, the current list of running plugins and their configuration is +saved, and will persist across restarts of Telegraf. + +## Example Config + +```toml + [[config.api]] + service_address = ":7551" + [config.api.storage.internal] + file = "config_state.db" +``` + + ## Endpoints ### GET /plugins/list @@ -93,7 +108,8 @@ none "persistent_session": false, "client_id": "", "username": "telegraf", - // password not returned + // some fields, like "password", are settable, but their values are not returned + "password": "********", "tls_ca": "/etc/telegraf/ca.pem", "tls_cert": "/etc/telegraf/cert.pem", "tls_key": "/etc/telegraf/key.pem", @@ -113,7 +129,7 @@ Create a new plugin. It will be started upon creation. ```json { "name": "inputs.mqtt_consumer", - "config": {. + "config": { // .. 
}, }, From 6730cc692c3da262e000c5e401754246f395011e Mon Sep 17 00:00:00 2001 From: helenosheaa <38860767+helenosheaa@users.noreply.github.com> Date: Fri, 6 Aug 2021 14:13:41 -0400 Subject: [PATCH 37/48] stop plugin test --- plugins/configs/api/listener_test.go | 81 ++++++++++++++++++++++++++++ 1 file changed, 81 insertions(+) diff --git a/plugins/configs/api/listener_test.go b/plugins/configs/api/listener_test.go index 54ce7ca3dc365..986700e7aed04 100644 --- a/plugins/configs/api/listener_test.go +++ b/plugins/configs/api/listener_test.go @@ -100,3 +100,84 @@ func TestStartPlugin(t *testing.T) { require.FailNow(t, "expected there to be 1 running plugin, was", len(runningList)) } } + +func TestStopPlugin(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + c := config.NewConfig() + a := agent.NewAgent(ctx, c) + api, outputCancel := newAPI(ctx, c, a) + go a.RunWithAPI(outputCancel) + + s := &ConfigAPIService{ + api: api, + } + srv := httptest.NewServer(s.mux()) + + // start plugin + buf := bytes.NewBufferString(`{ + "name": "inputs.cpu", + "config": { + "percpu": true + } +}`) + resp, err := http.Post(srv.URL+"/plugins/create", "application/json", buf) + require.NoError(t, err) + createResp := struct { + ID string + }{} + require.EqualValues(t, 200, resp.StatusCode) + err = json.NewDecoder(resp.Body).Decode(&createResp) + require.NoError(t, err) + _ = resp.Body.Close() + + require.Regexp(t, `^[\da-f]{8}\d{8}$`, createResp.ID) + + resp, err = http.Get(srv.URL + "/plugins/running") + require.NoError(t, err) + require.EqualValues(t, 200, resp.StatusCode) + runningList := []Plugin{} + err = json.NewDecoder(resp.Body).Decode(&runningList) + require.NoError(t, err) + _ = resp.Body.Close() + + // confirm plugin is running + if len(runningList) != 1 { + require.FailNow(t, "expected there to be 1 running plugin, was", len(runningList)) + } + + // stop plugin + client := &http.Client{} + req, err := http.NewRequest("DELETE", srv.URL+"/plugins/"+createResp.ID, nil) + + resp, err = client.Do(req) + require.NoError(t, err) + _ = resp.Body.Close() + + require.EqualValues(t, 200, resp.StatusCode) + require.NoError(t, err) + + resp, err = http.Get(srv.URL + "/plugins/running") + require.NoError(t, err) + require.EqualValues(t, 200, resp.StatusCode) + runningList = []Plugin{} + err = json.NewDecoder(resp.Body).Decode(&runningList) + require.NoError(t, err) + _ = resp.Body.Close() + + // confirm plugin has stopped + if len(runningList) >= 1 { + require.FailNow(t, "expected there to be no running plugin, was", len(runningList)) + } + + // try to delete a plugin which was already been deleted + req, err = http.NewRequest("DELETE", srv.URL+"/plugins/"+createResp.ID, nil) + + resp, err = client.Do(req) + require.NoError(t, err) + _ = resp.Body.Close() + + // still expect 200 response + require.EqualValues(t, 200, resp.StatusCode) + require.NoError(t, err) +} From 84ef98ae67423878300e2ddd8668b20126573de7 Mon Sep 17 00:00:00 2001 From: helenosheaa <38860767+helenosheaa@users.noreply.github.com> Date: Fri, 6 Aug 2021 14:22:01 -0400 Subject: [PATCH 38/48] linter fix --- plugins/configs/api/listener_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/plugins/configs/api/listener_test.go b/plugins/configs/api/listener_test.go index 986700e7aed04..10b24d260581f 100644 --- a/plugins/configs/api/listener_test.go +++ b/plugins/configs/api/listener_test.go @@ -149,6 +149,7 @@ func TestStopPlugin(t *testing.T) { // stop plugin client := &http.Client{} req, err := 
http.NewRequest("DELETE", srv.URL+"/plugins/"+createResp.ID, nil) + require.NoError(t, err) resp, err = client.Do(req) require.NoError(t, err) @@ -172,6 +173,7 @@ func TestStopPlugin(t *testing.T) { // try to delete a plugin which was already been deleted req, err = http.NewRequest("DELETE", srv.URL+"/plugins/"+createResp.ID, nil) + require.NoError(t, err) resp, err = client.Do(req) require.NoError(t, err) From 9992d86793753d25d6e0b23a00aca0fef7fbe588 Mon Sep 17 00:00:00 2001 From: David Reimschussel Date: Mon, 9 Aug 2021 13:36:53 -0600 Subject: [PATCH 39/48] Add shell script to test closing on sigint --- int_test.sh | 111 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 111 insertions(+) create mode 100755 int_test.sh diff --git a/int_test.sh b/int_test.sh new file mode 100755 index 0000000000000..325ad59f0d551 --- /dev/null +++ b/int_test.sh @@ -0,0 +1,111 @@ +#!/bin/bash + +#exit script if any command/pipeline fails +set -e + +#unset variables are errors +set -u + +#return value of a pipeline is the last non-zero return +set -o pipefail + +#print commands before they're executed +#set -x + + +file=telegraf.conf.int_test +port=7551 + +function log() +{ + echo ------- $* +} + +log writing temp config file +tee $file < /dev/null; do + log waiting for listening port + sleep 2 +done + +#it doesn't accept connections for a while after it starts listening +#sleep 10 + +log rest create outputs.file +curl \ + -d '{"name": "outputs.file", "config": {"files": ["stdout"]} }' \ + -H "Content-Type: application/json" \ + -X POST \ + localhost:$port/plugins/create +echo + +log rest create inputs.cpu +curl \ + -d '{"name": "inputs.cpu" }' \ + -H "Content-Type: application/json" \ + -X POST \ + localhost:$port/plugins/create +echo + +function running() +{ + ps $pid > /dev/null + local ret=$? + #echo $ret + return $ret +} + +log make sure it''s still running +running + +log send SIGINT +kill $pid + +log wait for it to exit +sleep 2 + +log make sure it has exited +[ ! running ] + +#unhook the cleanup trap +trap EXIT + +#remove the temp config file +rm $file + +log success From a6f3f4c6b98809155e06c27c8aa96a41a9f7cfd8 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Fri, 6 Aug 2021 14:10:28 -0500 Subject: [PATCH 40/48] resolve some TODOs --- agent/agent.go | 12 +++- config/config.go | 2 - config/config_test.go | 36 +++++++++-- models/id.go | 2 +- plugins/configs/api/controller.go | 87 ++++++++++++++++++++++++-- plugins/configs/api/controller_test.go | 32 ++-------- plugins/configs/api/listener.go | 2 +- 7 files changed, 130 insertions(+), 43 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index b37762d427839..356d54fd7037a 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -179,7 +179,8 @@ func (a *Agent) RunWithAPI(outputCancel context.CancelFunc) { a.setState(agentStateShuttingDown) // wait for all plugins to stop - a.waitForPluginsToStop() + a.waitForMainPluginsToStop() + a.waitForConfigPluginsToStop() log.Printf("D! [agent] Stopped Successfully") } @@ -526,7 +527,8 @@ func (a *Agent) RunConfigPlugin(ctx context.Context, plugin config.ConfigPlugin) a.configPluginUnit.Unlock() <-ctx.Done() - //TODO: we might want to wait for all other plugins to close? + a.waitForMainPluginsToStop() + if err := plugin.Close(); err != nil { log.Printf("E! 
[agent] Configuration plugin failed to close: %v", err) } @@ -845,7 +847,8 @@ func (a *Agent) Context() context.Context { return a.ctx } -func (a *Agent) waitForPluginsToStop() { +// waitForMainPluginsToStop waits for inputs, processors, and outputs to stop. +func (a *Agent) waitForMainPluginsToStop() { for { a.inputGroupUnit.Lock() if len(a.inputGroupUnit.inputUnits) > 0 { @@ -881,7 +884,10 @@ func (a *Agent) waitForPluginsToStop() { break } a.outputGroupUnit.Unlock() +} +// waitForConfigPluginsToStop waits for config plugins to stop +func (a *Agent) waitForConfigPluginsToStop() { for { a.configPluginUnit.Lock() if len(a.configPluginUnit.plugins) > 0 { diff --git a/config/config.go b/config/config.go index 9c33b0a103bf8..e1d93a3cf67bc 100644 --- a/config/config.go +++ b/config/config.go @@ -1619,8 +1619,6 @@ func (c *Config) buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, Filter: filter, } - // TODO: support FieldPass/FieldDrop on outputs - c.getFieldDuration(tbl, "flush_interval", &oc.FlushInterval) c.getFieldDuration(tbl, "flush_jitter", &oc.FlushJitter) diff --git a/config/config_test.go b/config/config_test.go index 08166ca9abb05..66e88bcca5331 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -14,6 +14,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/agenthelper" "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/models" _ "github.com/influxdata/telegraf/plugins/aggregators/minmax" "github.com/influxdata/telegraf/plugins/common/tls" @@ -458,6 +459,27 @@ func TestConfig_DefaultOrderingProcessorsWithAggregators(t *testing.T) { require.EqualValues(t, expected, actual) } +func TestConfig_OutputFieldPass(t *testing.T) { + now := time.Date(2021, 7, 5, 14, 45, 0, 0, time.UTC) + output := outputs.Outputs["http"]().(*MockupOuputPlugin) + + filter := models.Filter{ + FieldPass: []string{"good"}, + } + require.NoError(t, filter.Compile()) + outputConfig := &models.OutputConfig{ + Name: "http-test", + Filter: filter, + } + ro := models.NewRunningOutput(output, outputConfig, 1, 10) + ro.AddMetric(metric.New("test", map[string]string{"tag": "foo"}, map[string]interface{}{"good": "bar", "ignored": "woo"}, now)) + require.NoError(t, ro.Write()) + ro.Close() + require.Len(t, output.metrics, 1) + require.True(t, output.metrics[0].HasField("good")) + require.False(t, output.metrics[0].HasField("ignored")) +} + /*** Mockup INPUT plugin for testing to avoid cyclic dependencies ***/ type MockupInputPlugin struct { Servers []string `toml:"servers"` @@ -488,13 +510,17 @@ type MockupOuputPlugin struct { NamespacePrefix string `toml:"namespace_prefix"` Log telegraf.Logger `toml:"-"` tls.ClientConfig + metrics []telegraf.Metric } -func (m *MockupOuputPlugin) Connect() error { return nil } -func (m *MockupOuputPlugin) Close() error { return nil } -func (m *MockupOuputPlugin) Description() string { return "Mockup test output plugin" } -func (m *MockupOuputPlugin) SampleConfig() string { return "Mockup test output plugin" } -func (m *MockupOuputPlugin) Write(metrics []telegraf.Metric) error { return nil } +func (m *MockupOuputPlugin) Connect() error { return nil } +func (m *MockupOuputPlugin) Close() error { return nil } +func (m *MockupOuputPlugin) Description() string { return "Mockup test output plugin" } +func (m *MockupOuputPlugin) SampleConfig() string { return "Mockup test output plugin" } +func (m *MockupOuputPlugin) 
Write(metrics []telegraf.Metric) error { + m.metrics = append(m.metrics, metrics...) + return nil +} // Register the mockup plugin on loading func init() { diff --git a/models/id.go b/models/id.go index 2e9dbd14c1f98..624b7911ca3d8 100644 --- a/models/id.go +++ b/models/id.go @@ -1,7 +1,7 @@ package models import ( - "math/rand" // TODO: maybe switch to crypto/rand + "math/rand" "sync/atomic" ) diff --git a/plugins/configs/api/controller.go b/plugins/configs/api/controller.go index e41e688e7bb96..09399ce0b1dbc 100644 --- a/plugins/configs/api/controller.go +++ b/plugins/configs/api/controller.go @@ -106,7 +106,64 @@ func (a *api) ListPluginTypes() []PluginConfigTypeInfo { for _, name := range inputNames { creator := inputs.Inputs[name] cfg := PluginConfigTypeInfo{ - Name: name, + Name: "inputs." + name, + Config: map[string]FieldConfig{}, + } + + p := creator() + getFieldConfig(p, cfg.Config) + + result = append(result, cfg) + } + + processorNames := []string{} + for name := range processors.Processors { + processorNames = append(processorNames, name) + } + sort.Strings(processorNames) + + for _, name := range processorNames { + creator := processors.Processors[name] + cfg := PluginConfigTypeInfo{ + Name: "processors." + name, + Config: map[string]FieldConfig{}, + } + + p := creator() + getFieldConfig(p, cfg.Config) + + result = append(result, cfg) + } + + aggregatorNames := []string{} + for name := range aggregators.Aggregators { + aggregatorNames = append(aggregatorNames, name) + } + sort.Strings(aggregatorNames) + + for _, name := range aggregatorNames { + creator := aggregators.Aggregators[name] + cfg := PluginConfigTypeInfo{ + Name: "aggregators." + name, + Config: map[string]FieldConfig{}, + } + + p := creator() + getFieldConfig(p, cfg.Config) + + result = append(result, cfg) + } + + outputNames := []string{} + for name := range outputs.Outputs { + outputNames = append(outputNames, name) + } + sort.Strings(outputNames) + + for _, name := range outputNames { + creator := outputs.Outputs[name] + cfg := PluginConfigTypeInfo{ + Name: "outputs." + name, Config: map[string]FieldConfig{}, } @@ -115,7 +172,7 @@ func (a *api) ListPluginTypes() []PluginConfigTypeInfo { result = append(result, cfg) } - // TODO: add more types? + return result } @@ -155,7 +212,7 @@ func (a *api) ListRunningPlugins() (runningPlugins []Plugin) { getFieldConfig(v.Config, p.Config) runningPlugins = append(runningPlugins, p) } - // TODO: add more types? + if runningPlugins == nil { return []Plugin{} } @@ -163,11 +220,28 @@ func (a *api) ListRunningPlugins() (runningPlugins []Plugin) { } func (a *api) UpdatePlugin(id models.PluginID, cfg PluginConfigCreate) error { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() // TODO: shut down plugin and start a new plugin with the same id. - return nil + if err := a.DeletePlugin(id); err != nil { + return err + } + // wait for plugin to stop before recreating it with the same ID, otherwise we'll have issues. + for a.GetPluginStatus(id) != models.PluginStateDead { + select { + case <-ctx.Done(): + // plugin didn't stop in time.. do we recreate it anyway? + return errors.New("timed out shutting down plugin for update") + case <-time.After(100 * time.Millisecond): + // try again + } + } + _, err := a.CreatePlugin(cfg, id) + return err } -func (a *api) CreatePlugin(cfg PluginConfigCreate) (models.PluginID, error) { +// CreatePlugin creates a new plugin from a specified config. forcedID should be left blank when used by users via the API. 
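+// UpdatePlugin passes the old ID as forcedID so that a plugin recreated after an update keeps the identifier it was originally assigned.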
+func (a *api) CreatePlugin(cfg PluginConfigCreate, forcedID models.PluginID) (models.PluginID, error) { log.Printf("I! [configapi] creating plugin %q", cfg.Name) parts := strings.Split(cfg.Name, ".") @@ -229,6 +303,9 @@ func (a *api) CreatePlugin(cfg PluginConfigCreate) (models.PluginID, error) { } rp := models.NewRunningInput(i, pluginConfig) + if len(forcedID) > 0 { + rp.ID = forcedID.Uint64() + } rp.SetDefaultTags(a.config.Tags) if err := rp.Init(); err != nil { diff --git a/plugins/configs/api/controller_test.go b/plugins/configs/api/controller_test.go index 9c312d871eeb9..5234c37f65646 100644 --- a/plugins/configs/api/controller_test.go +++ b/plugins/configs/api/controller_test.go @@ -41,7 +41,7 @@ func TestListPluginTypes(t *testing.T) { // find the gnmi plugin var gnmi PluginConfigTypeInfo for _, conf := range pluginConfigs { - if conf.Name == "gnmi" { + if conf.Name == "inputs.gnmi" { gnmi = conf break } @@ -50,7 +50,7 @@ func TestListPluginTypes(t *testing.T) { // find the cloudwatch plugin var cloudwatch PluginConfigTypeInfo for _, conf := range pluginConfigs { - if conf.Name == "cloudwatch" { + if conf.Name == "inputs.cloudwatch" { cloudwatch = conf break } @@ -90,13 +90,9 @@ func TestInputPluginLifecycle(t *testing.T) { api, outputCancel := newAPI(ctx, cfg, a) defer outputCancel() - // t.Log("Running") - go a.RunWithAPI(outputCancel) - // TODO: defer a.Shutdown() // create - // t.Log("Create plugin") newPluginID, err := api.CreatePlugin(PluginConfigCreate{ Name: "inputs.cpu", Config: map[string]interface{}{ @@ -105,27 +101,23 @@ func TestInputPluginLifecycle(t *testing.T) { "collect_cpu_time": true, "report_active": true, }, - }) + }, "") require.NoError(t, err) require.NotZero(t, len(newPluginID)) // get plugin status - // t.Log("wait for start state") waitForStatus(t, api, newPluginID, "running", 20*time.Second) // list running - // t.Log("List running plugins") runningPlugins := api.ListRunningPlugins() require.Len(t, runningPlugins, 1) status := api.GetPluginStatus(newPluginID) require.Equal(t, "running", status.String()) // delete - // t.Log("delete plugin") err = api.DeletePlugin(newPluginID) require.NoError(t, err) - // t.Log("wait for dead state") waitForStatus(t, api, newPluginID, "dead", 300*time.Millisecond) // get plugin status until dead @@ -133,7 +125,6 @@ func TestInputPluginLifecycle(t *testing.T) { require.Equal(t, "dead", status.String()) // list running should have none - // t.Log("list plugins") runningPlugins = api.ListRunningPlugins() require.Len(t, runningPlugins, 0) } @@ -148,17 +139,14 @@ func TestAllPluginLifecycle(t *testing.T) { api, outputCancel := newAPI(runCtx, cfg, a) defer outputCancel() - // t.Log("Running") go a.RunWithAPI(outputCancel) // create - // t.Log("Create plugin") pluginIDs := []models.PluginID{} newPluginID, err := api.CreatePlugin(PluginConfigCreate{ Name: "inputs.cpu", Config: map[string]interface{}{}, - }) - // t.Log("inputs.cpu", newPluginID) + }, "") pluginIDs = append(pluginIDs, newPluginID) require.NoError(t, err) require.NotZero(t, len(newPluginID)) @@ -171,7 +159,7 @@ func TestAllPluginLifecycle(t *testing.T) { "dest": "a_host", }}, }, - }) + }, "") require.NoError(t, err) pluginIDs = append(pluginIDs, newPluginID) require.NotZero(t, len(newPluginID)) @@ -181,32 +169,27 @@ func TestAllPluginLifecycle(t *testing.T) { Config: map[string]interface{}{ "files": []string{"stdout"}, }, - }) - // t.Log("outputs.file", newPluginID) + }, "") pluginIDs = append(pluginIDs, newPluginID) require.NoError(t, err) require.NotZero(t, 
len(newPluginID)) for _, id := range pluginIDs { - // t.Log("waiting for plugin", id) waitForStatus(t, api, id, "running", 10*time.Second) } // list running - // t.Log("List running plugins") runningPlugins := api.ListRunningPlugins() require.Len(t, runningPlugins, 3) time.Sleep(5 * time.Second) // delete - // t.Log("delete plugins") for _, id := range pluginIDs { err = api.DeletePlugin(id) require.NoError(t, err) } - // t.Log("wait for dead state") for _, id := range pluginIDs { waitForStatus(t, api, id, "dead", 300*time.Millisecond) } @@ -226,7 +209,6 @@ func waitForStatus(t *testing.T, api *api, newPluginID models.PluginID, waitStat timeoutAt := time.Now().Add(timeout) for timeoutAt.After(time.Now()) { status := api.GetPluginStatus(newPluginID) - // t.Log("plugin", newPluginID, "status", status) if status.String() == waitStatus { return } @@ -343,8 +325,6 @@ func TestSetFieldConfig(t *testing.T) { require.Equal(t, expected, icfg) } -// TODO: test different plugin types - func TestExampleWorstPlugin(t *testing.T) { input := map[string]interface{}{ "elapsed": "3s", diff --git a/plugins/configs/api/listener.go b/plugins/configs/api/listener.go index 3063bb99c959e..463e50dfa3d2a 100644 --- a/plugins/configs/api/listener.go +++ b/plugins/configs/api/listener.go @@ -60,7 +60,7 @@ func (s *ConfigAPIService) createPlugin(w http.ResponseWriter, req *http.Request w.WriteHeader(http.StatusBadRequest) return } - id, err := s.api.CreatePlugin(cfg) + id, err := s.api.CreatePlugin(cfg, "") if err != nil { s.Log.Error("error creating plugin %v", err) w.WriteHeader(http.StatusBadRequest) From 034e6e7c051cffe10ce8b1776d8a3a02f96ffe03 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Tue, 10 Aug 2021 11:31:07 -0500 Subject: [PATCH 41/48] fix outputs not shutting down properly --- agent/agent.go | 30 +++++++----------- config/config.go | 7 ++-- config/plugins.go | 2 +- plugins/configs/api/controller.go | 44 ++++++++++++++++++-------- plugins/configs/api/controller_test.go | 16 ++++++---- plugins/configs/api/listener_test.go | 6 +++- plugins/configs/api/plugin.go | 7 ++-- 7 files changed, 66 insertions(+), 46 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index 356d54fd7037a..92476da945911 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -158,12 +158,12 @@ type inputGroupUnit struct { // they maintain their own internal buffers rather than depending on metrics buffered // in a channel) func (a *Agent) RunWithAPI(outputCancel context.CancelFunc) { - go func() { + go func(outputCancel context.CancelFunc) { a.runOutputFanout() // then the fanout closes, notify the outputs that they won't be receiving // more metrics via this cancel function. - outputCancel() - }() + outputCancel() // triggers outputs to finish up + }(outputCancel) log.Printf("I! [agent] Config: Interval:%s, Quiet:%#v, Hostname:%#v, "+ "Flush Interval:%s", @@ -312,13 +312,6 @@ retry: } } -// stopRunningOutputs stops all running outputs. -func stopRunningOutputs(outputs []*models.RunningOutput) { - for _, output := range outputs { - output.Close() - } -} - // gather runs an input's gather function periodically until the context is // done. func (a *Agent) gatherLoop( @@ -635,10 +628,9 @@ func (a *Agent) connectOutput(ctx context.Context, output *models.RunningOutput) // RunOutput runs an output; note the context should be a special context that // only cancels when it's time for the outputs to close: when the main context // has closed AND all the input and processor plugins are done. 
-func (a *Agent) RunOutput(ctx context.Context, output *models.RunningOutput) { - var cancel context.CancelFunc +func (a *Agent) RunOutput(outputCtx context.Context, output *models.RunningOutput) { // wrap with a cancel context so that the StopOutput can stop this individual output without stopping all the outputs. - ctx, cancel = context.WithCancel(ctx) + ctx, cancel := context.WithCancel(outputCtx) defer cancel() a.outputGroupUnit.Lock() @@ -707,6 +699,7 @@ func (a *Agent) runOutputFanout() { } } } + // closing of outputs is done by RunOutput } // flushLoop runs an output's flush function periodically until the context is done. @@ -728,11 +721,9 @@ func (a *Agent) flushLoop( for { // Favor shutdown over other methods. - select { - case <-ctx.Done(): + if ctx.Err() != nil { logError(a.flushOnce(output, ticker, output.Write)) return - default: } select { @@ -859,7 +850,11 @@ func (a *Agent) waitForMainPluginsToStop() { } break } - close(a.inputGroupUnit.dst) + + if a.inputGroupUnit.dst != nil { + close(a.inputGroupUnit.dst) + a.inputGroupUnit.dst = nil + } a.inputGroupUnit.Unlock() for { time.Sleep(100 * time.Millisecond) @@ -876,7 +871,6 @@ func (a *Agent) waitForMainPluginsToStop() { for { a.outputGroupUnit.Lock() if len(a.outputGroupUnit.outputs) > 0 { - // fmt.Printf("waiting for %d outputs\n", len(a.outputGroupUnit.outputs)) a.outputGroupUnit.Unlock() time.Sleep(100 * time.Millisecond) continue diff --git a/config/config.go b/config/config.go index e1d93a3cf67bc..2b4a95c0066c8 100644 --- a/config/config.go +++ b/config/config.go @@ -899,7 +899,7 @@ func (c *Config) LoadConfigData(ctx context.Context, outputCtx context.Context, switch pluginSubTable := pluginVal.(type) { case []*ast.Table: for _, t := range pluginSubTable { - if err = c.addConfigPlugin(ctx, pluginName, t); err != nil { + if err = c.addConfigPlugin(ctx, outputCtx, pluginName, t); err != nil { return fmt.Errorf("Error parsing %s, %s", pluginName, err) } } @@ -1019,7 +1019,8 @@ func parseConfig(contents []byte) (*ast.Table, error) { return toml.Parse(contents) } -func (c *Config) addConfigPlugin(ctx context.Context, name string, table *ast.Table) error { +// nolint:revive +func (c *Config) addConfigPlugin(ctx context.Context, outputCtx context.Context, name string, table *ast.Table) error { creator, ok := ConfigPlugins[name] if !ok { return fmt.Errorf("Undefined but requested config plugin: %s", name) @@ -1047,7 +1048,7 @@ func (c *Config) addConfigPlugin(ctx context.Context, name string, table *ast.Ta logger := models.NewLogger("config", name, name) models.SetLoggerOnPlugin(configPlugin, logger) - if err := configPlugin.Init(ctx, c, c.controller); err != nil { + if err := configPlugin.Init(ctx, outputCtx, c, c.controller); err != nil { return err } diff --git a/config/plugins.go b/config/plugins.go index 475917a673377..5dfa774209862 100644 --- a/config/plugins.go +++ b/config/plugins.go @@ -18,7 +18,7 @@ var ( // ConfigPlugin is the interface for implemnting plugins that change how Telegraf works type ConfigPlugin interface { GetName() string - Init(ctx context.Context, cfg *Config, agent AgentController) error + Init(ctx context.Context, outputCtx context.Context, cfg *Config, agent AgentController) error Close() error } diff --git a/plugins/configs/api/controller.go b/plugins/configs/api/controller.go index 09399ce0b1dbc..4b3c1fa8d17b5 100644 --- a/plugins/configs/api/controller.go +++ b/plugins/configs/api/controller.go @@ -2,6 +2,7 @@ package api import ( "context" + "encoding/json" "errors" "fmt" "log" // 
nolint:revive @@ -33,14 +34,15 @@ type api struct { outputCtx context.Context } -func newAPI(ctx context.Context, cfg *config.Config, agent config.AgentController) (_ *api, outputCancel context.CancelFunc) { +// nolint:revive +func newAPI(ctx context.Context, outputCtx context.Context, cfg *config.Config, agent config.AgentController) *api { c := &api{ - config: cfg, - agent: agent, - ctx: ctx, + config: cfg, + agent: agent, + ctx: ctx, + outputCtx: outputCtx, } - c.outputCtx, outputCancel = context.WithCancel(context.Background()) - return c, outputCancel + return c } // PluginConfig is a plugin name and details about the config fields. @@ -92,7 +94,7 @@ type Plugin struct { ID models.PluginID Name string // State() - Config map[string]FieldConfig + Config map[string]interface{} } func (a *api) ListPluginTypes() []PluginConfigTypeInfo { @@ -184,32 +186,40 @@ func (a *api) ListRunningPlugins() (runningPlugins []Plugin) { p := Plugin{ ID: idToString(v.ID), Name: v.Config.Name, - Config: map[string]FieldConfig{}, + Config: map[string]interface{}{}, } - getFieldConfig(v.Config, p.Config) + getFieldConfigValues(v.Config, p.Config) + getFieldConfigValues(v.Input, p.Config) runningPlugins = append(runningPlugins, p) } for _, v := range a.agent.RunningProcessors() { p := Plugin{ ID: idToString(v.GetID()), Name: v.LogName(), - Config: map[string]FieldConfig{}, + Config: map[string]interface{}{}, } val := reflect.ValueOf(v) if val.Kind() == reflect.Ptr { val = val.Elem() } pluginCfg := val.FieldByName("Config").Interface() - getFieldConfig(pluginCfg, p.Config) + getFieldConfigValues(pluginCfg, p.Config) + if proc := val.FieldByName("Processor"); proc.IsValid() && !proc.IsNil() { + getFieldConfigValues(proc.Interface(), p.Config) + } + if agg := val.FieldByName("Aggregator"); agg.IsValid() && !agg.IsNil() { + getFieldConfigValues(agg.Interface(), p.Config) + } runningPlugins = append(runningPlugins, p) } for _, v := range a.agent.RunningOutputs() { p := Plugin{ ID: idToString(v.ID), Name: v.Config.Name, - Config: map[string]FieldConfig{}, + Config: map[string]interface{}{}, } - getFieldConfig(v.Config, p.Config) + getFieldConfigValues(v.Config, p.Config) + getFieldConfigValues(v.Output, p.Config) runningPlugins = append(runningPlugins, p) } @@ -554,7 +564,8 @@ func getFieldByName(destStruct reflect.Value, fieldName string) (reflect.Value, return reflect.ValueOf(nil), reflect.TypeOf(nil) } -// getFieldConfig expects a plugin, p, (of any type) and a map to populate. +// getFieldConfig builds FieldConfig objects based on the structure of a plugin's struct +// it expects a plugin, p, (of any type) and a map to populate. 
// it calls itself recursively so p must be an interface{} func getFieldConfig(p interface{}, cfg map[string]FieldConfig) { structVal := reflect.ValueOf(p) @@ -657,6 +668,11 @@ func getFieldConfig(p interface{}, cfg map[string]FieldConfig) { } } +func getFieldConfigValues(p interface{}, cfg map[string]interface{}) { + b, _ := json.Marshal(p) + _ = json.Unmarshal(b, &cfg) +} + func setObject(from, to reflect.Value, destType reflect.Type) error { if from.Kind() == reflect.Interface { from = reflect.ValueOf(from.Interface()) diff --git a/plugins/configs/api/controller_test.go b/plugins/configs/api/controller_test.go index 5234c37f65646..874e9f26088ff 100644 --- a/plugins/configs/api/controller_test.go +++ b/plugins/configs/api/controller_test.go @@ -30,8 +30,7 @@ func TestListPluginTypes(t *testing.T) { cfg := config.NewConfig() // initalizes API a := agent.NewAgent(context.Background(), cfg) - api, cancel := newAPI(context.Background(), cfg, a) - defer cancel() + api := newAPI(context.Background(), context.Background(), cfg, a) pluginConfigs := api.ListPluginTypes() require.Greater(t, len(pluginConfigs), 10) @@ -85,10 +84,12 @@ func TestInputPluginLifecycle(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + outputCtx, outputCancel := context.WithCancel(context.Background()) + defer outputCancel() + cfg := config.NewConfig() // initalizes API a := agent.NewAgent(ctx, cfg) - api, outputCancel := newAPI(ctx, cfg, a) - defer outputCancel() + api := newAPI(ctx, outputCtx, cfg, a) go a.RunWithAPI(outputCancel) @@ -127,17 +128,20 @@ func TestInputPluginLifecycle(t *testing.T) { // list running should have none runningPlugins = api.ListRunningPlugins() require.Len(t, runningPlugins, 0) + require.Equal(t, []Plugin{}, runningPlugins) } func TestAllPluginLifecycle(t *testing.T) { runCtx, cancel := context.WithCancel(context.Background()) defer cancel() + outputCtx, outputCancel := context.WithCancel(context.Background()) + defer outputCancel() + cfg := config.NewConfig() a := agent.NewAgent(context.Background(), cfg) - api, outputCancel := newAPI(runCtx, cfg, a) - defer outputCancel() + api := newAPI(runCtx, outputCtx, cfg, a) go a.RunWithAPI(outputCancel) diff --git a/plugins/configs/api/listener_test.go b/plugins/configs/api/listener_test.go index 10b24d260581f..4badb44fabef5 100644 --- a/plugins/configs/api/listener_test.go +++ b/plugins/configs/api/listener_test.go @@ -30,9 +30,13 @@ func TestStatus(t *testing.T) { func TestStartPlugin(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + + outputCtx, outputCancel := context.WithCancel(context.Background()) + defer outputCancel() + c := config.NewConfig() a := agent.NewAgent(ctx, c) - api, outputCancel := newAPI(ctx, c, a) + api := newAPI(ctx, outputCtx, c, a) go a.RunWithAPI(outputCancel) s := &ConfigAPIService{ diff --git a/plugins/configs/api/plugin.go b/plugins/configs/api/plugin.go index 6a5df2eee8f22..e1c96fdb23011 100644 --- a/plugins/configs/api/plugin.go +++ b/plugins/configs/api/plugin.go @@ -17,7 +17,6 @@ type ConfigAPIPlugin struct { tls.ServerConfig api *api - cancel context.CancelFunc server *ConfigAPIService Log telegraf.Logger `toml:"-"` @@ -28,8 +27,10 @@ func (a *ConfigAPIPlugin) GetName() string { return "api" } -func (a *ConfigAPIPlugin) Init(ctx context.Context, cfg *config.Config, agent config.AgentController) error { - a.api, a.cancel = newAPI(ctx, cfg, agent) +// Init initializes the config api plugin. 
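+// ctx covers the lifetime of the config plugin itself, while outputCtx is handed to newAPI so that outputs created through the API are only cancelled once the rest of the pipeline has drained (see Agent.RunWithAPI).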
+// nolint:revive +func (a *ConfigAPIPlugin) Init(ctx context.Context, outputCtx context.Context, cfg *config.Config, agent config.AgentController) error { + a.api = newAPI(ctx, outputCtx, cfg, agent) if a.Storage == nil { a.Log.Warn("initializing config-api without storage, changes via the api will not be persisted.") } else { From 5d95894a232242765aaad0140d67a7746e835242 Mon Sep 17 00:00:00 2001 From: helenosheaa <38860767+helenosheaa@users.noreply.github.com> Date: Tue, 10 Aug 2021 15:27:25 -0400 Subject: [PATCH 42/48] log write errors --- plugins/configs/api/listener.go | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/plugins/configs/api/listener.go b/plugins/configs/api/listener.go index 463e50dfa3d2a..7f02b9edd3217 100644 --- a/plugins/configs/api/listener.go +++ b/plugins/configs/api/listener.go @@ -90,7 +90,11 @@ func (s *ConfigAPIService) listPlugins(w http.ResponseWriter, req *http.Request) return } w.Header().Set("Content-Type", "application/json") - _, _ = w.Write(bytes) + _, err = w.Write(bytes) + if err != nil { + log.Printf("W! error writing to connection: %v", err) + return + } } func (s *ConfigAPIService) runningPlugins(w http.ResponseWriter, req *http.Request) { @@ -104,7 +108,11 @@ func (s *ConfigAPIService) runningPlugins(w http.ResponseWriter, req *http.Reque return } w.Header().Set("Content-Type", "application/json") - _, _ = w.Write(bytes) + _, err = w.Write(bytes) + if err != nil { + log.Printf("W! error writing to connection: %v", err) + return + } } func (s *ConfigAPIService) pluginStatus(w http.ResponseWriter, req *http.Request) { From c1f904a81c2cd32602e2f910f192cb66396aba68 Mon Sep 17 00:00:00 2001 From: helenosheaa <38860767+helenosheaa@users.noreply.github.com> Date: Tue, 10 Aug 2021 15:37:28 -0400 Subject: [PATCH 43/48] more log write errors --- plugins/configs/api/listener.go | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/plugins/configs/api/listener.go b/plugins/configs/api/listener.go index 7f02b9edd3217..f88472461f462 100644 --- a/plugins/configs/api/listener.go +++ b/plugins/configs/api/listener.go @@ -45,7 +45,11 @@ func (s *ConfigAPIService) status(w http.ResponseWriter, req *http.Request) { if req.Body != nil { defer req.Body.Close() } - _, _ = w.Write([]byte("ok")) + _, err := w.Write([]byte("ok")) + if err != nil { + log.Printf("W! error writing to connection: %v", err) + return + } } func (s *ConfigAPIService) createPlugin(w http.ResponseWriter, req *http.Request) { @@ -67,7 +71,11 @@ func (s *ConfigAPIService) createPlugin(w http.ResponseWriter, req *http.Request return } w.Header().Set("Content-Type", "application/json") - _, _ = w.Write([]byte(fmt.Sprintf(`{"id": "%s"}`, id))) + _, err = w.Write([]byte(fmt.Sprintf(`{"id": "%s"}`, id))) + if err != nil { + log.Printf("W! 
error writing to connection: %v", err) + return + } } func (s *ConfigAPIService) Start() { From ffa54dbc976f7990d04d535fe60c713bc3587fd4 Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Tue, 10 Aug 2021 14:38:29 -0500 Subject: [PATCH 44/48] resolve feedback --- plugins/configs/api/controller.go | 4 ++-- plugins/configs/api/listener.go | 3 --- plugins/configs/api/listener_test.go | 11 ++++++++++- 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/plugins/configs/api/controller.go b/plugins/configs/api/controller.go index 4b3c1fa8d17b5..5a0e99db1c80a 100644 --- a/plugins/configs/api/controller.go +++ b/plugins/configs/api/controller.go @@ -185,7 +185,7 @@ func (a *api) ListRunningPlugins() (runningPlugins []Plugin) { for _, v := range a.agent.RunningInputs() { p := Plugin{ ID: idToString(v.ID), - Name: v.Config.Name, + Name: v.LogName(), Config: map[string]interface{}{}, } getFieldConfigValues(v.Config, p.Config) @@ -215,7 +215,7 @@ func (a *api) ListRunningPlugins() (runningPlugins []Plugin) { for _, v := range a.agent.RunningOutputs() { p := Plugin{ ID: idToString(v.ID), - Name: v.Config.Name, + Name: v.LogName(), Config: map[string]interface{}{}, } getFieldConfigValues(v.Config, p.Config) diff --git a/plugins/configs/api/listener.go b/plugins/configs/api/listener.go index f88472461f462..43695ceff03fb 100644 --- a/plugins/configs/api/listener.go +++ b/plugins/configs/api/listener.go @@ -79,9 +79,6 @@ func (s *ConfigAPIService) createPlugin(w http.ResponseWriter, req *http.Request } func (s *ConfigAPIService) Start() { - // if s.server.TLSConfig != nil { - // s.server.ListenAndServeTLS() - // } go func() { _ = s.server.ListenAndServe() }() diff --git a/plugins/configs/api/listener_test.go b/plugins/configs/api/listener_test.go index 4badb44fabef5..b1a6375fd18da 100644 --- a/plugins/configs/api/listener_test.go +++ b/plugins/configs/api/listener_test.go @@ -8,9 +8,11 @@ import ( "net/http" "net/http/httptest" "testing" + "time" "github.com/influxdata/telegraf/agent" "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/models" "github.com/stretchr/testify/require" ) @@ -108,9 +110,12 @@ func TestStartPlugin(t *testing.T) { func TestStopPlugin(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + outputCtx, outputCancel := context.WithCancel(context.Background()) + defer outputCancel() + c := config.NewConfig() a := agent.NewAgent(ctx, c) - api, outputCancel := newAPI(ctx, c, a) + api := newAPI(ctx, outputCtx, c, a) go a.RunWithAPI(outputCancel) s := &ConfigAPIService{ @@ -137,6 +142,8 @@ func TestStopPlugin(t *testing.T) { require.Regexp(t, `^[\da-f]{8}\d{8}$`, createResp.ID) + waitForStatus(t, api, models.PluginID(createResp.ID), models.PluginStateRunning.String(), 2*time.Second) + resp, err = http.Get(srv.URL + "/plugins/running") require.NoError(t, err) require.EqualValues(t, 200, resp.StatusCode) @@ -162,6 +169,8 @@ func TestStopPlugin(t *testing.T) { require.EqualValues(t, 200, resp.StatusCode) require.NoError(t, err) + waitForStatus(t, api, models.PluginID(createResp.ID), models.PluginStateDead.String(), 2*time.Second) + resp, err = http.Get(srv.URL + "/plugins/running") require.NoError(t, err) require.EqualValues(t, 200, resp.StatusCode) From 1cdd3d8b3853953e5612e9943c7e2362e2314ce8 Mon Sep 17 00:00:00 2001 From: helenosheaa <38860767+helenosheaa@users.noreply.github.com> Date: Wed, 18 Aug 2021 15:38:29 -0400 Subject: [PATCH 45/48] status code handling, refactor field names to be snake 
case --- config/types.go | 32 ++++ plugins/configs/api/controller.go | 254 +++++++++++++++++++------ plugins/configs/api/controller_test.go | 197 +++++++++++++++++-- plugins/configs/api/listener.go | 37 ++-- plugins/configs/api/listener_test.go | 52 ++++- 5 files changed, 494 insertions(+), 78 deletions(-) diff --git a/config/types.go b/config/types.go index 7c1c50b9e3690..eb76fd8641a71 100644 --- a/config/types.go +++ b/config/types.go @@ -2,6 +2,7 @@ package config import ( "bytes" + "fmt" "strconv" "time" @@ -86,3 +87,34 @@ func (s *Size) UnmarshalTOML(b []byte) error { func (s *Size) UnmarshalText(text []byte) error { return s.UnmarshalTOML(text) } + +func (s *Size) MarshalText() []byte { + if s == nil { + return []byte{} + } + v := *s + if v == 0 { + return []byte("0") + } + if v%1024 > 0 { + return []byte(fmt.Sprintf("%dB", v)) + } + v = v / 1024 + if v%1024 > 0 { + return []byte(fmt.Sprintf("%dKiB", v)) + } + v = v / 1024 + if v%1024 > 0 { + return []byte(fmt.Sprintf("%dMiB", v)) + } + v = v / 1024 + if v%1024 > 0 { + return []byte(fmt.Sprintf("%dGiB", v)) + } + v = v / 1024 + if v%1024 > 0 { + return []byte(fmt.Sprintf("%dTiB", v)) + } + v = v / 1024 + return []byte(fmt.Sprintf("%dPiB", v)) +} diff --git a/plugins/configs/api/controller.go b/plugins/configs/api/controller.go index 5a0e99db1c80a..a55a3fd459c4e 100644 --- a/plugins/configs/api/controller.go +++ b/plugins/configs/api/controller.go @@ -2,11 +2,11 @@ package api import ( "context" - "encoding/json" "errors" "fmt" "log" // nolint:revive "reflect" + "regexp" "sort" "strings" "time" @@ -188,8 +188,8 @@ func (a *api) ListRunningPlugins() (runningPlugins []Plugin) { Name: v.LogName(), Config: map[string]interface{}{}, } - getFieldConfigValues(v.Config, p.Config) - getFieldConfigValues(v.Input, p.Config) + getFieldConfigValuesFromStruct(v.Config, p.Config) + getFieldConfigValuesFromStruct(v.Input, p.Config) runningPlugins = append(runningPlugins, p) } for _, v := range a.agent.RunningProcessors() { @@ -203,12 +203,12 @@ func (a *api) ListRunningPlugins() (runningPlugins []Plugin) { val = val.Elem() } pluginCfg := val.FieldByName("Config").Interface() - getFieldConfigValues(pluginCfg, p.Config) + getFieldConfigValuesFromStruct(pluginCfg, p.Config) if proc := val.FieldByName("Processor"); proc.IsValid() && !proc.IsNil() { - getFieldConfigValues(proc.Interface(), p.Config) + getFieldConfigValuesFromStruct(proc.Interface(), p.Config) } if agg := val.FieldByName("Aggregator"); agg.IsValid() && !agg.IsNil() { - getFieldConfigValues(agg.Interface(), p.Config) + getFieldConfigValuesFromStruct(agg.Interface(), p.Config) } runningPlugins = append(runningPlugins, p) } @@ -218,8 +218,8 @@ func (a *api) ListRunningPlugins() (runningPlugins []Plugin) { Name: v.LogName(), Config: map[string]interface{}{}, } - getFieldConfigValues(v.Config, p.Config) - getFieldConfigValues(v.Output, p.Config) + getFieldConfigValuesFromStruct(v.Config, p.Config) + getFieldConfigValuesFromStruct(v.Output, p.Config) runningPlugins = append(runningPlugins, p) } @@ -261,13 +261,13 @@ func (a *api) CreatePlugin(cfg PluginConfigCreate, forcedID models.PluginID) (mo // add an input input, ok := inputs.Inputs[name] if !ok { - return "", fmt.Errorf("Error finding plugin with name %s", name) + return "", fmt.Errorf("%w: finding plugin with name %s", ErrNotFound, name) } // create a copy i := input() // set the config if err := setFieldConfig(cfg.Config, i); err != nil { - return "", err + return "", fmt.Errorf("%w: setting field %s", ErrBadRequest, err) } // get 
parser! @@ -278,11 +278,11 @@ func (a *api) CreatePlugin(cfg PluginConfigCreate, forcedID models.PluginID) (mo DataFormat: "influx", } if err := setFieldConfig(cfg.Config, pc); err != nil { - return "", err + return "", fmt.Errorf("%w: setting field %s", ErrBadRequest, err) } parser, err := parsers.NewParser(pc) if err != nil { - return "", err + return "", fmt.Errorf("%w: setting parser %s", ErrBadRequest, err) } t.SetParser(parser) } @@ -294,7 +294,7 @@ func (a *api) CreatePlugin(cfg PluginConfigCreate, forcedID models.PluginID) (mo DataFormat: "influx", } if err := setFieldConfig(cfg.Config, pc); err != nil { - return "", err + return "", fmt.Errorf("%w: setting field %s", ErrBadRequest, err) } t.SetParserFunc(func() (parsers.Parser, error) { @@ -305,11 +305,11 @@ func (a *api) CreatePlugin(cfg PluginConfigCreate, forcedID models.PluginID) (mo // start it and put it into the agent manager? pluginConfig := &models.InputConfig{Name: name} if err := setFieldConfig(cfg.Config, pluginConfig); err != nil { - return "", err + return "", fmt.Errorf("%w: setting field %s", ErrBadRequest, err) } if err := setFieldConfig(cfg.Config, &pluginConfig.Filter); err != nil { - return "", err + return "", fmt.Errorf("%w: setting field %s", ErrBadRequest, err) } rp := models.NewRunningInput(i, pluginConfig) @@ -319,7 +319,7 @@ func (a *api) CreatePlugin(cfg PluginConfigCreate, forcedID models.PluginID) (mo rp.SetDefaultTags(a.config.Tags) if err := rp.Init(); err != nil { - return "", fmt.Errorf("could not initialize plugin %w", err) + return "", fmt.Errorf("%w: could not initialize plugin %s", ErrBadRequest, err) } a.agent.AddInput(rp) @@ -331,22 +331,22 @@ func (a *api) CreatePlugin(cfg PluginConfigCreate, forcedID models.PluginID) (mo // add an output output, ok := outputs.Outputs[name] if !ok { - return "", fmt.Errorf("Error finding plugin with name %s", name) + return "", fmt.Errorf("%w: Error finding plugin with name %s", ErrNotFound, name) } // create a copy o := output() // set the config if err := setFieldConfig(cfg.Config, o); err != nil { - return "", err + return "", fmt.Errorf("%w: setting field %s", ErrBadRequest, err) } // start it and put it into the agent manager? 
pluginConfig := &models.OutputConfig{Name: name} if err := setFieldConfig(cfg.Config, pluginConfig); err != nil { - return "", err + return "", fmt.Errorf("%w: setting field %s", ErrBadRequest, err) } if err := setFieldConfig(cfg.Config, &pluginConfig.Filter); err != nil { - return "", err + return "", fmt.Errorf("%w: setting field %s", ErrBadRequest, err) } if t, ok := o.(serializers.SerializerOutput); ok { @@ -355,11 +355,11 @@ func (a *api) CreatePlugin(cfg PluginConfigCreate, forcedID models.PluginID) (mo DataFormat: "influx", } if err := setFieldConfig(cfg.Config, sc); err != nil { - return "", err + return "", fmt.Errorf("%w: setting field %s", ErrBadRequest, err) } serializer, err := serializers.NewSerializer(sc) if err != nil { - return "", err + return "", fmt.Errorf("%w: setting serializer %s", ErrBadRequest, err) } t.SetSerializer(serializer) } @@ -367,7 +367,7 @@ func (a *api) CreatePlugin(cfg PluginConfigCreate, forcedID models.PluginID) (mo ro := models.NewRunningOutput(o, pluginConfig, a.config.Agent.MetricBatchSize, a.config.Agent.MetricBufferLimit) if err := ro.Init(); err != nil { - return "", fmt.Errorf("could not initialize plugin %w", err) + return "", fmt.Errorf("%w: could not initialize plugin %s", ErrBadRequest, err) } a.agent.AddOutput(ro) @@ -378,13 +378,13 @@ func (a *api) CreatePlugin(cfg PluginConfigCreate, forcedID models.PluginID) (mo case "aggregators": aggregator, ok := aggregators.Aggregators[name] if !ok { - return "", fmt.Errorf("Error finding aggregator plugin with name %s", name) + return "", fmt.Errorf("%w: Error finding aggregator plugin with name %s", ErrNotFound, name) } agg := aggregator() // set the config if err := setFieldConfig(cfg.Config, agg); err != nil { - return "", err + return "", fmt.Errorf("%w: setting field", ErrBadRequest) } aggCfg := &models.AggregatorConfig{ Name: name, @@ -393,16 +393,16 @@ func (a *api) CreatePlugin(cfg PluginConfigCreate, forcedID models.PluginID) (mo Grace: time.Second * 0, } if err := setFieldConfig(cfg.Config, aggCfg); err != nil { - return "", err + return "", fmt.Errorf("%w: setting field", ErrBadRequest) } if err := setFieldConfig(cfg.Config, &aggCfg.Filter); err != nil { - return "", err + return "", fmt.Errorf("%w: setting field", ErrBadRequest) } ra := models.NewRunningAggregator(agg, aggCfg) if err := ra.Init(); err != nil { - return "", fmt.Errorf("could not initialize plugin %w", err) + return "", fmt.Errorf("%w: could not initialize plugin %s", ErrBadRequest, err) } a.agent.AddProcessor(ra) go a.agent.RunProcessor(ra) @@ -412,7 +412,7 @@ func (a *api) CreatePlugin(cfg PluginConfigCreate, forcedID models.PluginID) (mo case "processors": processor, ok := processors.Processors[name] if !ok { - return "", fmt.Errorf("Error finding processor plugin with name %s", name) + return "", fmt.Errorf("%w: Error finding processor plugin with name %s", ErrNotFound, name) } // create a copy p := processor() @@ -422,21 +422,21 @@ func (a *api) CreatePlugin(cfg PluginConfigCreate, forcedID models.PluginID) (mo } // set the config if err := setFieldConfig(cfg.Config, rootp); err != nil { - return "", err + return "", fmt.Errorf("%w: setting field", ErrBadRequest) } // start it and put it into the agent manager? 
pluginConfig := &models.ProcessorConfig{Name: name} if err := setFieldConfig(cfg.Config, pluginConfig); err != nil { - return "", err + return "", fmt.Errorf("%w: setting field", ErrBadRequest) } if err := setFieldConfig(cfg.Config, &pluginConfig.Filter); err != nil { - return "", err + return "", fmt.Errorf("%w: setting field", ErrBadRequest) } rp := models.NewRunningProcessor(p, pluginConfig) if err := rp.Init(); err != nil { - return "", fmt.Errorf("could not initialize plugin %w", err) + return "", fmt.Errorf("%w: could not initialize plugin %s", ErrBadRequest, err) } a.agent.AddProcessor(rp) @@ -445,7 +445,7 @@ func (a *api) CreatePlugin(cfg PluginConfigCreate, forcedID models.PluginID) (mo return idToString(rp.ID), nil default: - return "", errors.New("Unknown plugin type") + return "", fmt.Errorf("%w: Unknown plugin type", ErrNotFound) } } @@ -490,7 +490,7 @@ func (a *api) DeletePlugin(id models.PluginID) error { return nil } } - return nil + return ErrNotFound } // func (a *API) PausePlugin(id models.PluginID) { @@ -556,10 +556,11 @@ func getFieldByName(destStruct reflect.Value, fieldName string) (reflect.Value, if fieldType.Tag.Get("toml") == fieldName { return field, fieldType.Type } - if strings.ToLower(fieldType.Name) == fieldName && unicode.IsUpper(rune(fieldType.Name[0])) { - return field, fieldType.Type + if name, ok := toSnakeCase(fieldType.Name, fieldType); ok { + if name == fieldName && isExported(fieldType) { + return field, fieldType.Type + } } - // handle snake string case conversion } return reflect.ValueOf(nil), reflect.TypeOf(nil) } @@ -612,12 +613,12 @@ func getFieldConfig(p interface{}, cfg map[string]FieldConfig) { // if this field is a struct, get the structure of it. if ftType.Kind() == reflect.Struct && !isInternalStructFieldType(ft.Type) { if ft.Anonymous { // embedded - t := getSubTypeType(ft) + t := getSubTypeType(ft.Type) i := reflect.New(t) getFieldConfig(i.Interface(), cfg) } else { subCfg := map[string]FieldConfig{} - t := getSubTypeType(ft) + t := getSubTypeType(ft.Type) i := reflect.New(t) getFieldConfig(i.Interface(), subCfg) cfg[ft.Name] = FieldConfig{ @@ -650,7 +651,7 @@ func getFieldConfig(p interface{}, cfg map[string]FieldConfig) { // if we found a slice of objects, get the structure of that object if hasSubType(ft.Type) { - t := getSubTypeType(ft) + t := getSubTypeType(ft.Type) n := t.Name() _ = n fc.SubType = getFieldType(t) @@ -668,9 +669,157 @@ func getFieldConfig(p interface{}, cfg map[string]FieldConfig) { } } -func getFieldConfigValues(p interface{}, cfg map[string]interface{}) { - b, _ := json.Marshal(p) - _ = json.Unmarshal(b, &cfg) +// getFieldConfigValuesFromStruct takes a struct and populates a map. +func getFieldConfigValuesFromStruct(p interface{}, cfg map[string]interface{}) { + structVal := reflect.ValueOf(p) + structType := structVal.Type() + if structVal.IsZero() { + return + } + + for structType.Kind() == reflect.Ptr { + structVal = structVal.Elem() + structType = structType.Elem() + } + + // safety check. + if structType.Kind() != reflect.Struct { + // woah, what? 
+ panic(fmt.Sprintf("getFieldConfigValues expected a struct type, but got %v %v", p, structType.String())) + } + + for i := 0; i < structType.NumField(); i++ { + f := structVal.Field(i) + ft := structType.Field(i) + + if !isExported(ft) { + continue + } + if ft.Name == "Log" { + continue + } + ftType := ft.Type + if ftType.Kind() == reflect.Ptr { + ftType = ftType.Elem() + f = f.Elem() + } + // if struct call self recursively + // if it's composed call self recursively + if name, ok := toSnakeCase(ft.Name, ft); ok { + setMapKey(cfg, name, f) + } + } +} + +func getFieldConfigValuesFromValue(val reflect.Value) interface{} { + typ := val.Type() + // typ may be a pointer to a type + if typ.Kind() == reflect.Ptr { + val = val.Elem() + typ = val.Type() + } + + switch typ.Kind() { + case reflect.Slice: + return getFieldConfigValuesFromSlice(val) + case reflect.Struct: + m := map[string]interface{}{} + getFieldConfigValuesFromStruct(val.Interface(), m) + return m + case reflect.Map: + return getFieldConfigValuesFromMap(val) + case reflect.Bool: + return val.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32: + return val.Int() + case reflect.Int64: + // special case for config.Duration, time.Duration etc. + switch typ.String() { + case "time.Duration", "config.Duration": + return time.Duration(val.Int()).String() + case "config.Size": + sz := config.Size(val.Int()) + return string((&sz).MarshalText()) + } + return val.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return val.Uint() + case reflect.Float32, reflect.Float64: + return val.Float() + case reflect.Interface: + return val.Interface() // do we bother to decode this? + case reflect.Ptr: + return getFieldConfigValuesFromValue(val.Elem()) + case reflect.String: + return val.String() + default: + return val.Interface() // log that we missed the type? 
+ } +} + +func getFieldConfigValuesFromMap(f reflect.Value) map[string]interface{} { + obj := map[string]interface{}{} + iter := f.MapRange() + for iter.Next() { + setMapKey(obj, iter.Key().String(), iter.Value()) + } + return obj +} + +func getFieldConfigValuesFromSlice(val reflect.Value) []interface{} { + s := []interface{}{} + for i := 0; i < val.Len(); i++ { + s = append(s, getFieldConfigValuesFromValue(val.Index(i))) + } + return s +} + +func setMapKey(obj map[string]interface{}, key string, v reflect.Value) { + v = reflect.ValueOf(getFieldConfigValuesFromValue(v)) + switch v.Kind() { + case reflect.Bool: + obj[key] = v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + obj[key] = v.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + obj[key] = v.Uint() + case reflect.Float32, reflect.Float64: + obj[key] = v.Float() + case reflect.Interface: + obj[key] = v.Interface() + case reflect.Map: + obj[key] = v.Interface().(map[string]interface{}) + case reflect.Ptr: + setMapKey(obj, key, v.Elem()) + case reflect.Slice: + obj[key] = v.Interface() + case reflect.String: + obj[key] = v.String() + case reflect.Struct: + obj[key] = v.Interface() + default: + // obj[key] = v.Interface() + panic("unhandled type " + v.Type().String() + " for field " + key) + } +} + +func isExported(ft reflect.StructField) bool { + return unicode.IsUpper(rune(ft.Name[0])) +} + +var matchFirstCapital = regexp.MustCompile("(.)([A-Z][a-z]+)") +var matchAllCapitals = regexp.MustCompile("([a-z0-9])([A-Z])") + +func toSnakeCase(str string, sf reflect.StructField) (result string, ok bool) { + if toml, ok := sf.Tag.Lookup("toml"); ok { + if toml == "-" { + return "", false + } + return toml, true + } + snakeStr := matchFirstCapital.ReplaceAllString(str, "${1}_${2}") + snakeStr = matchAllCapitals.ReplaceAllString(snakeStr, "${1}_${2}") + return strings.ToLower(snakeStr), true } func setObject(from, to reflect.Value, destType reflect.Type) error { @@ -918,28 +1067,27 @@ func hasSubType(t reflect.Type) bool { // []string => string // map[string]int => int // User => User -func getSubTypeType(structField reflect.StructField) reflect.Type { - ft := structField.Type - if ft.Kind() == reflect.Ptr { - ft = ft.Elem() +func getSubTypeType(typ reflect.Type) reflect.Type { + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() } - switch ft.Kind() { + switch typ.Kind() { case reflect.Slice: - t := ft.Elem() + t := typ.Elem() if t.Kind() == reflect.Ptr { t = t.Elem() } return t case reflect.Map: - t := ft.Elem() + t := typ.Elem() if t.Kind() == reflect.Ptr { t = t.Elem() } return t case reflect.Struct: - return ft + return typ } - panic(ft.String() + " is not a type that has subtype information (map, slice, struct)") + panic(typ.String() + " is not a type that has subtype information (map, slice, struct)") } // getFieldType translates reflect.Types to our API field types. 
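The snake-case conversion added above is what produces the read_timeout / pointer_struct style keys exercised in the controller tests that follow. A rough usage sketch, with a made-up sample type (not part of the patch), assuming toSnakeCase and getFieldConfigValuesFromStruct behave as written:

	type sample struct {
		ReadTimeout config.Duration `toml:"read_timeout"` // explicit toml tag wins
		UserAgent   string          // no tag: falls back to snake case, "user_agent"
		Secret      string          `toml:"-"` // excluded from the config payload
	}

	cfg := map[string]interface{}{}
	getFieldConfigValuesFromStruct(sample{
		ReadTimeout: config.Duration(5 * time.Second),
		UserAgent:   "telegraf",
	}, cfg)
	// cfg now holds {"read_timeout": "5s", "user_agent": "telegraf"};
	// durations come back as strings because of the Int64 special case above.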
diff --git a/plugins/configs/api/controller_test.go b/plugins/configs/api/controller_test.go index 874e9f26088ff..0d2232390fefd 100644 --- a/plugins/configs/api/controller_test.go +++ b/plugins/configs/api/controller_test.go @@ -2,6 +2,7 @@ package api import ( "context" + "encoding/json" "net/http" "testing" "time" @@ -331,12 +332,12 @@ func TestSetFieldConfig(t *testing.T) { func TestExampleWorstPlugin(t *testing.T) { input := map[string]interface{}{ - "elapsed": "3s", - "elapsed2": "4s", - "readtimeout": "5s", - "size1": "8MiB", - "size2": "9MiB", - "pointerstruct": map[string]interface{}{ + "elapsed": "3s", + "elapsed2": "4s", + "read_timeout": "5s", + "size1": "8MiB", + "size2": "9MiB", + "pointer_struct": map[string]interface{}{ "field": "f", }, "b": true, @@ -358,10 +359,10 @@ func TestExampleWorstPlugin(t *testing.T) { "field2": 1, "field3": float64(5), }, - "reservedkeys": map[string]bool{ + "reserved_keys": map[string]bool{ "key": true, }, - "stringtonumber": map[string][]map[string]float64{ + "string_to_number": map[string][]map[string]float64{ "s": { { "n": 1.0, @@ -379,7 +380,7 @@ func TestExampleWorstPlugin(t *testing.T) { }, }, "value": "string", - "devicetags": map[string][]map[string]string{ + "device_tags": map[string][]map[string]string{ "s": { { "n": "1.0", @@ -389,10 +390,10 @@ func TestExampleWorstPlugin(t *testing.T) { "percentiles": []interface{}{ 1, }, - "floatpercentiles": []interface{}{ + "float_percentiles": []interface{}{ 1.0, }, - "mapofstructs": map[string]interface{}{ + "map_of_structs": map[string]interface{}{ "src": map[string]interface{}{ "dest": "d", }, @@ -402,7 +403,7 @@ func TestExampleWorstPlugin(t *testing.T) { 1, 2.0, }, - "tagslice": []interface{}{ + "tag_slice": []interface{}{ []interface{}{ "s", }, @@ -532,3 +533,175 @@ type baseopts struct { func intptr(i int) *int { return &i } + +func TestGetFieldConfigValues(t *testing.T) { + readTimeout := config.Duration(5 * time.Second) + b := true + i := 1 + f := float64(6) + s := "I am a string pointer" + header := http.Header{ + "Content-Type": []string{"json/application", "text/html"}, + } + input := ExampleWorstPlugin{ + Elapsed: config.Duration(3 * time.Second), + Elapsed2: config.Duration(4 * time.Second), + ReadTimeout: &readTimeout, + Size1: config.Size(8 * 1024 * 1024), + Size2: config.Size(9 * 1024 * 1024), + PointerStruct: &baseopts{Field: "f"}, + B: &b, + I: &i, + I8: 2, + I32: 3, + U8: 4, + F: 5, + PF: &f, + PS: &s, + Header: header, + DefaultFieldsSets: map[string]interface{}{ + "field1": "field1", + "field2": 1, + "field3": float64(5), + }, + ReservedKeys: map[string]bool{ + "key": true, + }, + StringToNumber: map[string][]map[string]float64{ + "s": { + { + "n": 1.0, + }, + }, + }, + Clean: []baseopts{ + {Field: "fieldtest"}, + }, + Templates: []*baseopts{ + {Tag: "tagtest"}, + }, + Value: "string", + DeviceTags: map[string][]map[string]string{ + "s": { + { + "n": "1.0", + }, + }, + }, + Percentiles: []int64{ + 1, + }, + FloatPercentiles: []float64{1.0}, + MapOfStructs: map[string]baseopts{ + "src": { + Dest: "d", + }, + }, + Command: []interface{}{ + "string", + 1, + 2.0, + }, + TagSlice: [][]string{ + {"s"}, + }, + Address: []uint16{ + 1, + }, + } + expected := map[string]interface{}{ + "elapsed": "3s", + "elapsed2": "4s", + "read_timeout": "5s", + "size1": "8MiB", + "size2": "9MiB", + "pointer_struct": map[string]interface{}{ + "field": "f", + "dest": "", + "tag": "", + }, + "b": true, + "i": 1, + "i8": 2, + "i32": 3, + "u8": 4, + "f": 5.0, + "pf": 6.0, + "ps": "I am a string pointer", 
+ // type Header map[string][]string + "header": map[string]interface{}{ + "Content-Type": []interface{}{ + "json/application", "text/html", + }, + }, + "fields": map[string]interface{}{ + "field1": "field1", + "field2": 1, + "field3": float64(5), + }, + "reserved_keys": map[string]bool{ + "key": true, + }, + "string_to_number": map[string][]map[string]float64{ + "s": { + { + "n": 1.0, + }, + }, + }, + "clean": []map[string]interface{}{ + { + "field": "fieldtest", + "dest": "", + "tag": "", + }, + }, + "templates": []map[string]interface{}{ + { + "tag": "tagtest", + "dest": "", + "field": "", + }, + }, + "value": "string", + "device_tags": map[string][]map[string]string{ + "s": { + { + "n": "1.0", + }, + }, + }, + "percentiles": []interface{}{ + 1, + }, + "float_percentiles": []interface{}{ + 1.0, + }, + "map_of_structs": map[string]interface{}{ + "src": map[string]interface{}{ + "dest": "d", + "field": "", + "tag": "", + }, + }, + "command": []interface{}{ + "string", + 1, + 2.0, + }, + "tag_slice": []interface{}{ + []interface{}{ + "s", + }, + }, + "address": []interface{}{ + 1, + }, + } + actual := map[string]interface{}{} + getFieldConfigValuesFromStruct(input, actual) + // require.Equal(t, expected, actual) + expJson, _ := json.Marshal(expected) + actualJson, _ := json.Marshal(actual) + require.Equal(t, string(expJson), string(actualJson)) +} diff --git a/plugins/configs/api/listener.go b/plugins/configs/api/listener.go index 43695ceff03fb..75975e47d3238 100644 --- a/plugins/configs/api/listener.go +++ b/plugins/configs/api/listener.go @@ -3,6 +3,7 @@ package api import ( "context" "encoding/json" + "errors" "fmt" "log" // nolint:revive "net/http" @@ -13,6 +14,12 @@ import ( "github.com/influxdata/telegraf/models" ) +var ( + ClientErr = errors.New("error") + ErrBadRequest = fmt.Errorf("%w bad request", ClientErr) + ErrNotFound = fmt.Errorf("%w not found", ClientErr) +) + type ConfigAPIService struct { server *http.Server api *api @@ -60,14 +67,12 @@ func (s *ConfigAPIService) createPlugin(w http.ResponseWriter, req *http.Request dec := json.NewDecoder(req.Body) if err := dec.Decode(&cfg); err != nil { - s.Log.Error("decode error %v", err) - w.WriteHeader(http.StatusBadRequest) + s.renderError(fmt.Errorf("%w: decode failed %v", ErrBadRequest, err), w) return } id, err := s.api.CreatePlugin(cfg, "") if err != nil { - s.Log.Error("error creating plugin %v", err) - w.WriteHeader(http.StatusBadRequest) + s.renderError(err, w) return } w.Header().Set("Content-Type", "application/json") @@ -78,6 +83,20 @@ func (s *ConfigAPIService) createPlugin(w http.ResponseWriter, req *http.Request } } +func (s *ConfigAPIService) renderError(err error, w http.ResponseWriter) { + if errors.Is(err, ErrBadRequest) { + s.Log.Error(err) + w.WriteHeader(http.StatusBadRequest) + return + } else if errors.Is(err, ErrNotFound) { + s.Log.Error(err) + w.WriteHeader(http.StatusNotFound) + return + } + s.Log.Error(err) + w.WriteHeader(http.StatusInternalServerError) +} + func (s *ConfigAPIService) Start() { go func() { _ = s.server.ListenAndServe() @@ -90,8 +109,7 @@ func (s *ConfigAPIService) listPlugins(w http.ResponseWriter, req *http.Request) bytes, err := json.Marshal(typeInfo) if err != nil { - log.Printf("!E [configapi] error marshalling json: %v", err) - w.WriteHeader(http.StatusInternalServerError) + s.renderError(fmt.Errorf("marshal failed %w", err), w) return } w.Header().Set("Content-Type", "application/json") @@ -108,8 +126,7 @@ func (s *ConfigAPIService) runningPlugins(w http.ResponseWriter, req 
*http.Reque bytes, err := json.Marshal(plugins) if err != nil { - log.Printf("!E [configapi] error marshalling json: %v", err) - w.WriteHeader(http.StatusInternalServerError) + s.renderError(fmt.Errorf("marshal failed %w", err), w) return } w.Header().Set("Content-Type", "application/json") @@ -163,9 +180,7 @@ func (s *ConfigAPIService) deletePlugin(w http.ResponseWriter, req *http.Request return } if err := s.api.DeletePlugin(models.PluginID(id)); err != nil { - s.Log.Error("error deleting plugin %v", err) - // TODO: improve error handling? Would like to see status based on error type - w.WriteHeader(http.StatusInternalServerError) + s.renderError(fmt.Errorf("delete plugin %w", err), w) } w.WriteHeader(http.StatusOK) } diff --git a/plugins/configs/api/listener_test.go b/plugins/configs/api/listener_test.go index b1a6375fd18da..430bdf55610c7 100644 --- a/plugins/configs/api/listener_test.go +++ b/plugins/configs/api/listener_test.go @@ -13,6 +13,7 @@ import ( "github.com/influxdata/telegraf/agent" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/models" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -120,6 +121,7 @@ func TestStopPlugin(t *testing.T) { s := &ConfigAPIService{ api: api, + Log: testutil.Logger{}, } srv := httptest.NewServer(s.mux()) @@ -192,7 +194,53 @@ func TestStopPlugin(t *testing.T) { require.NoError(t, err) _ = resp.Body.Close() - // still expect 200 response - require.EqualValues(t, 200, resp.StatusCode) + require.EqualValues(t, 404, resp.StatusCode) + require.NoError(t, err) +} + +func TestStatusCodes(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + outputCtx, outputCancel := context.WithCancel(context.Background()) + defer outputCancel() + + c := config.NewConfig() + a := agent.NewAgent(ctx, c) + api := newAPI(ctx, outputCtx, c, a) + go a.RunWithAPI(outputCancel) + + s := &ConfigAPIService{ + api: api, + Log: testutil.Logger{}, + } + srv := httptest.NewServer(s.mux()) + + // Error finding plugin with wrong name + buf := bytes.NewBufferString(`{ + "name": "inputs.blah", + "config": { + "files": ["testdata.lp"], + "data_format": "influx" + } + }`) + resp, err := http.Post(srv.URL+"/plugins/create", "application/json", buf) + require.NoError(t, err) + _ = resp.Body.Close() + require.EqualValues(t, 404, resp.StatusCode) + require.NoError(t, err) + + // Error creating plugin with wrong data format + buf = bytes.NewBufferString(`{ + "name": "inputs.file", + "config": { + "files": ["testdata.lp"], + "data_format": "blah" + } + }`) + resp, err = http.Post(srv.URL+"/plugins/create", "application/json", buf) + require.NoError(t, err) + _ = resp.Body.Close() + require.EqualValues(t, 400, resp.StatusCode) require.NoError(t, err) } From f920061c4b6024abced98655a64277fe30f3cf61 Mon Sep 17 00:00:00 2001 From: helenosheaa <38860767+helenosheaa@users.noreply.github.com> Date: Fri, 20 Aug 2021 10:48:49 -0400 Subject: [PATCH 46/48] resolve linter --- plugins/configs/api/controller.go | 1 - 1 file changed, 1 deletion(-) diff --git a/plugins/configs/api/controller.go b/plugins/configs/api/controller.go index a55a3fd459c4e..80d6d0ef6d4c1 100644 --- a/plugins/configs/api/controller.go +++ b/plugins/configs/api/controller.go @@ -700,7 +700,6 @@ func getFieldConfigValuesFromStruct(p interface{}, cfg map[string]interface{}) { } ftType := ft.Type if ftType.Kind() == reflect.Ptr { - ftType = ftType.Elem() f = f.Elem() } // if struct call self 
recursively From 196645d0ed3d5f6a067f170129deccd4a2fd5636 Mon Sep 17 00:00:00 2001 From: helenosheaa <38860767+helenosheaa@users.noreply.github.com> Date: Fri, 20 Aug 2021 12:46:21 -0400 Subject: [PATCH 47/48] resolve linter --- plugins/configs/api/controller_test.go | 6 +++--- plugins/configs/api/listener.go | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/plugins/configs/api/controller_test.go b/plugins/configs/api/controller_test.go index 0d2232390fefd..022fab4208d40 100644 --- a/plugins/configs/api/controller_test.go +++ b/plugins/configs/api/controller_test.go @@ -701,7 +701,7 @@ func TestGetFieldConfigValues(t *testing.T) { actual := map[string]interface{}{} getFieldConfigValuesFromStruct(input, actual) // require.Equal(t, expected, actual) - expJson, _ := json.Marshal(expected) - actualJson, _ := json.Marshal(actual) - require.Equal(t, string(expJson), string(actualJson)) + expJSON, _ := json.Marshal(expected) + actualJSON, _ := json.Marshal(actual) + require.Equal(t, string(expJSON), string(actualJSON)) } diff --git a/plugins/configs/api/listener.go b/plugins/configs/api/listener.go index 75975e47d3238..bff7b3d9bb888 100644 --- a/plugins/configs/api/listener.go +++ b/plugins/configs/api/listener.go @@ -15,9 +15,9 @@ import ( ) var ( - ClientErr = errors.New("error") - ErrBadRequest = fmt.Errorf("%w bad request", ClientErr) - ErrNotFound = fmt.Errorf("%w not found", ClientErr) + ErrClient = errors.New("error") + ErrBadRequest = fmt.Errorf("%w bad request", ErrClient) + ErrNotFound = fmt.Errorf("%w not found", ErrClient) ) type ConfigAPIService struct { From d905b9a1c564bfa0ff2c3fc5936c1f2382233f6d Mon Sep 17 00:00:00 2001 From: Steven Soroka Date: Fri, 27 Aug 2021 16:06:05 -0400 Subject: [PATCH 48/48] finish hooking up storage. add jsonfile storage module for easy testing --- config/config.go | 14 ++--- models/running_processor.go | 1 + plugins/configs/api/controller.go | 81 +++++++++++++++++++--------- plugins/configs/api/plugin.go | 43 ++++++++++++++- plugins/storage/all/all.go | 1 + plugins/storage/jsonfile/jsonfile.go | 79 +++++++++++++++++++++++++++ 6 files changed, 185 insertions(+), 34 deletions(-) create mode 100644 plugins/storage/jsonfile/jsonfile.go diff --git a/config/config.go b/config/config.go index 2b4a95c0066c8..f830034efe387 100644 --- a/config/config.go +++ b/config/config.go @@ -1029,14 +1029,14 @@ func (c *Config) addConfigPlugin(ctx context.Context, outputCtx context.Context, if stCfg, ok := table.Fields["storage"]; ok { storageCfgTable := stCfg.(*ast.Table) - if cfg, ok := storageCfgTable.Fields["internal"]; ok { - // create internal storage plugin - // set cfg - sp := StoragePlugins["internal"]() - if err := c.toml.UnmarshalTable(cfg.(*ast.Table), sp); err != nil { - return err + for name, cfg := range storageCfgTable.Fields { + if spc, ok := StoragePlugins[name]; ok { + sp := spc() + if err := c.toml.UnmarshalTable(cfg.(*ast.Table), sp); err != nil { + return err + } + reflect.ValueOf(configPlugin).Elem().FieldByName("Storage").Set(reflect.ValueOf(sp)) } - reflect.ValueOf(configPlugin).Elem().FieldByName("Storage").Set(reflect.ValueOf(sp)) } delete(table.Fields, "storage") } diff --git a/models/running_processor.go b/models/running_processor.go index f07822c3d69b7..8d56f6f3beacf 100644 --- a/models/running_processor.go +++ b/models/running_processor.go @@ -66,6 +66,7 @@ func (rp *RunningProcessor) Log() telegraf.Logger { return rp.log } +// LogName returns the name of the processor as the logs would see it, with any alias. 
func (rp *RunningProcessor) LogName() string { return logName("processors", rp.Config.Name, rp.Config.Alias) } diff --git a/plugins/configs/api/controller.go b/plugins/configs/api/controller.go index 80d6d0ef6d4c1..3c3693807bfcd 100644 --- a/plugins/configs/api/controller.go +++ b/plugins/configs/api/controller.go @@ -32,6 +32,9 @@ type api struct { // api shutdown context ctx context.Context outputCtx context.Context + + addHooks []PluginCallbackEvent + removeHooks []PluginCallbackEvent } // nolint:revive @@ -45,7 +48,7 @@ func newAPI(ctx context.Context, outputCtx context.Context, cfg *config.Config, return c } -// PluginConfig is a plugin name and details about the config fields. +// PluginConfigTypeInfo is a plugin name and details about the config fields. type PluginConfigTypeInfo struct { Name string `json:"name"` Config map[string]FieldConfig `json:"config"` @@ -312,21 +315,25 @@ func (a *api) CreatePlugin(cfg PluginConfigCreate, forcedID models.PluginID) (mo return "", fmt.Errorf("%w: setting field %s", ErrBadRequest, err) } - rp := models.NewRunningInput(i, pluginConfig) + ri := models.NewRunningInput(i, pluginConfig) if len(forcedID) > 0 { - rp.ID = forcedID.Uint64() + ri.ID = forcedID.Uint64() } - rp.SetDefaultTags(a.config.Tags) + ri.SetDefaultTags(a.config.Tags) - if err := rp.Init(); err != nil { + if err := ri.Init(); err != nil { return "", fmt.Errorf("%w: could not initialize plugin %s", ErrBadRequest, err) } - a.agent.AddInput(rp) + a.agent.AddInput(ri) + a.addPluginHook(PluginConfig{ID: string(idToString(ri.ID)), PluginConfigCreate: PluginConfigCreate{ + Name: "inputs." + name, // TODO: use PluginName() or something + Config: cfg.Config, + }}) - go a.agent.RunInput(rp, time.Now()) + go a.agent.RunInput(ri, time.Now()) - return idToString(rp.ID), nil + return idToString(ri.ID), nil case "outputs": // add an output output, ok := outputs.Outputs[name] @@ -372,6 +379,11 @@ func (a *api) CreatePlugin(cfg PluginConfigCreate, forcedID models.PluginID) (mo a.agent.AddOutput(ro) + a.addPluginHook(PluginConfig{ID: string(idToString(ro.ID)), PluginConfigCreate: PluginConfigCreate{ + Name: "outputs." + name, // TODO: use PluginName() or something + Config: cfg.Config, + }}) + go a.agent.RunOutput(a.outputCtx, ro) return idToString(ro.ID), nil @@ -405,6 +417,12 @@ func (a *api) CreatePlugin(cfg PluginConfigCreate, forcedID models.PluginID) (mo return "", fmt.Errorf("%w: could not initialize plugin %s", ErrBadRequest, err) } a.agent.AddProcessor(ra) + + a.addPluginHook(PluginConfig{ID: string(idToString(ra.ID)), PluginConfigCreate: PluginConfigCreate{ + Name: "aggregators." + name, // TODO: use PluginName() or something + Config: cfg.Config, + }}) + go a.agent.RunProcessor(ra) return idToString(ra.ID), nil @@ -441,6 +459,11 @@ func (a *api) CreatePlugin(cfg PluginConfigCreate, forcedID models.PluginID) (mo a.agent.AddProcessor(rp) + a.addPluginHook(PluginConfig{ID: string(idToString(rp.ID)), PluginConfigCreate: PluginConfigCreate{ + Name: "processors." + name, // TODO: use PluginName() or something + Config: cfg.Config, + }}) + go a.agent.RunProcessor(rp) return idToString(rp.ID), nil @@ -469,6 +492,8 @@ func (a *api) GetPluginStatus(id models.PluginID) models.PluginState { } func (a *api) DeletePlugin(id models.PluginID) error { + a.removePluginHook(PluginConfig{ID: string(id)}) + for _, v := range a.agent.RunningInputs() { if v.ID == id.Uint64() { log.Printf("I! 
[configapi] stopping plugin %q", v.LogName()) @@ -493,13 +518,31 @@ func (a *api) DeletePlugin(id models.PluginID) error { return ErrNotFound } -// func (a *API) PausePlugin(id models.PluginID) { +type PluginCallbackEvent func(p PluginConfig) -// } +// addPluginHook triggers the hook to fire this event +func (a *api) addPluginHook(p PluginConfig) { + for _, h := range a.addHooks { + h(p) + } +} -// func (a *API) ResumePlugin(id models.PluginID) { +// removePluginHook triggers the hook to fire this event +func (a *api) removePluginHook(p PluginConfig) { + for _, h := range a.removeHooks { + h(p) + } +} -// } +// OnPluginAdded adds a hook to get notified of this event +func (a *api) OnPluginAdded(f PluginCallbackEvent) { + a.addHooks = append(a.addHooks, f) +} + +// OnPluginRemoved adds a hook to get notified of this event +func (a *api) OnPluginRemoved(f PluginCallbackEvent) { + a.removeHooks = append(a.removeHooks, f) +} // setFieldConfig takes a map of field names to field values and sets them on the plugin func setFieldConfig(cfg map[string]interface{}, p interface{}) error { @@ -525,7 +568,7 @@ func setFieldConfig(cfg map[string]interface{}, p interface{}) error { } val := reflect.ValueOf(v) if err := setObject(val, destField, destFieldType); err != nil { - return fmt.Errorf("Could not set field %s: %w", k, err) + return fmt.Errorf("Could not set field %q: %w", k, err) } } return nil @@ -853,18 +896,6 @@ func setObject(from, to reflect.Value, destType reflect.Type) error { return fmt.Errorf("Couldn't parse duration %q: %w", from.Interface().(string), err) } to.SetInt(int64(d)) - case "internal.Duration": - d, err := time.ParseDuration(from.Interface().(string)) - if err != nil { - return fmt.Errorf("Couldn't parse duration %q: %w", from.Interface().(string), err) - } - to.FieldByName("Duration").SetInt(int64(d)) - case "internal.Size": - size, err := units.ParseStrictBytes(from.Interface().(string)) - if err != nil { - return fmt.Errorf("Couldn't parse size %q: %w", from.Interface().(string), err) - } - to.FieldByName("Size").SetInt(size) case "config.Size": size, err := units.ParseStrictBytes(from.Interface().(string)) if err != nil { diff --git a/plugins/configs/api/plugin.go b/plugins/configs/api/plugin.go index e1c96fdb23011..e8c7cf2f018c5 100644 --- a/plugins/configs/api/plugin.go +++ b/plugins/configs/api/plugin.go @@ -4,9 +4,11 @@ import ( "context" "fmt" "net/http" + "sync" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/configs" ) @@ -19,8 +21,9 @@ type ConfigAPIPlugin struct { api *api server *ConfigAPIService - Log telegraf.Logger `toml:"-"` - plugins []PluginConfig + Log telegraf.Logger `toml:"-"` + plugins []PluginConfig + pluginsMutex sync.Mutex } func (a *ConfigAPIPlugin) GetName() string { @@ -43,6 +46,39 @@ func (a *ConfigAPIPlugin) Init(ctx context.Context, outputCtx context.Context, c } } + a.Log.Info(fmt.Sprintf("Loading %d stored plugins", len(a.plugins))) + for _, plug := range a.plugins { + id, err := a.api.CreatePlugin(plug.PluginConfigCreate, models.PluginID(plug.ID)) + if err != nil { + a.Log.Errorf("Couldn't recreate plugin %q: %s", id, err) + } + } + + a.api.OnPluginAdded(func(p PluginConfig) { + a.pluginsMutex.Lock() + defer a.pluginsMutex.Unlock() + a.plugins = append(a.plugins, p) + if err := a.Storage.Save("config-api", "plugins", &a.plugins); err != nil { + 
a.Log.Error("saving plugin state: " + err.Error()) + } + }) + a.api.OnPluginRemoved(func(p PluginConfig) { + a.pluginsMutex.Lock() + defer a.pluginsMutex.Unlock() + changed := false + for i, plug := range a.plugins { + if plug.ID == p.ID { + a.plugins = append(a.plugins[:i], a.plugins[i+1:]...) + changed = true + } + } + if changed { + if err := a.Storage.Save("config-api", "plugins", &a.plugins); err != nil { + a.Log.Error("saving plugin state: " + err.Error()) + } + } + }) + // start listening for HTTP requests tlsConfig, err := a.TLSConfig() if err != nil { @@ -66,6 +102,9 @@ func (a *ConfigAPIPlugin) Close() error { // wait until all requests finish a.server.Stop() + a.pluginsMutex.Lock() + defer a.pluginsMutex.Unlock() + // store state if a.Storage != nil { if err := a.Storage.Save("config-api", "plugins", &a.plugins); err != nil { diff --git a/plugins/storage/all/all.go b/plugins/storage/all/all.go index 15e437f9bdbad..a5313df81458b 100644 --- a/plugins/storage/all/all.go +++ b/plugins/storage/all/all.go @@ -3,4 +3,5 @@ package all import ( // Blank imports for plugins to register themselves _ "github.com/influxdata/telegraf/plugins/storage/boltdb" + _ "github.com/influxdata/telegraf/plugins/storage/jsonfile" ) diff --git a/plugins/storage/jsonfile/jsonfile.go b/plugins/storage/jsonfile/jsonfile.go new file mode 100644 index 0000000000000..8a78482a7fd19 --- /dev/null +++ b/plugins/storage/jsonfile/jsonfile.go @@ -0,0 +1,79 @@ +package jsonfile + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/storage" +) + +type JSONFileStorage struct { + Filename string `toml:"file"` +} + +func (s *JSONFileStorage) Init() error { + if len(s.Filename) == 0 { + return fmt.Errorf("Storage service requires filename") + } + return nil +} + +func (s *JSONFileStorage) Close() error { + return nil +} + +func (s *JSONFileStorage) Load(namespace, key string, obj interface{}) error { + f, err := os.Open(s.Filename) + if os.IsNotExist(err) { + return nil + } + if err != nil { + return err + } + dec := json.NewDecoder(f) + m := map[string]interface{}{} + err = dec.Decode(&m) + if err != nil { + return err + } + if v, ok := m[namespace]; ok { + m = v.(map[string]interface{}) + } + if v, ok := m[key]; ok { + b, err := json.Marshal(v) + if err != nil { + return err + } + err = json.Unmarshal(b, obj) + if err != nil { + return err + } + + return nil + } + return nil +} + +func (s *JSONFileStorage) Save(namespace, key string, value interface{}) error { + m := map[string]interface{}{} + m[namespace] = map[string]interface{}{ + key: value, + } + data, err := json.Marshal(m) + if err != nil { + return err + } + return os.WriteFile(s.Filename, data, 0600) +} + +func (s *JSONFileStorage) GetName() string { + return "jsonfile" +} + +func init() { + storage.Add("jsonfile", func() config.StoragePlugin { + return &JSONFileStorage{} + }) +}