From 6f2eeae49822782b85caf7639067685b812bc325 Mon Sep 17 00:00:00 2001
From: Cameron Sparr
Date: Tue, 28 Feb 2017 12:46:27 +0000
Subject: [PATCH 001/201] Remove sleep from riemann test

---
 plugins/outputs/riemann/riemann_test.go | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/plugins/outputs/riemann/riemann_test.go b/plugins/outputs/riemann/riemann_test.go
index e03d720ce26e4..67a161be5ab1d 100644
--- a/plugins/outputs/riemann/riemann_test.go
+++ b/plugins/outputs/riemann/riemann_test.go
@@ -193,7 +193,16 @@ func TestConnectAndWrite(t *testing.T) {
 	err = r.Write(metrics)
 	require.NoError(t, err)
 
-	time.Sleep(200 * time.Millisecond)
+	start := time.Now()
+	for {
+		events, _ := r.client.Query(`tagged "docker"`)
+		if len(events) > 0 {
+			break
+		}
+		if time.Since(start) > time.Second {
+			break
+		}
+	}
 
 	// are there any "docker" tagged events in Riemann?
 	events, err := r.client.Query(`tagged "docker"`)

From b9457a109268ef1c055f4aa5b4f92a42afc21c24 Mon Sep 17 00:00:00 2001
From: Cameron Sparr
Date: Tue, 21 Feb 2017 19:50:10 +0100
Subject: [PATCH 002/201] log error message when invalid regex is used

closes #2178
---
 CHANGELOG.md                               |  1 +
 plugins/inputs/logparser/grok/grok_test.go | 37 ++++++++++++++++++++++
 plugins/inputs/logparser/logparser.go      |  2 ++
 3 files changed, 40 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 509d6f2f1e7dc..2a43e844f993b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -65,6 +65,7 @@ be deprecated eventually.
 - [#2390](https://github.com/influxdata/telegraf/issues/2390): Empty tag value causes error on InfluxDB output.
 - [#2380](https://github.com/influxdata/telegraf/issues/2380): buffer_size field value is negative number from "internal" plugin.
 - [#2414](https://github.com/influxdata/telegraf/issues/2414): Missing error handling in the MySQL plugin leads to segmentation violation.
+- [#2178](https://github.com/influxdata/telegraf/issues/2178): logparser: regexp with lookahead.
 
 ## v1.2.1 [2017-02-01]
 
diff --git a/plugins/inputs/logparser/grok/grok_test.go b/plugins/inputs/logparser/grok/grok_test.go
index 1344896b84c40..4e0ead6e9a67d 100644
--- a/plugins/inputs/logparser/grok/grok_test.go
+++ b/plugins/inputs/logparser/grok/grok_test.go
@@ -57,6 +57,43 @@ func Benchmark_ParseLine_CustomPattern(b *testing.B) {
 	benchM = m
 }
 
+// Test a very simple parse pattern.
+func TestSimpleParse(t *testing.T) {
+	p := &Parser{
+		Patterns: []string{"%{TESTLOG}"},
+		CustomPatterns: `
+			TESTLOG %{NUMBER:num:int} %{WORD:client}
+		`,
+	}
+	assert.NoError(t, p.Compile())
+
+	m, err := p.ParseLine(`142 bot`)
+	assert.NoError(t, err)
+	require.NotNil(t, m)
+
+	assert.Equal(t,
+		map[string]interface{}{
+			"num":    int64(142),
+			"client": "bot",
+		},
+		m.Fields())
+}
+
+// Verify that patterns with a regex lookahead fail at parse time (the regex is compiled lazily, so Compile itself succeeds).
+func TestParsePatternsWithLookahead(t *testing.T) { + p := &Parser{ + Patterns: []string{"%{MYLOG}"}, + CustomPatterns: ` + NOBOT ((?!bot|crawl).)* + MYLOG %{NUMBER:num:int} %{NOBOT:client} + `, + } + assert.NoError(t, p.Compile()) + + _, err := p.ParseLine(`1466004605359052000 bot`) + assert.Error(t, err) +} + func TestMeasurementName(t *testing.T) { p := &Parser{ Measurement: "my_web_log", diff --git a/plugins/inputs/logparser/logparser.go b/plugins/inputs/logparser/logparser.go index 8ec32835829a1..a228322772c13 100644 --- a/plugins/inputs/logparser/logparser.go +++ b/plugins/inputs/logparser/logparser.go @@ -226,6 +226,8 @@ func (l *LogParserPlugin) parser() { if m != nil { l.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time()) } + } else { + log.Println("E! Error parsing log line: " + err.Error()) } } } From 9e810ac46369e723f14fb3c51cbada859b4d0f8c Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Thu, 23 Feb 2017 13:45:36 +0000 Subject: [PATCH 003/201] Handle nil os.FileInfo in filepath.Walk closes #2466 --- CHANGELOG.md | 1 + internal/config/config.go | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2a43e844f993b..6222434587e30 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -66,6 +66,7 @@ be deprecated eventually. - [#2380](https://github.com/influxdata/telegraf/issues/2380): buffer_size field value is negative number from "internal" plugin. - [#2414](https://github.com/influxdata/telegraf/issues/2414): Missing error handling in the MySQL plugin leads to segmentation violation. - [#2178](https://github.com/influxdata/telegraf/issues/2178): logparser: regexp with lookahead. +- [#2466](https://github.com/influxdata/telegraf/issues/2466): Telegraf can crash in LoadDirectory on 0600 files. ## v1.2.1 [2017-02-01] diff --git a/internal/config/config.go b/internal/config/config.go index 90b158716abe7..651c4e9ef4bf3 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -505,6 +505,10 @@ func PrintOutputConfig(name string) error { func (c *Config) LoadDirectory(path string) error { walkfn := func(thispath string, info os.FileInfo, _ error) error { + if info == nil { + log.Printf("W! Telegraf is not permitted to read %s", thispath) + return nil + } if info.IsDir() { return nil } From a251adb838cbcdc24f678a59d6551cc3ec94fea2 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 1 Mar 2017 11:22:42 +0000 Subject: [PATCH 004/201] Fix type conflict on windows ping plugin (#2462) closes #1433 --- CHANGELOG.md | 7 +++++++ plugins/inputs/ping/ping_windows.go | 12 ++++++------ plugins/inputs/ping/ping_windows_test.go | 6 +++--- 3 files changed, 16 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6222434587e30..d8dc382d7db1d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,12 @@ ### Release Notes +- Users of the windows `ping` plugin will need to drop or migrate their +measurements in order to continue using the plugin. The reason for this is that +the windows plugin was outputting a different type than the linux plugin. This +made it impossible to use the `ping` plugin for both windows and linux +machines. + - Ceph: the `ceph_pgmap_state` metric content has been modified to use a unique field `count`, with each state expressed as a `state` tag. Telegraf < 1.3: @@ -65,6 +71,7 @@ be deprecated eventually. - [#2390](https://github.com/influxdata/telegraf/issues/2390): Empty tag value causes error on InfluxDB output. 
- [#2380](https://github.com/influxdata/telegraf/issues/2380): buffer_size field value is negative number from "internal" plugin.
 - [#2414](https://github.com/influxdata/telegraf/issues/2414): Missing error handling in the MySQL plugin leads to segmentation violation.
+- [#2462](https://github.com/influxdata/telegraf/pull/2462): Fix type conflict in windows ping plugin.
 - [#2178](https://github.com/influxdata/telegraf/issues/2178): logparser: regexp with lookahead.
 - [#2466](https://github.com/influxdata/telegraf/issues/2466): Telegraf can crash in LoadDirectory on 0600 files.
 
diff --git a/plugins/inputs/ping/ping_windows.go b/plugins/inputs/ping/ping_windows.go
index 7fb112810af3d..b1212eaaae8b2 100644
--- a/plugins/inputs/ping/ping_windows.go
+++ b/plugins/inputs/ping/ping_windows.go
@@ -40,10 +40,10 @@ func (s *Ping) Description() string {
 const sampleConfig = `
 	## urls to ping
 	urls = ["www.google.com"] # required
-	
+
 	## number of pings to send per collection (ping -n <COUNT>)
 	count = 4 # required
-	
+
 	## Ping timeout, in seconds. 0 means default timeout (ping -w <TIMEOUT>)
 	Timeout = 0
 `
@@ -64,7 +64,7 @@ func hostPinger(timeout float64, args ...string) (string, error) {
 }
 
 // processPingOutput takes in a string output from the ping command
-// based on linux implementation but using regex ( multilanguage support ) ( shouldn't affect the performance of the program )
+// based on linux implementation but using regex ( multilanguage support )
 // It returns (<packets sent>, <packets received>, <replies received>, <avg response>, <min response>, <max response>)
 func processPingOutput(out string) (int, int, int, int, int, int, error) {
 	// So find a line contain 3 numbers except reply lines
@@ -189,13 +189,13 @@ func (p *Ping) Gather(acc telegraf.Accumulator) error {
 				"percent_reply_loss": lossReply,
 			}
 			if avg > 0 {
-				fields["average_response_ms"] = avg
+				fields["average_response_ms"] = float64(avg)
 			}
 			if min > 0 {
-				fields["minimum_response_ms"] = min
+				fields["minimum_response_ms"] = float64(min)
 			}
 			if max > 0 {
-				fields["maximum_response_ms"] = max
+				fields["maximum_response_ms"] = float64(max)
 			}
 			acc.AddFields("ping", fields, tags)
 		}(url)
diff --git a/plugins/inputs/ping/ping_windows_test.go b/plugins/inputs/ping/ping_windows_test.go
index 34428b8146a6b..b55b7955b4110 100644
--- a/plugins/inputs/ping/ping_windows_test.go
+++ b/plugins/inputs/ping/ping_windows_test.go
@@ -77,9 +77,9 @@ func TestPingGather(t *testing.T) {
 		"reply_received":      4,
 		"percent_packet_loss": 0.0,
 		"percent_reply_loss":  0.0,
-		"average_response_ms": 50,
-		"minimum_response_ms": 50,
-		"maximum_response_ms": 52,
+		"average_response_ms": 50.0,
+		"minimum_response_ms": 50.0,
+		"maximum_response_ms": 52.0,
 	}
 	acc.AssertContainsTaggedFields(t, "ping", fields, tags)
 
From 96185159264e5e1a5259cf6329c71ac3a3944ca8 Mon Sep 17 00:00:00 2001
From: Chris Koehnke
Date: Thu, 2 Mar 2017 03:43:33 -0500
Subject: [PATCH 005/201] Disk counter array newline (#2481)

Tweak formatting of `LogicalDisk` counter array to have one entry per line.
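For illustration, the `LogicalDisk` counter array as it reads after this
change — a sketch assembled from the diff below (the real config may list
additional counters beyond those shown in the hunk):

```toml
    Counters = [
      "% Idle Time",
      "% Disk Time",
      "% Disk Read Time",
      "% Disk Write Time",
      "Current Disk Queue Length",
      "% Free Space",
    ]
```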
---
 etc/telegraf_windows.conf | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/etc/telegraf_windows.conf b/etc/telegraf_windows.conf
index 7380ab8a3b14b..535b0f414c362 100644
--- a/etc/telegraf_windows.conf
+++ b/etc/telegraf_windows.conf
@@ -117,7 +117,8 @@
     Instances = ["*"]
     Counters = [
       "% Idle Time",
-      "% Disk Time","% Disk Read Time",
+      "% Disk Time",
+      "% Disk Read Time",
       "% Disk Write Time",
       "Current Disk Queue Length",
       "% Free Space",

From 1873abd2484987280b29322cfb03b29f471fca7b Mon Sep 17 00:00:00 2001
From: Charles-Henri
Date: Thu, 2 Mar 2017 10:58:26 +0100
Subject: [PATCH 006/201] Iptables input: document better the ignored rules
 behavior (#2482)

During issue #2215 it was highlighted that the current behavior where
rules without a comment are ignored is confusing for several users.
This commit improves the documentation and adds a NOTE to the sample
config to clarify the behavior for new users.
---
 CHANGELOG.md                        | 1 +
 plugins/inputs/iptables/README.md   | 6 +++++-
 plugins/inputs/iptables/iptables.go | 6 ++++--
 3 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index d8dc382d7db1d..fe56317671f91 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -74,6 +74,7 @@ be deprecated eventually.
 - [#2462](https://github.com/influxdata/telegraf/pull/2462): Fix type conflict in windows ping plugin.
 - [#2178](https://github.com/influxdata/telegraf/issues/2178): logparser: regexp with lookahead.
 - [#2466](https://github.com/influxdata/telegraf/issues/2466): Telegraf can crash in LoadDirectory on 0600 files.
+- [#2215](https://github.com/influxdata/telegraf/issues/2215): Iptables input: document better that rules without a comment are ignored.
 
 ## v1.2.1 [2017-02-01]
 
diff --git a/plugins/inputs/iptables/README.md b/plugins/inputs/iptables/README.md
index a711f1d4e3a3a..2b226b9fe74eb 100644
--- a/plugins/inputs/iptables/README.md
+++ b/plugins/inputs/iptables/README.md
@@ -2,7 +2,11 @@
 
 The iptables plugin gathers packets and bytes counters for rules within a set of table and chain from the Linux's iptables firewall.
 
-Rules are identified through associated comment. Rules without comment are ignored.
+Rules are identified through their associated comment. **Rules without a comment are ignored**.
+We need a unique ID for each rule, and the rule number is not constant: it may change when rules are inserted or deleted at start-up or by automatic tools (interactive firewalls, fail2ban, ...).
+Also, when the rule set grows large (hundreds of lines), most people are interested in monitoring only a small part of it.
+
+Before using this plugin **you must ensure that the rules you want to monitor are named with a unique comment**. Comments are added using the `-m comment --comment "my comment"` iptables options.
 
 The iptables command requires CAP_NET_ADMIN and CAP_NET_RAW capabilities. You have several options to grant telegraf to run iptables:
 
diff --git a/plugins/inputs/iptables/iptables.go b/plugins/inputs/iptables/iptables.go
index 31b049d9f2820..eab33bf9f21f1 100644
--- a/plugins/inputs/iptables/iptables.go
+++ b/plugins/inputs/iptables/iptables.go
@@ -33,14 +33,16 @@ func (ipt *Iptables) SampleConfig() string {
 	## iptables require root access on most systems.
 	## Setting 'use_sudo' to true will make use of sudo to run iptables.
 	## Users must configure sudo to allow telegraf user to run iptables with no password.
-	## iptables can be restricted to only list command "iptables -nvL"
+	## iptables can be restricted to only list command "iptables -nvL".
 	use_sudo = false
 	## Setting 'use_lock' to true runs iptables with the "-w" option.
 	## Adjust your sudo settings appropriately if using this option ("iptables -wnvl")
 	use_lock = false
 	## defines the table to monitor:
 	table = "filter"
-	## defines the chains to monitor:
+	## defines the chains to monitor.
+	## NOTE: iptables rules without a comment will not be monitored.
+	## Read the plugin documentation for more information.
 	chains = [ "INPUT" ]
 `
 }

From 10744646dbdd52dea349dd7773bcd7115e4a838d Mon Sep 17 00:00:00 2001
From: Jack Zampolin
Date: Fri, 3 Mar 2017 10:24:50 -0800
Subject: [PATCH 007/201] AMQP Consumer plugin (#1678)

---
 CHANGELOG.md                                  |   1 +
 README.md                                     |   3 +-
 plugins/inputs/all/all.go                     |   1 +
 plugins/inputs/amqp_consumer/README.md        |  47 +++
 plugins/inputs/amqp_consumer/amqp_consumer.go | 280 ++++++++++++++++++
 plugins/outputs/amqp/README.md                |  11 +-
 plugins/outputs/amqp/amqp.go                  |  20 +-
 7 files changed, 357 insertions(+), 6 deletions(-)
 create mode 100644 plugins/inputs/amqp_consumer/README.md
 create mode 100644 plugins/inputs/amqp_consumer/amqp_consumer.go

diff --git a/CHANGELOG.md b/CHANGELOG.md
index fe56317671f91..323b239155194 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -55,6 +55,7 @@ be deprecated eventually.
 - [#2244](https://github.com/influxdata/telegraf/pull/2244): Support ipmi_sensor plugin querying local ipmi sensors.
 - [#2339](https://github.com/influxdata/telegraf/pull/2339): Increment gather_errors for all errors emitted by inputs.
 - [#2071](https://github.com/influxdata/telegraf/issues/2071): Use official docker SDK.
+- [#1678](https://github.com/influxdata/telegraf/pull/1678): Add AMQP consumer input plugin
 
 ### Bugfixes
 
diff --git a/README.md b/README.md
index 3dd06e93abe53..915c7b7612d91 100644
--- a/README.md
+++ b/README.md
@@ -97,9 +97,10 @@ configuration options.
 
 ## Input Plugins
 
-* [aws cloudwatch](./plugins/inputs/cloudwatch)
 * [aerospike](./plugins/inputs/aerospike)
+* [amqp_consumer](./plugins/inputs/amqp_consumer) (rabbitmq)
 * [apache](./plugins/inputs/apache)
+* [aws cloudwatch](./plugins/inputs/cloudwatch)
 * [bcache](./plugins/inputs/bcache)
 * [cassandra](./plugins/inputs/cassandra)
 * [ceph](./plugins/inputs/ceph)
diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go
index 924dffe3d3e8b..a9147c53ed153 100644
--- a/plugins/inputs/all/all.go
+++ b/plugins/inputs/all/all.go
@@ -2,6 +2,7 @@ package all
 
 import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/aerospike"
+	_ "github.com/influxdata/telegraf/plugins/inputs/amqp_consumer"
 	_ "github.com/influxdata/telegraf/plugins/inputs/apache"
 	_ "github.com/influxdata/telegraf/plugins/inputs/bcache"
 	_ "github.com/influxdata/telegraf/plugins/inputs/cassandra"
diff --git a/plugins/inputs/amqp_consumer/README.md b/plugins/inputs/amqp_consumer/README.md
new file mode 100644
index 0000000000000..85780700fb46b
--- /dev/null
+++ b/plugins/inputs/amqp_consumer/README.md
@@ -0,0 +1,47 @@
+# AMQP Consumer Input Plugin
+
+This plugin provides a consumer for use with AMQP 0-9-1, a prominent implementation of this protocol being [RabbitMQ](https://www.rabbitmq.com/).
+
+Metrics are read from a topic exchange using the configured queue and binding_key.
+
+Message payload should be formatted in one of the [Telegraf Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md).
+
+For an introduction to AMQP see:
+- https://www.rabbitmq.com/tutorials/amqp-concepts.html
+- https://www.rabbitmq.com/getstarted.html
+
+The following defaults are known to work with RabbitMQ:
+
+```toml
+# AMQP consumer plugin
+[[inputs.amqp_consumer]]
+  ## AMQP url
+  url = "amqp://localhost:5672/influxdb"
+  ## AMQP exchange
+  exchange = "telegraf"
+  ## AMQP queue name
+  queue = "telegraf"
+  ## Binding Key
+  binding_key = "#"
+
+  ## Controls how many messages the server will try to keep on the network
+  ## for consumers before receiving delivery acks.
+  #prefetch_count = 50
+
+  ## Auth method. PLAIN and EXTERNAL are supported.
+  ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
+  ## described here: https://www.rabbitmq.com/plugins.html
+  # auth_method = "PLAIN"
+  ## Optional SSL Config
+  # ssl_ca = "/etc/telegraf/ca.pem"
+  # ssl_cert = "/etc/telegraf/cert.pem"
+  # ssl_key = "/etc/telegraf/key.pem"
+  ## Use SSL but skip chain & host verification
+  # insecure_skip_verify = false
+
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "influx"
+```
diff --git a/plugins/inputs/amqp_consumer/amqp_consumer.go b/plugins/inputs/amqp_consumer/amqp_consumer.go
new file mode 100644
index 0000000000000..6f12244aa4ac5
--- /dev/null
+++ b/plugins/inputs/amqp_consumer/amqp_consumer.go
@@ -0,0 +1,280 @@
+package amqp_consumer
+
+import (
+	"fmt"
+	"log"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/streadway/amqp"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/internal"
+	"github.com/influxdata/telegraf/plugins/inputs"
+	"github.com/influxdata/telegraf/plugins/parsers"
+)
+
+// AMQPConsumer is the top level struct for this plugin
+type AMQPConsumer struct {
+	URL string
+	// AMQP exchange
+	Exchange string
+	// Queue Name
+	Queue string
+	// Binding Key
+	BindingKey string `toml:"binding_key"`
+
+	// Controls how many messages the server will try to keep on the network
+	// for consumers before receiving delivery acks.
+	PrefetchCount int
+
+	// AMQP Auth method
+	AuthMethod string
+	// Path to CA file
+	SSLCA string `toml:"ssl_ca"`
+	// Path to host cert file
+	SSLCert string `toml:"ssl_cert"`
+	// Path to cert key file
+	SSLKey string `toml:"ssl_key"`
+	// Use SSL but skip chain & host verification
+	InsecureSkipVerify bool
+
+	parser parsers.Parser
+	conn   *amqp.Connection
+	wg     *sync.WaitGroup
+}
+
+type externalAuth struct{}
+
+func (a *externalAuth) Mechanism() string {
+	return "EXTERNAL"
+}
+func (a *externalAuth) Response() string {
+	return fmt.Sprintf("\000")
+}
+
+const (
+	DefaultAuthMethod    = "PLAIN"
+	DefaultPrefetchCount = 50
+)
+
+func (a *AMQPConsumer) SampleConfig() string {
+	return `
+  ## AMQP url
+  url = "amqp://localhost:5672/influxdb"
+  ## AMQP exchange
+  exchange = "telegraf"
+  ## AMQP queue name
+  queue = "telegraf"
+  ## Binding Key
+  binding_key = "#"
+
+  ## Maximum number of messages server should give to the worker.
+  prefetch_count = 50
+
+  ## Auth method.
PLAIN and EXTERNAL are supported + ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as + ## described here: https://www.rabbitmq.com/plugins.html + # auth_method = "PLAIN" + + ## Optional SSL Config + # ssl_ca = "/etc/telegraf/ca.pem" + # ssl_cert = "/etc/telegraf/cert.pem" + # ssl_key = "/etc/telegraf/key.pem" + ## Use SSL but skip chain & host verification + # insecure_skip_verify = false + + ## Data format to output. + ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "influx" +` +} + +func (a *AMQPConsumer) Description() string { + return "AMQP consumer plugin" +} + +func (a *AMQPConsumer) SetParser(parser parsers.Parser) { + a.parser = parser +} + +// All gathering is done in the Start function +func (a *AMQPConsumer) Gather(_ telegraf.Accumulator) error { + return nil +} + +func (a *AMQPConsumer) createConfig() (*amqp.Config, error) { + // make new tls config + tls, err := internal.GetTLSConfig( + a.SSLCert, a.SSLKey, a.SSLCA, a.InsecureSkipVerify) + if err != nil { + return nil, err + } + + // parse auth method + var sasl []amqp.Authentication // nil by default + + if strings.ToUpper(a.AuthMethod) == "EXTERNAL" { + sasl = []amqp.Authentication{&externalAuth{}} + } + + config := amqp.Config{ + TLSClientConfig: tls, + SASL: sasl, // if nil, it will be PLAIN + } + return &config, nil +} + +// Start satisfies the telegraf.ServiceInput interface +func (a *AMQPConsumer) Start(acc telegraf.Accumulator) error { + amqpConf, err := a.createConfig() + if err != nil { + return err + } + + msgs, err := a.connect(amqpConf) + if err != nil { + return err + } + + a.wg = &sync.WaitGroup{} + a.wg.Add(1) + go a.process(msgs, acc) + + go func() { + err := <-a.conn.NotifyClose(make(chan *amqp.Error)) + if err == nil { + return + } + + log.Printf("I! AMQP consumer connection closed: %s; trying to reconnect", err) + for { + msgs, err := a.connect(amqpConf) + if err != nil { + log.Printf("E! 
AMQP connection failed: %s", err) + time.Sleep(10 * time.Second) + continue + } + + a.wg.Add(1) + go a.process(msgs, acc) + break + } + }() + + return nil +} + +func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, error) { + conn, err := amqp.DialConfig(a.URL, *amqpConf) + if err != nil { + return nil, err + } + a.conn = conn + + ch, err := conn.Channel() + if err != nil { + return nil, fmt.Errorf("Failed to open a channel: %s", err) + } + + err = ch.ExchangeDeclare( + a.Exchange, // name + "topic", // type + true, // durable + false, // auto-deleted + false, // internal + false, // no-wait + nil, // arguments + ) + if err != nil { + return nil, fmt.Errorf("Failed to declare an exchange: %s", err) + } + + q, err := ch.QueueDeclare( + a.Queue, // queue + true, // durable + false, // delete when unused + false, // exclusive + false, // no-wait + nil, // arguments + ) + if err != nil { + return nil, fmt.Errorf("Failed to declare a queue: %s", err) + } + + err = ch.QueueBind( + q.Name, // queue + a.BindingKey, // binding-key + a.Exchange, // exchange + false, + nil, + ) + if err != nil { + return nil, fmt.Errorf("Failed to bind a queue: %s", err) + } + + err = ch.Qos( + a.PrefetchCount, + 0, // prefetch-size + false, // global + ) + if err != nil { + return nil, fmt.Errorf("Failed to set QoS: %s", err) + } + + msgs, err := ch.Consume( + q.Name, // queue + "", // consumer + false, // auto-ack + false, // exclusive + false, // no-local + false, // no-wait + nil, // arguments + ) + if err != nil { + return nil, fmt.Errorf("Failed establishing connection to queue: %s", err) + } + + log.Println("I! Started AMQP consumer") + return msgs, err +} + +// Read messages from queue and add them to the Accumulator +func (a *AMQPConsumer) process(msgs <-chan amqp.Delivery, acc telegraf.Accumulator) { + defer a.wg.Done() + for d := range msgs { + metrics, err := a.parser.Parse(d.Body) + if err != nil { + log.Printf("E! %v: error parsing metric - %v", err, string(d.Body)) + } else { + for _, m := range metrics { + acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time()) + } + } + + d.Ack(false) + } + log.Printf("I! AMQP consumer queue closed") +} + +func (a *AMQPConsumer) Stop() { + err := a.conn.Close() + if err != nil && err != amqp.ErrClosed { + log.Printf("E! Error closing AMQP connection: %s", err) + return + } + a.wg.Wait() + log.Println("I! Stopped AMQP service") +} + +func init() { + inputs.Add("amqp_consumer", func() telegraf.Input { + return &AMQPConsumer{ + AuthMethod: DefaultAuthMethod, + PrefetchCount: DefaultPrefetchCount, + } + }) +} diff --git a/plugins/outputs/amqp/README.md b/plugins/outputs/amqp/README.md index d49c507b8e7f1..208ae934cb35b 100644 --- a/plugins/outputs/amqp/README.md +++ b/plugins/outputs/amqp/README.md @@ -1,13 +1,18 @@ # AMQP Output Plugin -This plugin writes to a AMQP exchange using tag, defined in configuration file -as RoutingTag, as a routing key. +This plugin writes to a AMQP 0-9-1 Exchange, a promenent implementation of this protocol being [RabbitMQ](https://www.rabbitmq.com/). + +Metrics are written to a topic exchange using tag, defined in configuration file as RoutingTag, as a routing key. If RoutingTag is empty, then empty routing key will be used. Metrics are grouped in batches by RoutingTag. This plugin doesn't bind exchange to a queue, so it should be done by consumer. 
+For an introduction to AMQP see: +- https://www.rabbitmq.com/tutorials/amqp-concepts.html +- https://www.rabbitmq.com/getstarted.html + ### Configuration: ``` @@ -18,6 +23,8 @@ This plugin doesn't bind exchange to a queue, so it should be done by consumer. ## AMQP exchange exchange = "telegraf" ## Auth method. PLAIN and EXTERNAL are supported + ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as + ## described here: https://www.rabbitmq.com/plugins.html # auth_method = "PLAIN" ## Telegraf tag to use as a routing key ## ie, if this tag exists, it's value will be used as the routing key diff --git a/plugins/outputs/amqp/amqp.go b/plugins/outputs/amqp/amqp.go index d86cac5969b5d..7b4c7d4c9eb8c 100644 --- a/plugins/outputs/amqp/amqp.go +++ b/plugins/outputs/amqp/amqp.go @@ -40,6 +40,7 @@ type AMQP struct { // Use SSL but skip chain & host verification InsecureSkipVerify bool + conn *amqp.Connection channel *amqp.Channel sync.Mutex headers amqp.Table @@ -68,6 +69,8 @@ var sampleConfig = ` ## AMQP exchange exchange = "telegraf" ## Auth method. PLAIN and EXTERNAL are supported + ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as + ## described here: https://www.rabbitmq.com/plugins.html # auth_method = "PLAIN" ## Telegraf tag to use as a routing key ## ie, if this tag exists, it's value will be used as the routing key @@ -129,6 +132,8 @@ func (q *AMQP) Connect() error { if err != nil { return err } + q.conn = connection + channel, err := connection.Channel() if err != nil { return fmt.Errorf("Failed to open a channel: %s", err) @@ -148,7 +153,11 @@ func (q *AMQP) Connect() error { } q.channel = channel go func() { - log.Printf("I! Closing: %s", <-connection.NotifyClose(make(chan *amqp.Error))) + err := <-connection.NotifyClose(make(chan *amqp.Error)) + if err == nil { + return + } + log.Printf("I! Closing: %s", err) log.Printf("I! Trying to reconnect") for err := q.Connect(); err != nil; err = q.Connect() { log.Println("E! ", err.Error()) @@ -160,7 +169,12 @@ func (q *AMQP) Connect() error { } func (q *AMQP) Close() error { - return q.channel.Close() + err := q.conn.Close() + if err != nil && err != amqp.ErrClosed { + log.Printf("E! Error closing AMQP connection: %s", err) + return err + } + return nil } func (q *AMQP) SampleConfig() string { @@ -207,7 +221,7 @@ func (q *AMQP) Write(metrics []telegraf.Metric) error { Body: buf, }) if err != nil { - return fmt.Errorf("FAILED to send amqp message: %s", err) + return fmt.Errorf("Failed to send AMQP message: %s", err) } } return nil From 76bcdecd21e7aa63734564715563392fa64ad0a6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20de=20Metz?= Date: Mon, 6 Mar 2017 12:34:41 +0000 Subject: [PATCH 008/201] Respond 200 when receiving a ping event. 
(#2492) --- plugins/inputs/webhooks/github/github_webhooks.go | 9 ++++++--- plugins/inputs/webhooks/github/github_webhooks_test.go | 4 ++++ 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/webhooks/github/github_webhooks.go b/plugins/inputs/webhooks/github/github_webhooks.go index 139c7697104de..a31c6fdf2280c 100644 --- a/plugins/inputs/webhooks/github/github_webhooks.go +++ b/plugins/inputs/webhooks/github/github_webhooks.go @@ -34,9 +34,10 @@ func (gh *GithubWebhook) eventHandler(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusBadRequest) return } - - p := e.NewMetric() - gh.acc.AddFields("github_webhooks", p.Fields(), p.Tags(), p.Time()) + if e != nil { + p := e.NewMetric() + gh.acc.AddFields("github_webhooks", p.Fields(), p.Tags(), p.Time()) + } w.WriteHeader(http.StatusOK) } @@ -84,6 +85,8 @@ func NewEvent(data []byte, name string) (Event, error) { return generateEvent(data, &MembershipEvent{}) case "page_build": return generateEvent(data, &PageBuildEvent{}) + case "ping": + return nil, nil case "public": return generateEvent(data, &PublicEvent{}) case "pull_request": diff --git a/plugins/inputs/webhooks/github/github_webhooks_test.go b/plugins/inputs/webhooks/github/github_webhooks_test.go index 7bee5372d1f28..0ec9917264374 100644 --- a/plugins/inputs/webhooks/github/github_webhooks_test.go +++ b/plugins/inputs/webhooks/github/github_webhooks_test.go @@ -25,6 +25,10 @@ func TestCommitCommentEvent(t *testing.T) { GithubWebhookRequest("commit_comment", CommitCommentEventJSON(), t) } +func TestPingEvent(t *testing.T) { + GithubWebhookRequest("ping", "", t) +} + func TestDeleteEvent(t *testing.T) { GithubWebhookRequest("delete", DeleteEventJSON(), t) } From 7a8e8217318236b9fc0e5b306cbe91b1142a7472 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Mon, 6 Mar 2017 15:59:36 +0000 Subject: [PATCH 009/201] Revert "Procstat: don't cache PIDs" (#2479) --- CHANGELOG.md | 2 - plugins/inputs/procstat/procstat.go | 44 ++++++++++++++++--- plugins/inputs/procstat/procstat_test.go | 2 + plugins/inputs/procstat/spec_processor.go | 53 ++++++++++------------- 4 files changed, 61 insertions(+), 40 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 323b239155194..5773179b6c29d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -61,8 +61,6 @@ be deprecated eventually. - [#2077](https://github.com/influxdata/telegraf/issues/2077): SQL Server Input - Arithmetic overflow error converting numeric to data type int. - [#2262](https://github.com/influxdata/telegraf/issues/2262): Flush jitter can inhibit metric collection. -- [#2287](https://github.com/influxdata/telegraf/issues/2287): Kubernetes input: Handle null startTime for stopped pods -- [#1636](https://github.com/influxdata/telegraf/issues/1636): procstat - stop caching PIDs. - [#2318](https://github.com/influxdata/telegraf/issues/2318): haproxy input - Add missing fields. - [#2287](https://github.com/influxdata/telegraf/issues/2287): Kubernetes input: Handle null startTime for stopped pods. - [#2356](https://github.com/influxdata/telegraf/issues/2356): cpu input panic when /proc/stat is empty. 
diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index 565d0ebd13b38..929490e4a2e23 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -8,6 +8,8 @@ import ( "strconv" "strings" + "github.com/shirou/gopsutil/process" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -21,12 +23,15 @@ type Procstat struct { User string PidTag bool + // pidmap maps a pid to a process object, so we don't recreate every gather + pidmap map[int32]*process.Process // tagmap maps a pid to a map of tags for that pid tagmap map[int32]map[string]string } func NewProcstat() *Procstat { return &Procstat{ + pidmap: make(map[int32]*process.Process), tagmap: make(map[int32]map[string]string), } } @@ -62,26 +67,51 @@ func (_ *Procstat) Description() string { } func (p *Procstat) Gather(acc telegraf.Accumulator) error { - pids, err := p.getAllPids() + err := p.createProcesses() if err != nil { log.Printf("E! Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] user: [%s] %s", p.Exe, p.PidFile, p.Pattern, p.User, err.Error()) } else { - for _, pid := range pids { + for pid, proc := range p.pidmap { if p.PidTag { p.tagmap[pid]["pid"] = fmt.Sprint(pid) } - p := NewSpecProcessor(p.ProcessName, p.Prefix, pid, acc, p.tagmap[pid]) - err := p.pushMetrics() - if err != nil { - log.Printf("E! Error: procstat: %s", err.Error()) - } + p := NewSpecProcessor(p.ProcessName, p.Prefix, pid, acc, proc, p.tagmap[pid]) + p.pushMetrics() } } return nil } +func (p *Procstat) createProcesses() error { + var errstring string + var outerr error + + pids, err := p.getAllPids() + if err != nil { + errstring += err.Error() + " " + } + + for _, pid := range pids { + _, ok := p.pidmap[pid] + if !ok { + proc, err := process.NewProcess(pid) + if err == nil { + p.pidmap[pid] = proc + } else { + errstring += err.Error() + " " + } + } + } + + if errstring != "" { + outerr = fmt.Errorf("%s", errstring) + } + + return outerr +} + func (p *Procstat) getAllPids() ([]int32, error) { var pids []int32 var err error diff --git a/plugins/inputs/procstat/procstat_test.go b/plugins/inputs/procstat/procstat_test.go index 001537178720e..ccc72bdbb2811 100644 --- a/plugins/inputs/procstat/procstat_test.go +++ b/plugins/inputs/procstat/procstat_test.go @@ -6,6 +6,7 @@ import ( "strconv" "testing" + "github.com/shirou/gopsutil/process" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -23,6 +24,7 @@ func TestGather(t *testing.T) { p := Procstat{ PidFile: file.Name(), Prefix: "foo", + pidmap: make(map[int32]*process.Process), tagmap: make(map[int32]map[string]string), } p.Gather(&acc) diff --git a/plugins/inputs/procstat/spec_processor.go b/plugins/inputs/procstat/spec_processor.go index 1b9f63126acfe..3b56fbc3e212a 100644 --- a/plugins/inputs/procstat/spec_processor.go +++ b/plugins/inputs/procstat/spec_processor.go @@ -1,7 +1,6 @@ package procstat import ( - "fmt" "time" "github.com/shirou/gopsutil/process" @@ -10,13 +9,12 @@ import ( ) type SpecProcessor struct { - ProcessName string - Prefix string - pid int32 - tags map[string]string - fields map[string]interface{} - acc telegraf.Accumulator - proc *process.Process + Prefix string + pid int32 + tags map[string]string + fields map[string]interface{} + acc telegraf.Accumulator + proc *process.Process } func NewSpecProcessor( @@ -24,35 +22,29 @@ func NewSpecProcessor( prefix string, pid int32, acc 
telegraf.Accumulator, + p *process.Process, tags map[string]string, ) *SpecProcessor { - return &SpecProcessor{ - ProcessName: processName, - Prefix: prefix, - pid: pid, - tags: tags, - fields: make(map[string]interface{}), - acc: acc, - } -} - -func (p *SpecProcessor) pushMetrics() error { - var prefix string - proc, err := process.NewProcess(p.pid) - if err != nil { - return fmt.Errorf("Failed to open process with pid '%d'. Error: '%s'", - p.pid, err) - } - p.proc = proc - if p.ProcessName != "" { - p.tags["process_name"] = p.ProcessName + if processName != "" { + tags["process_name"] = processName } else { - name, err := p.proc.Name() + name, err := p.Name() if err == nil { - p.tags["process_name"] = name + tags["process_name"] = name } } + return &SpecProcessor{ + Prefix: prefix, + pid: pid, + tags: tags, + fields: make(map[string]interface{}), + acc: acc, + proc: p, + } +} +func (p *SpecProcessor) pushMetrics() { + var prefix string if p.Prefix != "" { prefix = p.Prefix + "_" } @@ -115,5 +107,4 @@ func (p *SpecProcessor) pushMetrics() error { } p.acc.AddFields("procstat", fields, p.tags) - return nil } From ceb36adac7b459d1b286ac20c411ea288c1558b4 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 6 Mar 2017 11:20:53 -0800 Subject: [PATCH 010/201] Update issue template --- .github/ISSUE_TEMPLATE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index b59da651a36c8..f4190e3ec1808 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -1,7 +1,7 @@ ## Directions GitHub Issues are reserved for actionable bug reports and feature requests. -General questions should be sent to the [InfluxDB mailing list](https://groups.google.com/forum/#!forum/influxdb). +General questions should be asked at the [InfluxData Community](https://community.influxdata.com) site. Before opening an issue, search for similar bug reports or feature requests on GitHub Issues. If no similar issue can be found, fill out either the "Bug Report" or the "Feature Request" section below. 
From 9df2974a0fa227c39725cc6bc2f9a4176343f996 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Tue, 28 Feb 2017 12:24:41 +0000 Subject: [PATCH 011/201] update gopsutil for file close fixes hopefully this will fix #2472 --- Godeps | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Godeps b/Godeps index de326cb19acb9..2d0419ef6a18e 100644 --- a/Godeps +++ b/Godeps @@ -44,7 +44,7 @@ github.com/prometheus/common dd2f054febf4a6c00f2343686efb775948a8bff4 github.com/prometheus/procfs 1878d9fbb537119d24b21ca07effd591627cd160 github.com/rcrowley/go-metrics 1f30fe9094a513ce4c700b9a54458bbb0c96996c github.com/samuel/go-zookeeper 1d7be4effb13d2d908342d349d71a284a7542693 -github.com/shirou/gopsutil 77b5d0080adb6f028e457906f1944d9fcca34442 +github.com/shirou/gopsutil d371ba1293cb48fedc6850526ea48b3846c54f2c github.com/soniah/gosnmp 5ad50dc75ab389f8a1c9f8a67d3a1cd85f67ed15 github.com/streadway/amqp 63795daa9a446c920826655f26ba31c81c860fd6 github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987 From 7513fcac4e3fc811549bdc132e35380c4668a88a Mon Sep 17 00:00:00 2001 From: vvvkamper Date: Thu, 2 Mar 2017 19:15:33 +0700 Subject: [PATCH 012/201] Fix part 2 of #1291 added PDH_FMT_NOCAP100 format option closes #2483 --- CHANGELOG.md | 1 + plugins/inputs/win_perf_counters/pdh.go | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5773179b6c29d..8418ffd2162ae 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -74,6 +74,7 @@ be deprecated eventually. - [#2178](https://github.com/influxdata/telegraf/issues/2178): logparser: regexp with lookahead. - [#2466](https://github.com/influxdata/telegraf/issues/2466): Telegraf can crash in LoadDirectory on 0600 files. - [#2215](https://github.com/influxdata/telegraf/issues/2215): Iptables input: document better that rules without a comment are ignored. +- [#2483](https://github.com/influxdata/telegraf/pull/2483): Fix win_perf_counters capping values at 100. ## v1.2.1 [2017-02-01] diff --git a/plugins/inputs/win_perf_counters/pdh.go b/plugins/inputs/win_perf_counters/pdh.go index 36563d6b758e0..fa00e0603f79b 100644 --- a/plugins/inputs/win_perf_counters/pdh.go +++ b/plugins/inputs/win_perf_counters/pdh.go @@ -331,7 +331,7 @@ func PdhCollectQueryData(hQuery PDH_HQUERY) uint32 { func PdhGetFormattedCounterValueDouble(hCounter PDH_HCOUNTER, lpdwType *uint32, pValue *PDH_FMT_COUNTERVALUE_DOUBLE) uint32 { ret, _, _ := pdh_GetFormattedCounterValue.Call( uintptr(hCounter), - uintptr(PDH_FMT_DOUBLE), + uintptr(PDH_FMT_DOUBLE|PDH_FMT_NOCAP100), uintptr(unsafe.Pointer(lpdwType)), uintptr(unsafe.Pointer(pValue))) @@ -378,7 +378,7 @@ func PdhGetFormattedCounterValueDouble(hCounter PDH_HCOUNTER, lpdwType *uint32, func PdhGetFormattedCounterArrayDouble(hCounter PDH_HCOUNTER, lpdwBufferSize *uint32, lpdwBufferCount *uint32, itemBuffer *PDH_FMT_COUNTERVALUE_ITEM_DOUBLE) uint32 { ret, _, _ := pdh_GetFormattedCounterArrayW.Call( uintptr(hCounter), - uintptr(PDH_FMT_DOUBLE), + uintptr(PDH_FMT_DOUBLE|PDH_FMT_NOCAP100), uintptr(unsafe.Pointer(lpdwBufferSize)), uintptr(unsafe.Pointer(lpdwBufferCount)), uintptr(unsafe.Pointer(itemBuffer))) From 56aa89e5c86f38e566c6ecadb568c68cd9dcaea7 Mon Sep 17 00:00:00 2001 From: Robpol86 Date: Wed, 8 Mar 2017 08:38:36 -0800 Subject: [PATCH 013/201] Exporting Ipmi.Path to be set by config. (#2498) * Exporting Ipmi.Path to be set by config. 
Currently "path" is not exported, giving this error when users try to override the variable via telegraf.conf as per the sample config: `field corresponding to `path' is not defined in `*ipmi_sensor.Ipmi'` Exporting the variable solves the problem. * Updating changelog. --- CHANGELOG.md | 1 + plugins/inputs/ipmi_sensor/ipmi.go | 8 ++++---- plugins/inputs/ipmi_sensor/ipmi_test.go | 4 ++-- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8418ffd2162ae..3a8e586f15e66 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -75,6 +75,7 @@ be deprecated eventually. - [#2466](https://github.com/influxdata/telegraf/issues/2466): Telegraf can crash in LoadDirectory on 0600 files. - [#2215](https://github.com/influxdata/telegraf/issues/2215): Iptables input: document better that rules without a comment are ignored. - [#2483](https://github.com/influxdata/telegraf/pull/2483): Fix win_perf_counters capping values at 100. +- [#2498](https://github.com/influxdata/telegraf/pull/2498): Exporting Ipmi.Path to be set by config. ## v1.2.1 [2017-02-01] diff --git a/plugins/inputs/ipmi_sensor/ipmi.go b/plugins/inputs/ipmi_sensor/ipmi.go index b2389a67538f4..0114812d3800a 100644 --- a/plugins/inputs/ipmi_sensor/ipmi.go +++ b/plugins/inputs/ipmi_sensor/ipmi.go @@ -17,7 +17,7 @@ var ( ) type Ipmi struct { - path string + Path string Servers []string } @@ -44,7 +44,7 @@ func (m *Ipmi) Description() string { } func (m *Ipmi) Gather(acc telegraf.Accumulator) error { - if len(m.path) == 0 { + if len(m.Path) == 0 { return fmt.Errorf("ipmitool not found: verify that ipmitool is installed and that ipmitool is in your PATH") } @@ -76,7 +76,7 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server string) error { } opts = append(opts, "sdr") - cmd := execCommand(m.path, opts...) + cmd := execCommand(m.Path, opts...) out, err := internal.CombinedOutputTimeout(cmd, time.Second*5) if err != nil { return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out)) @@ -149,7 +149,7 @@ func init() { m := Ipmi{} path, _ := exec.LookPath("ipmitool") if len(path) > 0 { - m.path = path + m.Path = path } inputs.Add("ipmi_sensor", func() telegraf.Input { return &m diff --git a/plugins/inputs/ipmi_sensor/ipmi_test.go b/plugins/inputs/ipmi_sensor/ipmi_test.go index 94dc066c851ad..84bcdcac01b93 100644 --- a/plugins/inputs/ipmi_sensor/ipmi_test.go +++ b/plugins/inputs/ipmi_sensor/ipmi_test.go @@ -14,7 +14,7 @@ import ( func TestGather(t *testing.T) { i := &Ipmi{ Servers: []string{"USERID:PASSW0RD@lan(192.168.1.1)"}, - path: "ipmitool", + Path: "ipmitool", } // overwriting exec commands with mock commands execCommand = fakeExecCommand @@ -118,7 +118,7 @@ func TestGather(t *testing.T) { } i = &Ipmi{ - path: "ipmitool", + Path: "ipmitool", } err = i.Gather(&acc) From ae6a5d2255f7eeadc910c424632a665275c8b598 Mon Sep 17 00:00:00 2001 From: jeremydenoun Date: Wed, 8 Mar 2017 23:08:55 +0100 Subject: [PATCH 014/201] Remove warning if parse empty content (#2500) closes #2448 --- CHANGELOG.md | 1 + metric/parse.go | 3 +++ plugins/inputs/exec/exec_test.go | 29 +++++++++++++++++++++++++++++ 3 files changed, 33 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3a8e586f15e66..01eeb8bef0013 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -76,6 +76,7 @@ be deprecated eventually. - [#2215](https://github.com/influxdata/telegraf/issues/2215): Iptables input: document better that rules without a comment are ignored. 
- [#2483](https://github.com/influxdata/telegraf/pull/2483): Fix win_perf_counters capping values at 100.
 - [#2498](https://github.com/influxdata/telegraf/pull/2498): Exporting Ipmi.Path to be set by config.
+- [#2500](https://github.com/influxdata/telegraf/pull/2500): Remove warning if parse empty content
 
 ## v1.2.1 [2017-02-01]
 
diff --git a/metric/parse.go b/metric/parse.go
index fe2cffdc1397e..15b88e5528e1a 100644
--- a/metric/parse.go
+++ b/metric/parse.go
@@ -44,6 +44,9 @@ func Parse(buf []byte) ([]telegraf.Metric, error) {
 }
 
 func ParseWithDefaultTime(buf []byte, t time.Time) ([]telegraf.Metric, error) {
+	if len(buf) == 0 {
+		return []telegraf.Metric{}, nil
+	}
 	if len(buf) <= 6 {
 		return []telegraf.Metric{}, makeError("buffer too short", buf, 0)
 	}
diff --git a/plugins/inputs/exec/exec_test.go b/plugins/inputs/exec/exec_test.go
index 71d33fb71bf8a..975eb9642668d 100644
--- a/plugins/inputs/exec/exec_test.go
+++ b/plugins/inputs/exec/exec_test.go
@@ -37,6 +37,8 @@ const malformedJson = `
 `
 
 const lineProtocol = "cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1\n"
+const lineProtocolEmpty = ""
+const lineProtocolShort = "ab"
 
 const lineProtocolMulti = `
 cpu,cpu=cpu0,host=foo,datacenter=us-east usage_idle=99,usage_busy=1
@@ -167,6 +169,33 @@ func TestLineProtocolParse(t *testing.T) {
 	acc.AssertContainsTaggedFields(t, "cpu", fields, tags)
 }
 
+func TestLineProtocolEmptyParse(t *testing.T) {
+	parser, _ := parsers.NewInfluxParser()
+	e := &Exec{
+		runner:   newRunnerMock([]byte(lineProtocolEmpty), nil),
+		Commands: []string{"line-protocol"},
+		parser:   parser,
+	}
+
+	var acc testutil.Accumulator
+	err := e.Gather(&acc)
+	require.NoError(t, err)
+}
+
+func TestLineProtocolShortParse(t *testing.T) {
+	parser, _ := parsers.NewInfluxParser()
+	e := &Exec{
+		runner:   newRunnerMock([]byte(lineProtocolShort), nil),
+		Commands: []string{"line-protocol"},
+		parser:   parser,
+	}
+
+	var acc testutil.Accumulator
+	err := e.Gather(&acc)
+	require.Error(t, err)
+	assert.Contains(t, err.Error(), "buffer too short", "A buffer too short error was expected")
+}
+
 func TestLineProtocolParseMultiple(t *testing.T) {
 	parser, _ := parsers.NewInfluxParser()
 	e := &Exec{

From d243d69a09dd27b67c960fcead800669957fa90b Mon Sep 17 00:00:00 2001
From: Dennis Dryden
Date: Thu, 9 Mar 2017 11:19:03 +0000
Subject: [PATCH 015/201] Add configuration docs to Postgresql input plugin
 (#2515)

* Add configuration docs to Postgresql input plugin

Add configuration docs to PostgreSQL input plugin README (mostly from
the source code) though I've not included the configuration example
that seems to use all the connections on the database[1].

[1] https://github.com/influxdata/telegraf/issues/2410

* Fix typo in readme and sampleConfig string.
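For reference, the minimal configuration shape these docs describe,
mirroring the example added in the diff below (the address and database
names are illustrative placeholders):

```toml
[[inputs.postgresql]]
  ## All connection parameters are optional.
  address = "postgres://telegraf@localhost/someDB"
  ## Use either 'databases' or 'ignored_databases', never both.
  ignored_databases = ["template0", "template1"]
```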
--- plugins/inputs/postgresql/README.md | 22 ++++++++++++++++++++++ plugins/inputs/postgresql/postgresql.go | 2 +- 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/postgresql/README.md b/plugins/inputs/postgresql/README.md index e309aa80f0ffa..aed041bc6222d 100644 --- a/plugins/inputs/postgresql/README.md +++ b/plugins/inputs/postgresql/README.md @@ -29,3 +29,25 @@ _* value ignored and therefore not recorded._ More information about the meaning of these metrics can be found in the [PostgreSQL Documentation](http://www.postgresql.org/docs/9.2/static/monitoring-stats.html#PG-STAT-DATABASE-VIEW) + +## Configruation +Specify address via a url matching: + + `postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]` + +All connection parameters are optional. Without the dbname parameter, the driver will default to a database with the same name as the user. This dbname is just for instantiating a connection with the server and doesn't restrict the databases we are trying to grab metrics for. + +A list of databases to explicitly ignore. If not specified, metrics for all databases are gathered. Do NOT use with the 'databases' option. + + `ignored_databases = ["postgres", "template0", "template1"]` + +A list of databases to pull metrics about. If not specified, metrics for all databases are gathered. Do NOT use with the 'ignored_databases' option. + + `databases = ["app_production", "testing"]` + +### Configuration example +``` +[[inputs.postgresql]] + address = "postgres://telegraf@localhost/someDB" + ignored_databases = ["template0", "template1"] +``` diff --git a/plugins/inputs/postgresql/postgresql.go b/plugins/inputs/postgresql/postgresql.go index 7019762ed2dd8..7c854dfd3885a 100644 --- a/plugins/inputs/postgresql/postgresql.go +++ b/plugins/inputs/postgresql/postgresql.go @@ -43,7 +43,7 @@ var sampleConfig = ` # ignored_databases = ["postgres", "template0", "template1"] ## A list of databases to pull metrics about. If not specified, metrics for all - ## databases are gathered. Do NOT use with the 'ignore_databases' option. + ## databases are gathered. Do NOT use with the 'ignored_databases' option. # databases = ["app_production", "testing"] ` From 49c212337f49a50729f80f7d3577c5905da638df Mon Sep 17 00:00:00 2001 From: Timothy Date: Thu, 9 Mar 2017 06:21:03 -0500 Subject: [PATCH 016/201] Update CONFIGURATION.md (#2516) Add information about default configuration file locations. Also mention that the config directory option is available. --- docs/CONFIGURATION.md | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index 9b2eb99d88ab9..ff4814b822f12 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -24,6 +24,16 @@ Environment variables can be used anywhere in the config file, simply prepend them with $. For strings the variable must be within quotes (ie, "$STR_VAR"), for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR) +## Configuration file locations + +The location of the configuration file can be set via the `--config` command +line flag. Telegraf will also pick up all files matching the pattern `*.conf` if +the `-config-directory` command line flag is used. + +On most systems, the default locations are `/etc/telegraf/telegraf.conf` for +the main configuration file and `/etc/telegraf/telegraf.d` for the directory of +configuration files. 
+ # Global Tags Global tags can be specified in the `[global_tags]` section of the config file @@ -351,4 +361,4 @@ to the system load metrics due to the `namepass` parameter. [[outputs.file]] files = ["stdout"] -``` \ No newline at end of file +``` From e811e2600d16872869e09f9fb769e3c536743d45 Mon Sep 17 00:00:00 2001 From: Cameron Sparr Date: Wed, 8 Mar 2017 15:26:33 +0000 Subject: [PATCH 017/201] create telegraf.d directory in tarball closes #2513 --- CHANGELOG.md | 1 + scripts/build.py | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 01eeb8bef0013..f6053bd66cd27 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -77,6 +77,7 @@ be deprecated eventually. - [#2483](https://github.com/influxdata/telegraf/pull/2483): Fix win_perf_counters capping values at 100. - [#2498](https://github.com/influxdata/telegraf/pull/2498): Exporting Ipmi.Path to be set by config. - [#2500](https://github.com/influxdata/telegraf/pull/2500): Remove warning if parse empty content +- [#2513](https://github.com/influxdata/telegraf/issues/2513): create /etc/telegraf/telegraf.d directory in tarball. ## v1.2.1 [2017-02-01] diff --git a/scripts/build.py b/scripts/build.py index 57208bf7fd182..aeaa04fd3ad8b 100755 --- a/scripts/build.py +++ b/scripts/build.py @@ -22,6 +22,7 @@ LOG_DIR = "/var/log/telegraf" SCRIPT_DIR = "/usr/lib/telegraf/scripts" CONFIG_DIR = "/etc/telegraf" +CONFIG_DIR_D = "/etc/telegraf/telegraf.d" LOGROTATE_DIR = "/etc/logrotate.d" INIT_SCRIPT = "scripts/init.sh" @@ -115,7 +116,7 @@ def create_package_fs(build_root): logging.debug("Creating a filesystem hierarchy from directory: {}".format(build_root)) # Using [1:] for the path names due to them being absolute # (will overwrite previous paths, per 'os.path.join' documentation) - dirs = [ INSTALL_ROOT_DIR[1:], LOG_DIR[1:], SCRIPT_DIR[1:], CONFIG_DIR[1:], LOGROTATE_DIR[1:] ] + dirs = [ INSTALL_ROOT_DIR[1:], LOG_DIR[1:], SCRIPT_DIR[1:], CONFIG_DIR[1:], LOGROTATE_DIR[1:], CONFIG_DIR_D[1:] ] for d in dirs: os.makedirs(os.path.join(build_root, d)) os.chmod(os.path.join(build_root, d), 0o755) From ea6e0b82595ffcd53f955496ec90d2e1377d0e07 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 9 Mar 2017 10:13:31 -0800 Subject: [PATCH 018/201] Fix typo in postgresql README --- plugins/inputs/postgresql/README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/postgresql/README.md b/plugins/inputs/postgresql/README.md index aed041bc6222d..e31fcff6991c1 100644 --- a/plugins/inputs/postgresql/README.md +++ b/plugins/inputs/postgresql/README.md @@ -30,21 +30,21 @@ _* value ignored and therefore not recorded._ More information about the meaning of these metrics can be found in the [PostgreSQL Documentation](http://www.postgresql.org/docs/9.2/static/monitoring-stats.html#PG-STAT-DATABASE-VIEW) -## Configruation +## Configuration Specify address via a url matching: `postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]` All connection parameters are optional. Without the dbname parameter, the driver will default to a database with the same name as the user. This dbname is just for instantiating a connection with the server and doesn't restrict the databases we are trying to grab metrics for. - + A list of databases to explicitly ignore. If not specified, metrics for all databases are gathered. Do NOT use with the 'databases' option. 
`ignored_databases = ["postgres", "template0", "template1"]` - + A list of databases to pull metrics about. If not specified, metrics for all databases are gathered. Do NOT use with the 'ignored_databases' option. `databases = ["app_production", "testing"]` - + ### Configuration example ``` [[inputs.postgresql]] From 13f314a5076a47b208aeef5bdab12470837e26c1 Mon Sep 17 00:00:00 2001 From: jeremydenoun Date: Thu, 9 Mar 2017 20:28:54 +0100 Subject: [PATCH 019/201] Report DEAD (X) State Process (#2501) Report count of processes in dead (X) process state from the processes input. This process state is only valid on Linux. --- CHANGELOG.md | 1 + plugins/inputs/system/PROCESSES_README.md | 4 +++- plugins/inputs/system/processes.go | 5 +++++ 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f6053bd66cd27..a11752b5c170b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -56,6 +56,7 @@ be deprecated eventually. - [#2339](https://github.com/influxdata/telegraf/pull/2339): Increment gather_errors for all errors emitted by inputs. - [#2071](https://github.com/influxdata/telegraf/issues/2071): Use official docker SDK. - [#1678](https://github.com/influxdata/telegraf/pull/1678): Add AMQP consumer input plugin +- [#2501](https://github.com/influxdata/telegraf/pull/2501): Support DEAD(X) state in system input plugin. ### Bugfixes diff --git a/plugins/inputs/system/PROCESSES_README.md b/plugins/inputs/system/PROCESSES_README.md index 006e043fb8dda..aaeb279f80647 100644 --- a/plugins/inputs/system/PROCESSES_README.md +++ b/plugins/inputs/system/PROCESSES_README.md @@ -23,6 +23,7 @@ it requires access to execute `ps`. - stopped - total - zombie + - dead - wait (freebsd only) - idle (bsd only) - paging (linux only) @@ -39,6 +40,7 @@ Linux FreeBSD Darwin meaning R R R running S S S sleeping Z Z Z zombie + X none none dead T T T stopped none I I idle (sleeping for longer than about 20 seconds) D D,L U blocked (waiting in uninterruptible sleep, or locked) @@ -54,5 +56,5 @@ None ``` $ telegraf -config ~/ws/telegraf.conf -input-filter processes -test * Plugin: processes, Collection 1 -> processes blocked=8i,running=1i,sleeping=265i,stopped=0i,total=274i,zombie=0i,paging=0i,total_threads=687i 1457478636980905042 +> processes blocked=8i,running=1i,sleeping=265i,stopped=0i,total=274i,zombie=0i,dead=0i,paging=0i,total_threads=687i 1457478636980905042 ``` diff --git a/plugins/inputs/system/processes.go b/plugins/inputs/system/processes.go index 0950323fde8db..202bdf058ddd6 100644 --- a/plugins/inputs/system/processes.go +++ b/plugins/inputs/system/processes.go @@ -81,6 +81,7 @@ func getEmptyFields() map[string]interface{} { case "openbsd": fields["idle"] = int64(0) case "linux": + fields["dead"] = int64(0) fields["paging"] = int64(0) fields["total_threads"] = int64(0) } @@ -107,6 +108,8 @@ func (p *Processes) gatherFromPS(fields map[string]interface{}) error { fields["blocked"] = fields["blocked"].(int64) + int64(1) case 'Z': fields["zombies"] = fields["zombies"].(int64) + int64(1) + case 'X': + fields["dead"] = fields["dead"].(int64) + int64(1) case 'T': fields["stopped"] = fields["stopped"].(int64) + int64(1) case 'R': @@ -164,6 +167,8 @@ func (p *Processes) gatherFromProc(fields map[string]interface{}) error { fields["blocked"] = fields["blocked"].(int64) + int64(1) case 'Z': fields["zombies"] = fields["zombies"].(int64) + int64(1) + case 'X': + fields["dead"] = fields["dead"].(int64) + int64(1) case 'T', 't': fields["stopped"] = 
fields["stopped"].(int64) + int64(1) case 'W': From 7a5d8578467ac6a58575bd6a661a1c16f383455c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 10 Mar 2017 11:27:55 -0800 Subject: [PATCH 020/201] Add support for new SSL configuration to mongodb (#2522) closes #2519 --- CHANGELOG.md | 1 + plugins/inputs/mongodb/README.md | 9 ++++++++- plugins/inputs/mongodb/mongodb.go | 29 ++++++++++++++++++++++++++++- 3 files changed, 37 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a11752b5c170b..20a0365556908 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -57,6 +57,7 @@ be deprecated eventually. - [#2071](https://github.com/influxdata/telegraf/issues/2071): Use official docker SDK. - [#1678](https://github.com/influxdata/telegraf/pull/1678): Add AMQP consumer input plugin - [#2501](https://github.com/influxdata/telegraf/pull/2501): Support DEAD(X) state in system input plugin. +- [#2522](https://github.com/influxdata/telegraf/pull/2522): Add support for mongodb client certificates. ### Bugfixes diff --git a/plugins/inputs/mongodb/README.md b/plugins/inputs/mongodb/README.md index 72f87feb84ec8..678fe07770cb0 100644 --- a/plugins/inputs/mongodb/README.md +++ b/plugins/inputs/mongodb/README.md @@ -11,9 +11,16 @@ ## 10.0.0.1:10000, etc. servers = ["127.0.0.1:27017"] gather_perdb_stats = false + + ## Optional SSL Config + # ssl_ca = "/etc/telegraf/ca.pem" + # ssl_cert = "/etc/telegraf/cert.pem" + # ssl_key = "/etc/telegraf/key.pem" + ## Use SSL but skip chain & host verification + # insecure_skip_verify = false ``` -For authenticated mongodb istances use connection mongdb connection URI +For authenticated mongodb instances use `mongodb://` connection URI ```toml [[inputs.mongodb]] diff --git a/plugins/inputs/mongodb/mongodb.go b/plugins/inputs/mongodb/mongodb.go index 0bf822a4ceda1..a80b94690d359 100644 --- a/plugins/inputs/mongodb/mongodb.go +++ b/plugins/inputs/mongodb/mongodb.go @@ -10,6 +10,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/errchan" "github.com/influxdata/telegraf/plugins/inputs" "gopkg.in/mgo.v2" @@ -20,6 +21,15 @@ type MongoDB struct { Ssl Ssl mongos map[string]*Server GatherPerdbStats bool + + // Path to CA file + SSLCA string `toml:"ssl_ca"` + // Path to host cert file + SSLCert string `toml:"ssl_cert"` + // Path to cert key file + SSLKey string `toml:"ssl_key"` + // Use SSL but skip chain & host verification + InsecureSkipVerify bool } type Ssl struct { @@ -35,6 +45,13 @@ var sampleConfig = ` ## 10.0.0.1:10000, etc. 
servers = ["127.0.0.1:27017"] gather_perdb_stats = false + + ## Optional SSL Config + # ssl_ca = "/etc/telegraf/ca.pem" + # ssl_cert = "/etc/telegraf/cert.pem" + # ssl_key = "/etc/telegraf/key.pem" + ## Use SSL but skip chain & host verification + # insecure_skip_verify = false ` func (m *MongoDB) SampleConfig() string { @@ -105,8 +122,11 @@ func (m *MongoDB) gatherServer(server *Server, acc telegraf.Accumulator) error { dialInfo.Direct = true dialInfo.Timeout = 5 * time.Second + var tlsConfig *tls.Config + if m.Ssl.Enabled { - tlsConfig := &tls.Config{} + // Deprecated SSL config + tlsConfig = &tls.Config{} if len(m.Ssl.CaCerts) > 0 { roots := x509.NewCertPool() for _, caCert := range m.Ssl.CaCerts { @@ -119,6 +139,13 @@ func (m *MongoDB) gatherServer(server *Server, acc telegraf.Accumulator) error { } else { tlsConfig.InsecureSkipVerify = true } + } else { + tlsConfig, err = internal.GetTLSConfig( + m.SSLCert, m.SSLKey, m.SSLCA, m.InsecureSkipVerify) + } + + // If configured to use TLS, add a dial function + if tlsConfig != nil { dialInfo.DialServer = func(addr *mgo.ServerAddr) (net.Conn, error) { conn, err := tls.Dial("tcp", addr.String(), tlsConfig) if err != nil { From 426182b81a3b78164212e3b3e9ebab0d89023934 Mon Sep 17 00:00:00 2001 From: Antoine Augusti Date: Wed, 15 Mar 2017 23:20:18 +0100 Subject: [PATCH 021/201] Update default value for Cloudwatch rate limit (#2520) --- CHANGELOG.md | 1 + plugins/inputs/cloudwatch/README.md | 7 ++++--- plugins/inputs/cloudwatch/cloudwatch.go | 9 +++++---- plugins/inputs/cloudwatch/cloudwatch_test.go | 4 ++-- 4 files changed, 12 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 20a0365556908..ea1ccca4d78d4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -79,6 +79,7 @@ be deprecated eventually. - [#2483](https://github.com/influxdata/telegraf/pull/2483): Fix win_perf_counters capping values at 100. - [#2498](https://github.com/influxdata/telegraf/pull/2498): Exporting Ipmi.Path to be set by config. - [#2500](https://github.com/influxdata/telegraf/pull/2500): Remove warning if parse empty content +- [#2520](https://github.com/influxdata/telegraf/pull/2520): Update default value for Cloudwatch rate limit - [#2513](https://github.com/influxdata/telegraf/issues/2513): create /etc/telegraf/telegraf.d directory in tarball. ## v1.2.1 [2017-02-01] diff --git a/plugins/inputs/cloudwatch/README.md b/plugins/inputs/cloudwatch/README.md index 643e18c3b5a2e..3a3c708a2f052 100644 --- a/plugins/inputs/cloudwatch/README.md +++ b/plugins/inputs/cloudwatch/README.md @@ -42,9 +42,10 @@ API endpoint. In the following order the plugin will attempt to authenticate. namespace = "AWS/ELB" ## Maximum requests per second. Note that the global default AWS rate limit is - ## 10 reqs/sec, so if you define multiple namespaces, these should add up to a - ## maximum of 10. Optional - default value is 10. - ratelimit = 10 + ## 400 reqs/sec, so if you define multiple namespaces, these should add up to a + ## maximum of 400. Optional - default value is 200. 
+ ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html + ratelimit = 200 ## Metrics to Pull (optional) ## Defaults to all Metrics in Namespace if nothing is provided diff --git a/plugins/inputs/cloudwatch/cloudwatch.go b/plugins/inputs/cloudwatch/cloudwatch.go index a812c12651101..f0a067001bd85 100644 --- a/plugins/inputs/cloudwatch/cloudwatch.go +++ b/plugins/inputs/cloudwatch/cloudwatch.go @@ -105,9 +105,10 @@ func (c *CloudWatch) SampleConfig() string { namespace = "AWS/ELB" ## Maximum requests per second. Note that the global default AWS rate limit is - ## 10 reqs/sec, so if you define multiple namespaces, these should add up to a - ## maximum of 10. Optional - default value is 10. - ratelimit = 10 + ## 400 reqs/sec, so if you define multiple namespaces, these should add up to a + ## maximum of 400. Optional - default value is 200. + ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html + ratelimit = 200 ## Metrics to Pull (optional) ## Defaults to all Metrics in Namespace if nothing is provided @@ -214,7 +215,7 @@ func init() { ttl, _ := time.ParseDuration("1hr") return &CloudWatch{ CacheTTL: internal.Duration{Duration: ttl}, - RateLimit: 10, + RateLimit: 200, } }) } diff --git a/plugins/inputs/cloudwatch/cloudwatch_test.go b/plugins/inputs/cloudwatch/cloudwatch_test.go index a1bd7464b9701..f2d58a00ca9fa 100644 --- a/plugins/inputs/cloudwatch/cloudwatch_test.go +++ b/plugins/inputs/cloudwatch/cloudwatch_test.go @@ -58,7 +58,7 @@ func TestGather(t *testing.T) { Namespace: "AWS/ELB", Delay: internalDuration, Period: internalDuration, - RateLimit: 10, + RateLimit: 200, } var acc testutil.Accumulator @@ -146,7 +146,7 @@ func TestSelectMetrics(t *testing.T) { Namespace: "AWS/ELB", Delay: internalDuration, Period: internalDuration, - RateLimit: 10, + RateLimit: 200, Metrics: []*Metric{ &Metric{ MetricNames: []string{"Latency", "RequestCount"}, From 8514acdc3cbdcedce660017b6a55068743710d2e Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Fri, 17 Mar 2017 13:14:03 -0400 Subject: [PATCH 022/201] return error on unsupported serializer data format (#2542) --- CHANGELOG.md | 1 + plugins/serializers/registry.go | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ea1ccca4d78d4..5ef7f580b0565 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -81,6 +81,7 @@ be deprecated eventually. - [#2500](https://github.com/influxdata/telegraf/pull/2500): Remove warning if parse empty content - [#2520](https://github.com/influxdata/telegraf/pull/2520): Update default value for Cloudwatch rate limit - [#2513](https://github.com/influxdata/telegraf/issues/2513): create /etc/telegraf/telegraf.d directory in tarball. +- [#2541](https://github.com/influxdata/telegraf/issues/2541): Return error on unsupported serializer data format. 
 ## v1.2.1 [2017-02-01]

diff --git a/plugins/serializers/registry.go b/plugins/serializers/registry.go
index 83be4900b7233..cb1e03b46a6ea 100644
--- a/plugins/serializers/registry.go
+++ b/plugins/serializers/registry.go
@@ -1,6 +1,8 @@
 package serializers
 
 import (
+	"fmt"
+
 	"github.com/influxdata/telegraf"
 
 	"github.com/influxdata/telegraf/plugins/serializers/graphite"
@@ -49,6 +51,8 @@ func NewSerializer(config *Config) (Serializer, error) {
 		serializer, err = NewGraphiteSerializer(config.Prefix, config.Template)
 	case "json":
 		serializer, err = NewJsonSerializer()
+	default:
+		err = fmt.Errorf("Invalid data format: %s", config.DataFormat)
 	}
 	return serializer, err
 }

From a962e958ebf64118cdd48fd3d6ff1583a56c7702 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Fri, 17 Mar 2017 16:49:11 -0700
Subject: [PATCH 023/201] Refactor procstat input (#2540)

fixes #1636
fixes #2315
---
 plugins/inputs/procstat/pgrep.go          |  91 +++++++
 plugins/inputs/procstat/process.go        |  60 +++++
 plugins/inputs/procstat/procstat.go       | 290 ++++++++++-----------
 plugins/inputs/procstat/procstat_test.go  | 293 ++++++++++++++++++++--
 plugins/inputs/procstat/spec_processor.go | 110 --------
 testutil/accumulator.go                   |  23 ++
 6 files changed, 594 insertions(+), 273 deletions(-)
 create mode 100644 plugins/inputs/procstat/pgrep.go
 create mode 100644 plugins/inputs/procstat/process.go
 delete mode 100644 plugins/inputs/procstat/spec_processor.go

diff --git a/plugins/inputs/procstat/pgrep.go b/plugins/inputs/procstat/pgrep.go
new file mode 100644
index 0000000000000..bae5161e4fe6d
--- /dev/null
+++ b/plugins/inputs/procstat/pgrep.go
@@ -0,0 +1,91 @@
+package procstat
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os/exec"
+	"strconv"
+	"strings"
+)
+
+type PIDFinder interface {
+	PidFile(path string) ([]PID, error)
+	Pattern(pattern string) ([]PID, error)
+	Uid(user string) ([]PID, error)
+	FullPattern(path string) ([]PID, error)
+}
+
+// Implementation of PIDGatherer that execs pgrep to find processes
+type Pgrep struct {
+	path string
+}
+
+func NewPgrep() (PIDFinder, error) {
+	path, err := exec.LookPath("pgrep")
+	if err != nil {
+		return nil, fmt.Errorf("Could not find pgrep binary: %s", err)
+	}
+	return &Pgrep{path}, nil
+}
+
+func (pg *Pgrep) PidFile(path string) ([]PID, error) {
+	var pids []PID
+	pidString, err := ioutil.ReadFile(path)
+	if err != nil {
+		return pids, fmt.Errorf("Failed to read pidfile '%s'. Error: '%s'",
+			path, err)
+	}
+	pid, err := strconv.Atoi(strings.TrimSpace(string(pidString)))
+	if err != nil {
+		return pids, err
+	}
+	pids = append(pids, PID(pid))
+	return pids, nil
+}
+
+func (pg *Pgrep) Pattern(pattern string) ([]PID, error) {
+	args := []string{pattern}
+	return find(pg.path, args)
+}
+
+func (pg *Pgrep) Uid(user string) ([]PID, error) {
+	args := []string{"-u", user}
+	return find(pg.path, args)
+}
+
+func (pg *Pgrep) FullPattern(pattern string) ([]PID, error) {
+	args := []string{"-f", pattern}
+	return find(pg.path, args)
+}
+
+func find(path string, args []string) ([]PID, error) {
+	out, err := run(path, args)
+	if err != nil {
+		return nil, err
+	}
+
+	return parseOutput(out)
+}
+
+func run(path string, args []string) (string, error) {
+	out, err := exec.Command(path, args...).Output()
+	if err != nil {
+		return "", fmt.Errorf("Error running %s: %s", path, err)
+	}
+	return string(out), err
+}
+
+func parseOutput(out string) ([]PID, error) {
+	pids := []PID{}
+	fields := strings.Fields(out)
+	for _, field := range fields {
+		pid, err := strconv.Atoi(field)
+		if err != nil {
+			return nil, err
+		}
+		if err == nil {
+			pids = append(pids, PID(pid))
+		}
+	}
+	return pids, nil
+}
diff --git a/plugins/inputs/procstat/process.go b/plugins/inputs/procstat/process.go
new file mode 100644
index 0000000000000..ec2363f6ecf4c
--- /dev/null
+++ b/plugins/inputs/procstat/process.go
@@ -0,0 +1,60 @@
+package procstat
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/shirou/gopsutil/cpu"
+	"github.com/shirou/gopsutil/process"
+)
+
+type Process interface {
+	PID() PID
+	Tags() map[string]string
+
+	IOCounters() (*process.IOCountersStat, error)
+	MemoryInfo() (*process.MemoryInfoStat, error)
+	Name() (string, error)
+	NumCtxSwitches() (*process.NumCtxSwitchesStat, error)
+	NumFDs() (int32, error)
+	NumThreads() (int32, error)
+	Percent(interval time.Duration) (float64, error)
+	Times() (*cpu.TimesStat, error)
+}
+
+type Proc struct {
+	hasCPUTimes bool
+	tags        map[string]string
+	*process.Process
+}
+
+func NewProc(pid PID) (Process, error) {
+	process, err := process.NewProcess(int32(pid))
+	if err != nil {
+		return nil, err
+	}
+
+	proc := &Proc{
+		Process:     process,
+		hasCPUTimes: false,
+		tags:        make(map[string]string),
+	}
+	return proc, nil
+}
+
+func (p *Proc) Tags() map[string]string {
+	return p.tags
+}
+
+func (p *Proc) PID() PID {
+	return PID(p.Process.Pid)
+}
+
+func (p *Proc) Percent(interval time.Duration) (float64, error) {
+	cpu_perc, err := p.Process.Percent(time.Duration(0))
+	if !p.hasCPUTimes && err == nil {
+		p.hasCPUTimes = true
+		return 0, fmt.Errorf("Must call Percent twice to compute percent cpu.")
+	}
+	return cpu_perc, err
+}
diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go
index 929490e4a2e23..46b88fbcf80a4 100644
--- a/plugins/inputs/procstat/procstat.go
+++ b/plugins/inputs/procstat/procstat.go
@@ -2,18 +2,20 @@ package procstat
 
 import (
 	"fmt"
-	"io/ioutil"
-	"log"
-	"os/exec"
 	"strconv"
-	"strings"
-
-	"github.com/shirou/gopsutil/process"
+	"time"
 
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/plugins/inputs"
 )
 
+var (
+	defaultPIDFinder = NewPgrep
+	defaultProcess   = NewProc
+)
+
+type PID int32
+
 type Procstat struct {
 	PidFile string `toml:"pid_file"`
 	Exe     string
@@ -23,17 +25,10 @@ type Procstat struct {
 	User        string
 	PidTag      bool
 
-	// pidmap maps a pid to a process object, so we don't recreate every gather
-	pidmap map[int32]*process.Process
-	// tagmap maps a pid to a map of tags for that pid
-	tagmap map[int32]map[string]string
-}
-
-func NewProcstat() *Procstat {
-	return &Procstat{
-		pidmap: make(map[int32]*process.Process),
-		tagmap: make(map[int32]map[string]string),
-	}
+	pidFinder       PIDFinder
+	createPIDFinder func() (PIDFinder, error)
+	procs           map[PID]Process
+	createProcess   func(PID) (Process, error)
 }
 
 var sampleConfig = `
@@ -67,174 +62,179 @@ func (_ *Procstat) Description() string {
 }
 
 func (p *Procstat) Gather(acc telegraf.Accumulator) error {
-	err := p.createProcesses()
+	procs, err := p.updateProcesses(p.procs)
 	if err != nil {
-		log.Printf("E! Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] user: [%s] %s",
+		return fmt.Errorf(
+			"E! Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] user: [%s] %s",
 			p.Exe, p.PidFile, p.Pattern, p.User, err.Error())
-	} else {
-		for pid, proc := range p.pidmap {
-			if p.PidTag {
-				p.tagmap[pid]["pid"] = fmt.Sprint(pid)
-			}
-			p := NewSpecProcessor(p.ProcessName, p.Prefix, pid, acc, proc, p.tagmap[pid])
-			p.pushMetrics()
-		}
+	}
+	p.procs = procs
+
+	for _, proc := range p.procs {
+		p.addMetrics(proc, acc)
 	}
 
 	return nil
 }
 
-func (p *Procstat) createProcesses() error {
-	var errstring string
-	var outerr error
-
-	pids, err := p.getAllPids()
-	if err != nil {
-		errstring += err.Error() + " "
+// Add metrics a single Process
+func (p *Procstat) addMetrics(proc Process, acc telegraf.Accumulator) {
+	var prefix string
+	if p.Prefix != "" {
+		prefix = p.Prefix + "_"
 	}
 
-	for _, pid := range pids {
-		_, ok := p.pidmap[pid]
-		if !ok {
-			proc, err := process.NewProcess(pid)
-			if err == nil {
-				p.pidmap[pid] = proc
-			} else {
-				errstring += err.Error() + " "
-			}
+	fields := map[string]interface{}{}
+
+	//If process_name tag is not already set, set to actual name
+	if _, nameInTags := proc.Tags()["process_name"]; !nameInTags {
+		name, err := proc.Name()
+		if err == nil {
+			proc.Tags()["process_name"] = name
 		}
 	}
 
-	if errstring != "" {
-		outerr = fmt.Errorf("%s", errstring)
+	//If pid is not present as a tag, include it as a field.
+	if _, pidInTags := proc.Tags()["pid"]; !pidInTags {
+		fields["pid"] = int32(proc.PID())
 	}
 
-	return outerr
-}
+	numThreads, err := proc.NumThreads()
+	if err == nil {
+		fields[prefix+"num_threads"] = numThreads
+	}
 
-func (p *Procstat) getAllPids() ([]int32, error) {
-	var pids []int32
-	var err error
+	fds, err := proc.NumFDs()
+	if err == nil {
+		fields[prefix+"num_fds"] = fds
+	}
 
-	if p.PidFile != "" {
-		pids, err = p.pidsFromFile()
-	} else if p.Exe != "" {
-		pids, err = p.pidsFromExe()
-	} else if p.Pattern != "" {
-		pids, err = p.pidsFromPattern()
-	} else if p.User != "" {
-		pids, err = p.pidsFromUser()
-	} else {
-		err = fmt.Errorf("Either exe, pid_file, user, or pattern has to be specified")
+	ctx, err := proc.NumCtxSwitches()
+	if err == nil {
+		fields[prefix+"voluntary_context_switches"] = ctx.Voluntary
+		fields[prefix+"involuntary_context_switches"] = ctx.Involuntary
 	}
 
-	return pids, err
-}
+	io, err := proc.IOCounters()
+	if err == nil {
+		fields[prefix+"read_count"] = io.ReadCount
+		fields[prefix+"write_count"] = io.WriteCount
+		fields[prefix+"read_bytes"] = io.ReadBytes
+		fields[prefix+"write_bytes"] = io.WriteBytes
+	}
 
-func (p *Procstat) pidsFromFile() ([]int32, error) {
-	var out []int32
-	var outerr error
-	pidString, err := ioutil.ReadFile(p.PidFile)
-	if err != nil {
-		outerr = fmt.Errorf("Failed to read pidfile '%s'. Error: '%s'",
-			p.PidFile, err)
-	} else {
-		pid, err := strconv.Atoi(strings.TrimSpace(string(pidString)))
-		if err != nil {
-			outerr = err
-		} else {
-			out = append(out, int32(pid))
-			p.tagmap[int32(pid)] = map[string]string{
-				"pidfile": p.PidFile,
-			}
-		}
+	cpu_time, err := proc.Times()
+	if err == nil {
+		fields[prefix+"cpu_time_user"] = cpu_time.User
+		fields[prefix+"cpu_time_system"] = cpu_time.System
+		fields[prefix+"cpu_time_idle"] = cpu_time.Idle
+		fields[prefix+"cpu_time_nice"] = cpu_time.Nice
+		fields[prefix+"cpu_time_iowait"] = cpu_time.Iowait
+		fields[prefix+"cpu_time_irq"] = cpu_time.Irq
+		fields[prefix+"cpu_time_soft_irq"] = cpu_time.Softirq
+		fields[prefix+"cpu_time_steal"] = cpu_time.Steal
+		fields[prefix+"cpu_time_stolen"] = cpu_time.Stolen
+		fields[prefix+"cpu_time_guest"] = cpu_time.Guest
+		fields[prefix+"cpu_time_guest_nice"] = cpu_time.GuestNice
 	}
-	return out, outerr
+
+	cpu_perc, err := proc.Percent(time.Duration(0))
+	if err == nil {
+		fields[prefix+"cpu_usage"] = cpu_perc
+	}
+
+	mem, err := proc.MemoryInfo()
+	if err == nil {
+		fields[prefix+"memory_rss"] = mem.RSS
+		fields[prefix+"memory_vms"] = mem.VMS
+		fields[prefix+"memory_swap"] = mem.Swap
+	}
+
+	acc.AddFields("procstat", fields, proc.Tags())
 }
 
-func (p *Procstat) pidsFromExe() ([]int32, error) {
-	var out []int32
-	var outerr error
-	bin, err := exec.LookPath("pgrep")
+// Update monitored Processes
+func (p *Procstat) updateProcesses(prevInfo map[PID]Process) (map[PID]Process, error) {
+	pids, tags, err := p.findPids()
 	if err != nil {
-		return out, fmt.Errorf("Couldn't find pgrep binary: %s", err)
+		return nil, err
 	}
-	pgrep, err := exec.Command(bin, p.Exe).Output()
-	if err != nil {
-		return out, fmt.Errorf("Failed to execute %s. Error: '%s'", bin, err)
-	} else {
-		pids := strings.Fields(string(pgrep))
-		for _, pid := range pids {
-			ipid, err := strconv.Atoi(pid)
-			if err == nil {
-				out = append(out, int32(ipid))
-				p.tagmap[int32(ipid)] = map[string]string{
-					"exe": p.Exe,
-				}
-			} else {
-				outerr = err
+
+	procs := make(map[PID]Process, len(prevInfo))
+
+	for _, pid := range pids {
+		info, ok := prevInfo[pid]
+		if ok {
+			procs[pid] = info
+		} else {
+			proc, err := p.createProcess(pid)
+			if err != nil {
+				// No problem; process may have ended after we found it
+				continue
+			}
+			procs[pid] = proc
+
+			// Add initial tags
+			for k, v := range tags {
+				proc.Tags()[k] = v
+			}
+
+			// Add pid tag if needed
+			if p.PidTag {
+				proc.Tags()["pid"] = strconv.Itoa(int(pid))
+			}
+			if p.ProcessName != "" {
+				proc.Tags()["process_name"] = p.ProcessName
 			}
 		}
 	}
-	return out, outerr
+	return procs, nil
 }
 
-func (p *Procstat) pidsFromPattern() ([]int32, error) {
-	var out []int32
-	var outerr error
-	bin, err := exec.LookPath("pgrep")
-	if err != nil {
-		return out, fmt.Errorf("Couldn't find pgrep binary: %s", err)
-	}
-	pgrep, err := exec.Command(bin, "-f", p.Pattern).Output()
-	if err != nil {
-		return out, fmt.Errorf("Failed to execute %s. Error: '%s'", bin, err)
-	} else {
-		pids := strings.Fields(string(pgrep))
-		for _, pid := range pids {
-			ipid, err := strconv.Atoi(pid)
-			if err == nil {
-				out = append(out, int32(ipid))
-				p.tagmap[int32(ipid)] = map[string]string{
-					"pattern": p.Pattern,
-				}
-			} else {
-				outerr = err
-			}
+// Create and return PIDGatherer lazily
+func (p *Procstat) getPIDFinder() (PIDFinder, error) {
+	if p.pidFinder == nil {
+		f, err := p.createPIDFinder()
+		if err != nil {
+			return nil, err
 		}
+		p.pidFinder = f
 	}
-	return out, outerr
+	return p.pidFinder, nil
 }
 
-func (p *Procstat) pidsFromUser() ([]int32, error) {
-	var out []int32
-	var outerr error
-	bin, err := exec.LookPath("pgrep")
+// Get matching PIDs and their initial tags
+func (p *Procstat) findPids() ([]PID, map[string]string, error) {
+	var pids []PID
+	var tags map[string]string
+	var err error
+
+	f, err := p.getPIDFinder()
 	if err != nil {
-		return out, fmt.Errorf("Couldn't find pgrep binary: %s", err)
+		return nil, nil, err
 	}
-	pgrep, err := exec.Command(bin, "-u", p.User).Output()
-	if err != nil {
-		return out, fmt.Errorf("Failed to execute %s. Error: '%s'", bin, err)
+
+	if p.PidFile != "" {
+		pids, err = f.PidFile(p.PidFile)
+		tags = map[string]string{"pidfile": p.PidFile}
+	} else if p.Exe != "" {
+		pids, err = f.Pattern(p.Exe)
+		tags = map[string]string{"exe": p.Exe}
+	} else if p.Pattern != "" {
+		pids, err = f.FullPattern(p.Pattern)
+		tags = map[string]string{"pattern": p.Pattern}
+	} else if p.User != "" {
+		pids, err = f.Uid(p.User)
+		tags = map[string]string{"user": p.User}
 	} else {
-		pids := strings.Fields(string(pgrep))
-		for _, pid := range pids {
-			ipid, err := strconv.Atoi(pid)
-			if err == nil {
-				out = append(out, int32(ipid))
-				p.tagmap[int32(ipid)] = map[string]string{
-					"user": p.User,
-				}
-			} else {
-				outerr = err
-			}
-		}
+		err = fmt.Errorf("Either exe, pid_file, user, or pattern has to be specified")
 	}
-	return out, outerr
+
+	return pids, tags, err
 }
 
 func init() {
 	inputs.Add("procstat", func() telegraf.Input {
-		return NewProcstat()
+		return &Procstat{}
 	})
 }
diff --git a/plugins/inputs/procstat/procstat_test.go b/plugins/inputs/procstat/procstat_test.go
index ccc72bdbb2811..1f6f2764253fd 100644
--- a/plugins/inputs/procstat/procstat_test.go
+++ b/plugins/inputs/procstat/procstat_test.go
@@ -1,33 +1,290 @@
 package procstat
 
 import (
-	"io/ioutil"
+	"fmt"
 	"os"
-	"strconv"
 	"testing"
+	"time"
 
+	"github.com/influxdata/telegraf/testutil"
+	"github.com/shirou/gopsutil/cpu"
 	"github.com/shirou/gopsutil/process"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-
-	"github.com/influxdata/telegraf/testutil"
 )
 
+type testPgrep struct {
+	pids []PID
+	err  error
+}
+
+func pidFinder(pids []PID, err error) func() (PIDFinder, error) {
+	return func() (PIDFinder, error) {
+		return &testPgrep{
+			pids: pids,
+			err:  err,
+		}, nil
+	}
+}
+
+func (pg *testPgrep) PidFile(path string) ([]PID, error) {
+	return pg.pids, pg.err
+}
+
+func (pg *testPgrep) Pattern(pattern string) ([]PID, error) {
+	return pg.pids, pg.err
+}
+
+func (pg *testPgrep) Uid(user string) ([]PID, error) {
+	return pg.pids, pg.err
+}
+
+func (pg *testPgrep) FullPattern(pattern string) ([]PID, error) {
+	return pg.pids, pg.err
+}
+
+type testProc struct {
+	pid  PID
+	tags map[string]string
+}
+
+func newTestProc(pid PID) (Process, error) {
+	proc := &testProc{
+		tags: make(map[string]string),
+	}
+	return proc, nil
+}
+
+func (p *testProc) PID() PID {
+	return p.pid
+}
+ +func (p *testProc) Tags() map[string]string { + return p.tags +} + +func (p *testProc) IOCounters() (*process.IOCountersStat, error) { + return &process.IOCountersStat{}, nil +} + +func (p *testProc) MemoryInfo() (*process.MemoryInfoStat, error) { + return &process.MemoryInfoStat{}, nil +} + +func (p *testProc) Name() (string, error) { + return "test_proc", nil +} + +func (p *testProc) NumCtxSwitches() (*process.NumCtxSwitchesStat, error) { + return &process.NumCtxSwitchesStat{}, nil +} + +func (p *testProc) NumFDs() (int32, error) { + return 0, nil +} + +func (p *testProc) NumThreads() (int32, error) { + return 0, nil +} + +func (p *testProc) Percent(interval time.Duration) (float64, error) { + return 0, nil +} + +func (p *testProc) Times() (*cpu.TimesStat, error) { + return &cpu.TimesStat{}, nil +} + +var pid PID = PID(42) +var exe string = "foo" + +func TestGather_CreateProcessErrorOk(t *testing.T) { + var acc testutil.Accumulator + + p := Procstat{ + Exe: exe, + createPIDFinder: pidFinder([]PID{pid}, nil), + createProcess: func(PID) (Process, error) { + return nil, fmt.Errorf("createProcess error") + }, + } + require.NoError(t, p.Gather(&acc)) +} + +func TestGather_CreatePIDFinderError(t *testing.T) { + var acc testutil.Accumulator + + p := Procstat{ + createPIDFinder: func() (PIDFinder, error) { + return nil, fmt.Errorf("createPIDFinder error") + }, + createProcess: newTestProc, + } + require.Error(t, p.Gather(&acc)) +} + +func TestGather_ProcessName(t *testing.T) { + var acc testutil.Accumulator + + p := Procstat{ + Exe: exe, + ProcessName: "custom_name", + createPIDFinder: pidFinder([]PID{pid}, nil), + createProcess: newTestProc, + } + require.NoError(t, p.Gather(&acc)) + + assert.Equal(t, "custom_name", acc.TagValue("procstat", "process_name")) +} + +func TestGather_NoProcessNameUsesReal(t *testing.T) { var acc testutil.Accumulator - pid := os.Getpid() - file, err := ioutil.TempFile(os.TempDir(), "telegraf") - require.NoError(t, err) - file.Write([]byte(strconv.Itoa(pid))) - file.Close() - defer os.Remove(file.Name()) + pid := PID(os.Getpid()) + p := Procstat{ - PidFile: file.Name(), - Prefix: "foo", - pidmap: make(map[int32]*process.Process), - tagmap: make(map[int32]map[string]string), + Exe: exe, + createPIDFinder: pidFinder([]PID{pid}, nil), + createProcess: newTestProc, } - p.Gather(&acc) - assert.True(t, acc.HasFloatField("procstat", "foo_cpu_time_user")) - assert.True(t, acc.HasUIntField("procstat", "foo_memory_vms")) + require.NoError(t, p.Gather(&acc)) + + assert.True(t, acc.HasTag("procstat", "process_name")) +} + +func TestGather_NoPidTag(t *testing.T) { + var acc testutil.Accumulator + + p := Procstat{ + Exe: exe, + createPIDFinder: pidFinder([]PID{pid}, nil), + createProcess: newTestProc, + } + require.NoError(t, p.Gather(&acc)) + assert.True(t, acc.HasInt32Field("procstat", "pid")) + assert.False(t, acc.HasTag("procstat", "pid")) +} + +func TestGather_PidTag(t *testing.T) { + var acc testutil.Accumulator + + p := Procstat{ + Exe: exe, + PidTag: true, + createPIDFinder: pidFinder([]PID{pid}, nil), + createProcess: newTestProc, + } + require.NoError(t, p.Gather(&acc)) + assert.Equal(t, "42", acc.TagValue("procstat", "pid")) + assert.False(t, acc.HasInt32Field("procstat", "pid")) +} + +func TestGather_Prefix(t *testing.T) { + var acc testutil.Accumulator + + p := Procstat{ + Exe: exe, + Prefix: "custom_prefix", + createPIDFinder: pidFinder([]PID{pid}, nil), + createProcess: newTestProc, + } + require.NoError(t, p.Gather(&acc)) + assert.True(t, 
acc.HasInt32Field("procstat", "custom_prefix_num_fds")) +} + +func TestGather_Exe(t *testing.T) { + var acc testutil.Accumulator + + p := Procstat{ + Exe: exe, + createPIDFinder: pidFinder([]PID{pid}, nil), + createProcess: newTestProc, + } + require.NoError(t, p.Gather(&acc)) + + assert.Equal(t, exe, acc.TagValue("procstat", "exe")) +} + +func TestGather_User(t *testing.T) { + var acc testutil.Accumulator + user := "ada" + + p := Procstat{ + User: user, + createPIDFinder: pidFinder([]PID{pid}, nil), + createProcess: newTestProc, + } + require.NoError(t, p.Gather(&acc)) + + assert.Equal(t, user, acc.TagValue("procstat", "user")) +} + +func TestGather_Pattern(t *testing.T) { + var acc testutil.Accumulator + pattern := "foo" + + p := Procstat{ + Pattern: pattern, + createPIDFinder: pidFinder([]PID{pid}, nil), + createProcess: newTestProc, + } + require.NoError(t, p.Gather(&acc)) + + assert.Equal(t, pattern, acc.TagValue("procstat", "pattern")) +} + +func TestGather_MissingPidMethod(t *testing.T) { + var acc testutil.Accumulator + + p := Procstat{ + createPIDFinder: pidFinder([]PID{pid}, nil), + createProcess: newTestProc, + } + require.Error(t, p.Gather(&acc)) +} + +func TestGather_PidFile(t *testing.T) { + var acc testutil.Accumulator + pidfile := "/path/to/pidfile" + + p := Procstat{ + PidFile: pidfile, + createPIDFinder: pidFinder([]PID{pid}, nil), + createProcess: newTestProc, + } + require.NoError(t, p.Gather(&acc)) + + assert.Equal(t, pidfile, acc.TagValue("procstat", "pidfile")) +} + +func TestGather_PercentFirstPass(t *testing.T) { + var acc testutil.Accumulator + pid := PID(os.Getpid()) + + p := Procstat{ + Pattern: "foo", + PidTag: true, + createPIDFinder: pidFinder([]PID{pid}, nil), + createProcess: NewProc, + } + require.NoError(t, p.Gather(&acc)) + + assert.True(t, acc.HasFloatField("procstat", "cpu_time_user")) + assert.False(t, acc.HasFloatField("procstat", "cpu_usage")) +} + +func TestGather_PercentSecondPass(t *testing.T) { + var acc testutil.Accumulator + pid := PID(os.Getpid()) + + p := Procstat{ + Pattern: "foo", + PidTag: true, + createPIDFinder: pidFinder([]PID{pid}, nil), + createProcess: NewProc, + } + require.NoError(t, p.Gather(&acc)) + require.NoError(t, p.Gather(&acc)) + + assert.True(t, acc.HasFloatField("procstat", "cpu_time_user")) + assert.True(t, acc.HasFloatField("procstat", "cpu_usage")) } diff --git a/plugins/inputs/procstat/spec_processor.go b/plugins/inputs/procstat/spec_processor.go deleted file mode 100644 index 3b56fbc3e212a..0000000000000 --- a/plugins/inputs/procstat/spec_processor.go +++ /dev/null @@ -1,110 +0,0 @@ -package procstat - -import ( - "time" - - "github.com/shirou/gopsutil/process" - - "github.com/influxdata/telegraf" -) - -type SpecProcessor struct { - Prefix string - pid int32 - tags map[string]string - fields map[string]interface{} - acc telegraf.Accumulator - proc *process.Process -} - -func NewSpecProcessor( - processName string, - prefix string, - pid int32, - acc telegraf.Accumulator, - p *process.Process, - tags map[string]string, -) *SpecProcessor { - if processName != "" { - tags["process_name"] = processName - } else { - name, err := p.Name() - if err == nil { - tags["process_name"] = name - } - } - return &SpecProcessor{ - Prefix: prefix, - pid: pid, - tags: tags, - fields: make(map[string]interface{}), - acc: acc, - proc: p, - } -} - -func (p *SpecProcessor) pushMetrics() { - var prefix string - if p.Prefix != "" { - prefix = p.Prefix + "_" - } - fields := map[string]interface{}{} - - //If pid is not present 
as a tag, include it as a field. - if _, pidInTags := p.tags["pid"]; !pidInTags { - fields["pid"] = p.pid - } - - numThreads, err := p.proc.NumThreads() - if err == nil { - fields[prefix+"num_threads"] = numThreads - } - - fds, err := p.proc.NumFDs() - if err == nil { - fields[prefix+"num_fds"] = fds - } - - ctx, err := p.proc.NumCtxSwitches() - if err == nil { - fields[prefix+"voluntary_context_switches"] = ctx.Voluntary - fields[prefix+"involuntary_context_switches"] = ctx.Involuntary - } - - io, err := p.proc.IOCounters() - if err == nil { - fields[prefix+"read_count"] = io.ReadCount - fields[prefix+"write_count"] = io.WriteCount - fields[prefix+"read_bytes"] = io.ReadBytes - fields[prefix+"write_bytes"] = io.WriteBytes - } - - cpu_time, err := p.proc.Times() - if err == nil { - fields[prefix+"cpu_time_user"] = cpu_time.User - fields[prefix+"cpu_time_system"] = cpu_time.System - fields[prefix+"cpu_time_idle"] = cpu_time.Idle - fields[prefix+"cpu_time_nice"] = cpu_time.Nice - fields[prefix+"cpu_time_iowait"] = cpu_time.Iowait - fields[prefix+"cpu_time_irq"] = cpu_time.Irq - fields[prefix+"cpu_time_soft_irq"] = cpu_time.Softirq - fields[prefix+"cpu_time_steal"] = cpu_time.Steal - fields[prefix+"cpu_time_stolen"] = cpu_time.Stolen - fields[prefix+"cpu_time_guest"] = cpu_time.Guest - fields[prefix+"cpu_time_guest_nice"] = cpu_time.GuestNice - } - - cpu_perc, err := p.proc.Percent(time.Duration(0)) - if err == nil && cpu_perc != 0 { - fields[prefix+"cpu_usage"] = cpu_perc - } - - mem, err := p.proc.MemoryInfo() - if err == nil { - fields[prefix+"memory_rss"] = mem.RSS - fields[prefix+"memory_vms"] = mem.VMS - fields[prefix+"memory_swap"] = mem.Swap - } - - p.acc.AddFields("procstat", fields, p.tags) -} diff --git a/testutil/accumulator.go b/testutil/accumulator.go index 25e60920ba905..63dfddd7ada80 100644 --- a/testutil/accumulator.go +++ b/testutil/accumulator.go @@ -161,6 +161,29 @@ func (a *Accumulator) Get(measurement string) (*Metric, bool) { return nil, false } +func (a *Accumulator) HasTag(measurement string, key string) bool { + for _, p := range a.Metrics { + if p.Measurement == measurement { + _, ok := p.Tags[key] + return ok + } + } + return false +} + +func (a *Accumulator) TagValue(measurement string, key string) string { + for _, p := range a.Metrics { + if p.Measurement == measurement { + v, ok := p.Tags[key] + if !ok { + return "" + } + return v + } + } + return "" +} + // NFields returns the total number of fields in the accumulator, across all // measurements func (a *Accumulator) NFields() int { From bb28fb256b4595676714f22b2742738f96a184e5 Mon Sep 17 00:00:00 2001 From: Leandro Piccilli Date: Tue, 21 Mar 2017 01:47:57 +0100 Subject: [PATCH 024/201] Add Elasticsearch 5.x output (#2332) --- Godeps | 1 + Makefile | 6 +- README.md | 1 + plugins/outputs/all/all.go | 1 + plugins/outputs/elasticsearch/README.md | 218 +++++++++++++ .../outputs/elasticsearch/elasticsearch.go | 308 ++++++++++++++++++ .../elasticsearch/elasticsearch_test.go | 126 +++++++ 7 files changed, 659 insertions(+), 2 deletions(-) create mode 100644 plugins/outputs/elasticsearch/README.md create mode 100644 plugins/outputs/elasticsearch/elasticsearch.go create mode 100644 plugins/outputs/elasticsearch/elasticsearch_test.go diff --git a/Godeps b/Godeps index 2d0419ef6a18e..6cbe9efa72c19 100644 --- a/Godeps +++ b/Godeps @@ -59,4 +59,5 @@ golang.org/x/text 506f9d5c962f284575e88337e7d9296d27e729d3 gopkg.in/dancannon/gorethink.v1 edc7a6a68e2d8015f5ffe1b2560eed989f8a45be gopkg.in/fatih/pool.v2 
6e328e67893eb46323ad06f0e92cb9536babbabc gopkg.in/mgo.v2 3f83fa5005286a7fe593b055f0d7771a7dce4655 +gopkg.in/olivere/elastic.v5 ee3ebceab960cf68ab9a89ee6d78c031ef5b4a4e gopkg.in/yaml.v2 4c78c975fe7c825c6d1466c42be594d1d6f3aba6 diff --git a/Makefile b/Makefile index 79276f887c8b0..d2bad656d0abc 100644 --- a/Makefile +++ b/Makefile @@ -51,6 +51,7 @@ docker-run: -e ADVERTISED_PORT=9092 \ -p "2181:2181" -p "9092:9092" \ -d spotify/kafka + docker run --name elasticsearch -p "9200:9200" -p "9300:9300" -d elasticsearch:5 docker run --name mysql -p "3306:3306" -e MYSQL_ALLOW_EMPTY_PASSWORD=yes -d mysql docker run --name memcached -p "11211:11211" -d memcached docker run --name postgres -p "5432:5432" -d postgres @@ -69,6 +70,7 @@ docker-run-circle: -e ADVERTISED_PORT=9092 \ -p "2181:2181" -p "9092:9092" \ -d spotify/kafka + docker run --name elasticsearch -p "9200:9200" -p "9300:9300" -d elasticsearch:5 docker run --name nsq -p "4150:4150" -d nsqio/nsq /nsqd docker run --name mqtt -p "1883:1883" -d ncarlier/mqtt docker run --name riemann -p "5555:5555" -d stealthly/docker-riemann @@ -76,8 +78,8 @@ docker-run-circle: # Kill all docker containers, ignore errors docker-kill: - -docker kill nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats - -docker rm nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats + -docker kill nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats elasticsearch + -docker rm nsq aerospike redis rabbitmq postgres memcached mysql kafka mqtt riemann nats elasticsearch # Run full unit tests using docker containers (includes setup and teardown) test: vet docker-kill docker-run diff --git a/README.md b/README.md index 915c7b7612d91..90686271439aa 100644 --- a/README.md +++ b/README.md @@ -211,6 +211,7 @@ Telegraf can also collect metrics via the following service plugins: * [aws cloudwatch](./plugins/outputs/cloudwatch) * [datadog](./plugins/outputs/datadog) * [discard](./plugins/outputs/discard) +* [elasticsearch](./plugins/outputs/elasticsearch) * [file](./plugins/outputs/file) * [graphite](./plugins/outputs/graphite) * [graylog](./plugins/outputs/graylog) diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go index eec2b95e3f5e0..089a5690977e5 100644 --- a/plugins/outputs/all/all.go +++ b/plugins/outputs/all/all.go @@ -6,6 +6,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/outputs/cloudwatch" _ "github.com/influxdata/telegraf/plugins/outputs/datadog" _ "github.com/influxdata/telegraf/plugins/outputs/discard" + _ "github.com/influxdata/telegraf/plugins/outputs/elasticsearch" _ "github.com/influxdata/telegraf/plugins/outputs/file" _ "github.com/influxdata/telegraf/plugins/outputs/graphite" _ "github.com/influxdata/telegraf/plugins/outputs/graylog" diff --git a/plugins/outputs/elasticsearch/README.md b/plugins/outputs/elasticsearch/README.md new file mode 100644 index 0000000000000..620d5a82cc3ef --- /dev/null +++ b/plugins/outputs/elasticsearch/README.md @@ -0,0 +1,218 @@ +## Elasticsearch Output Plugin for Telegraf + +This plugin writes to [Elasticsearch](https://www.elastic.co) via HTTP using Elastic (http://olivere.github.io/elastic/). + +Currently it only supports Elasticsearch 5.x series. + +## Elasticsearch indexes and templates + +### Indexes per time-frame + +This plugin can manage indexes per time-frame, as commonly done in other tools with Elasticsearch. 
+ +The timestamp of the metric collected will be used to decide the index destination. + +For more information about this usage on Elasticsearch, check https://www.elastic.co/guide/en/elasticsearch/guide/master/time-based.html#index-per-timeframe + +### Template management + +Index templates are used in Elasticsearch to define settings and mappings for the indexes and how the fields should be analyzed. +For more information on how this works, see https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html + +This plugin can create a working template for use with telegraf metrics. It uses Elasticsearch dynamic templates feature to set proper types for the tags and metrics fields. +If the template specified already exists, it will not overwrite unless you configure this plugin to do so. Thus you can customize this template after its creation if necessary. + +Example of an index template created by telegraf: + +```json +{ + "order": 0, + "template": "telegraf-*", + "settings": { + "index": { + "mapping": { + "total_fields": { + "limit": "5000" + } + }, + "refresh_interval": "10s" + } + }, + "mappings": { + "_default_": { + "dynamic_templates": [ + { + "tags": { + "path_match": "tag.*", + "mapping": { + "ignore_above": 512, + "type": "keyword" + }, + "match_mapping_type": "string" + } + }, + { + "metrics_long": { + "mapping": { + "index": false, + "type": "float" + }, + "match_mapping_type": "long" + } + }, + { + "metrics_double": { + "mapping": { + "index": false, + "type": "float" + }, + "match_mapping_type": "double" + } + }, + { + "text_fields": { + "mapping": { + "norms": false + }, + "match": "*" + } + } + ], + "_all": { + "enabled": false + }, + "properties": { + "@timestamp": { + "type": "date" + }, + "measurement_name": { + "type": "keyword" + } + } + } + }, + "aliases": {} +} + +``` + +### Example events: + +This plugin will format the events in the following way: + +```json +{ + "@timestamp": "2017-01-01T00:00:00+00:00", + "measurement_name": "cpu", + "cpu": { + "usage_guest": 0, + "usage_guest_nice": 0, + "usage_idle": 71.85413456197966, + "usage_iowait": 0.256805341656516, + "usage_irq": 0, + "usage_nice": 0, + "usage_softirq": 0.2054442732579466, + "usage_steal": 0, + "usage_system": 15.04879301548127, + "usage_user": 12.634822807288275 + }, + "tag": { + "cpu": "cpu-total", + "host": "elastichost", + "dc": "datacenter1" + } +} +``` + +```json +{ + "@timestamp": "2017-01-01T00:00:00+00:00", + "measurement_name": "system", + "system": { + "load1": 0.78, + "load15": 0.8, + "load5": 0.8, + "n_cpus": 2, + "n_users": 2 + }, + "tag": { + "host": "elastichost", + "dc": "datacenter1" + } +} +``` + +### Configuration: + +```toml +# Configuration for Elasticsearch to send metrics to. +[[outputs.elasticsearch]] + ## The full HTTP endpoint URL for your Elasticsearch instance + ## Multiple urls can be specified as part of the same cluster, + ## this means that only ONE of the urls will be written to each interval. + urls = [ "http://node1.es.example.com:9200" ] # required. + ## Elasticsearch client timeout, defaults to "5s" if not set. + timeout = "5s" + ## Set to true to ask Elasticsearch a list of all cluster nodes, + ## thus it is not necessary to list all nodes in the urls config option + enable_sniffer = false + ## Set the interval to check if the Elasticsearch nodes are available + ## Setting to "0s" will disable the health check (not recommended in production) + health_check_interval = "10s" + ## HTTP basic authentication details (eg. 
when using Shield) + # username = "telegraf" + # password = "mypassword" + + ## Index Config + ## The target index for metrics (Elasticsearch will create if it not exists). + ## You can use the date specifiers below to create indexes per time frame. + ## The metric timestamp will be used to decide the destination index name + # %Y - year (2016) + # %y - last two digits of year (00..99) + # %m - month (01..12) + # %d - day of month (e.g., 01) + # %H - hour (00..23) + index_name = "telegraf-%Y.%m.%d" # required. + + ## Template Config + ## Set to true if you want telegraf to manage its index template. + ## If enabled it will create a recommended index template for telegraf indexes + manage_template = true + ## The template name used for telegraf indexes + template_name = "telegraf" + ## Set to true if you want telegraf to overwrite an existing template + overwrite_template = false +``` + +### Required parameters: + +* `urls`: A list containing the full HTTP URL of one or more nodes from your Elasticsearch instance. +* `index_name`: The target index for metrics. You can use the date specifiers below to create indexes per time frame. + +``` %Y - year (2017) + %y - last two digits of year (00..99) + %m - month (01..12) + %d - day of month (e.g., 01) + %H - hour (00..23) +``` + +### Optional parameters: + +* `timeout`: Elasticsearch client timeout, defaults to "5s" if not set. +* `enable_sniffer`: Set to true to ask Elasticsearch a list of all cluster nodes, thus it is not necessary to list all nodes in the urls config option. +* `health_check_interval`: Set the interval to check if the nodes are available, in seconds. Setting to 0 will disable the health check (not recommended in production). +* `username`: The username for HTTP basic authentication details (eg. when using Shield). +* `password`: The password for HTTP basic authentication details (eg. when using Shield). +* `manage_template`: Set to true if you want telegraf to manage its index template. If enabled it will create a recommended index template for telegraf indexes. +* `template_name`: The template name used for telegraf indexes. +* `overwrite_template`: Set to true if you want telegraf to overwrite an existing template. + +## Known issues + +Integer values collected that are bigger than 2^63 and smaller than 1e21 (or in this exact same window of their negative counterparts) are encoded by golang JSON encoder in decimal format and that is not fully supported by Elasticsearch dynamic field mapping. This causes the metrics with such values to be dropped in case a field mapping has not been created yet on the telegraf index. If that's the case you will see an exception on Elasticsearch side like this: + +```{"error":{"root_cause":[{"type":"mapper_parsing_exception","reason":"failed to parse"}],"type":"mapper_parsing_exception","reason":"failed to parse","caused_by":{"type":"illegal_state_exception","reason":"No matching token for number_type [BIG_INTEGER]"}},"status":400}``` + +The correct field mapping will be created on the telegraf index as soon as a supported JSON value is received by Elasticsearch, and subsequent insertions will work because the field mapping will already exist. + +This issue is caused by the way Elasticsearch tries to detect integer fields, and by how golang encodes numbers in JSON. There is no clear workaround for this at the moment. 
\ No newline at end of file diff --git a/plugins/outputs/elasticsearch/elasticsearch.go b/plugins/outputs/elasticsearch/elasticsearch.go new file mode 100644 index 0000000000000..dbd359b901ae3 --- /dev/null +++ b/plugins/outputs/elasticsearch/elasticsearch.go @@ -0,0 +1,308 @@ +package elasticsearch + +import ( + "context" + "fmt" + "log" + "strconv" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/outputs" + "gopkg.in/olivere/elastic.v5" +) + +type Elasticsearch struct { + URLs []string `toml:"urls"` + IndexName string + Username string + Password string + EnableSniffer bool + Timeout internal.Duration + HealthCheckInterval internal.Duration + ManageTemplate bool + TemplateName string + OverwriteTemplate bool + Client *elastic.Client +} + +var sampleConfig = ` + ## The full HTTP endpoint URL for your Elasticsearch instance + ## Multiple urls can be specified as part of the same cluster, + ## this means that only ONE of the urls will be written to each interval. + urls = [ "http://node1.es.example.com:9200" ] # required. + ## Elasticsearch client timeout, defaults to "5s" if not set. + timeout = "5s" + ## Set to true to ask Elasticsearch a list of all cluster nodes, + ## thus it is not necessary to list all nodes in the urls config option. + enable_sniffer = false + ## Set the interval to check if the Elasticsearch nodes are available + ## Setting to "0s" will disable the health check (not recommended in production) + health_check_interval = "10s" + ## HTTP basic authentication details (eg. when using Shield) + # username = "telegraf" + # password = "mypassword" + + ## Index Config + ## The target index for metrics (Elasticsearch will create if it not exists). + ## You can use the date specifiers below to create indexes per time frame. + ## The metric timestamp will be used to decide the destination index name + # %Y - year (2016) + # %y - last two digits of year (00..99) + # %m - month (01..12) + # %d - day of month (e.g., 01) + # %H - hour (00..23) + index_name = "telegraf-%Y.%m.%d" # required. + + ## Template Config + ## Set to true if you want telegraf to manage its index template. + ## If enabled it will create a recommended index template for telegraf indexes + manage_template = true + ## The template name used for telegraf indexes + template_name = "telegraf" + ## Set to true if you want telegraf to overwrite an existing template + overwrite_template = false +` + +func (a *Elasticsearch) Connect() error { + if a.URLs == nil || a.IndexName == "" { + return fmt.Errorf("Elasticsearch urls or index_name is not defined") + } + + ctx, cancel := context.WithTimeout(context.Background(), a.Timeout.Duration) + defer cancel() + + var clientOptions []elastic.ClientOptionFunc + + clientOptions = append(clientOptions, + elastic.SetSniff(a.EnableSniffer), + elastic.SetURL(a.URLs...), + elastic.SetHealthcheckInterval(a.HealthCheckInterval.Duration), + ) + + if a.Username != "" && a.Password != "" { + clientOptions = append(clientOptions, + elastic.SetBasicAuth(a.Username, a.Password), + ) + } + + if a.HealthCheckInterval.Duration == 0 { + clientOptions = append(clientOptions, + elastic.SetHealthcheck(false), + ) + log.Printf("D! Elasticsearch output: disabling health check") + } + + client, err := elastic.NewClient(clientOptions...) 
+ + if err != nil { + return err + } + + // check for ES version on first node + esVersion, err := client.ElasticsearchVersion(a.URLs[0]) + + if err != nil { + return fmt.Errorf("Elasticsearch version check failed: %s", err) + } + + // quit if ES version is not supported + i, err := strconv.Atoi(strings.Split(esVersion, ".")[0]) + if err != nil || i < 5 { + return fmt.Errorf("Elasticsearch version not supported: %s", esVersion) + } + + log.Println("I! Elasticsearch version: " + esVersion) + + a.Client = client + + if a.ManageTemplate { + err := a.manageTemplate(ctx) + if err != nil { + return err + } + } + + return nil +} + +func (a *Elasticsearch) Write(metrics []telegraf.Metric) error { + if len(metrics) == 0 { + return nil + } + + bulkRequest := a.Client.Bulk() + + for _, metric := range metrics { + var name = metric.Name() + + // index name has to be re-evaluated each time for telegraf + // to send the metric to the correct time-based index + indexName := a.GetIndexName(a.IndexName, metric.Time()) + + m := make(map[string]interface{}) + + m["@timestamp"] = metric.Time() + m["measurement_name"] = name + m["tag"] = metric.Tags() + m[name] = metric.Fields() + + bulkRequest.Add(elastic.NewBulkIndexRequest(). + Index(indexName). + Type("metrics"). + Doc(m)) + + } + + ctx, cancel := context.WithTimeout(context.Background(), a.Timeout.Duration) + defer cancel() + + res, err := bulkRequest.Do(ctx) + + if err != nil { + return fmt.Errorf("Error sending bulk request to Elasticsearch: %s", err) + } + + if res.Errors { + for id, err := range res.Failed() { + log.Printf("E! Elasticsearch indexing failure, id: %d, error: %s, caused by: %s, %s", id, err.Error.Reason, err.Error.CausedBy["reason"], err.Error.CausedBy["type"]) + } + return fmt.Errorf("W! Elasticsearch failed to index %d metrics", len(res.Failed())) + } + + return nil + +} + +func (a *Elasticsearch) manageTemplate(ctx context.Context) error { + if a.TemplateName == "" { + return fmt.Errorf("Elasticsearch template_name configuration not defined") + } + + templateExists, errExists := a.Client.IndexTemplateExists(a.TemplateName).Do(ctx) + + if errExists != nil { + return fmt.Errorf("Elasticsearch template check failed, template name: %s, error: %s", a.TemplateName, errExists) + } + + templatePattern := a.IndexName + "*" + + if strings.Contains(a.IndexName, "%") { + templatePattern = a.IndexName[0:strings.Index(a.IndexName, "%")] + "*" + } + + if (a.OverwriteTemplate) || (!templateExists) { + // Create or update the template + tmpl := fmt.Sprintf(` + { + "template":"%s", + "settings": { + "index": { + "refresh_interval": "10s", + "mapping.total_fields.limit": 5000 + } + }, + "mappings" : { + "_default_" : { + "_all": { "enabled": false }, + "properties" : { + "@timestamp" : { "type" : "date" }, + "measurement_name" : { "type" : "keyword" } + }, + "dynamic_templates": [ + { + "tags": { + "match_mapping_type": "string", + "path_match": "tag.*", + "mapping": { + "ignore_above": 512, + "type": "keyword" + } + } + }, + { + "metrics_long": { + "match_mapping_type": "long", + "mapping": { + "type": "float", + "index": false + } + } + }, + { + "metrics_double": { + "match_mapping_type": "double", + "mapping": { + "type": "float", + "index": false + } + } + }, + { + "text_fields": { + "match": "*", + "mapping": { + "norms": false + } + } + } + ] + } + } + }`, templatePattern) + _, errCreateTemplate := a.Client.IndexPutTemplate(a.TemplateName).BodyString(tmpl).Do(ctx) + + if errCreateTemplate != nil { + return fmt.Errorf("Elasticsearch failed to 
create index template %s : %s", a.TemplateName, errCreateTemplate) + } + + log.Printf("D! Elasticsearch template %s created or updated\n", a.TemplateName) + + } else { + + log.Println("D! Found existing Elasticsearch template. Skipping template management") + + } + return nil +} + +func (a *Elasticsearch) GetIndexName(indexName string, eventTime time.Time) string { + if strings.Contains(indexName, "%") { + var dateReplacer = strings.NewReplacer( + "%Y", eventTime.UTC().Format("2006"), + "%y", eventTime.UTC().Format("06"), + "%m", eventTime.UTC().Format("01"), + "%d", eventTime.UTC().Format("02"), + "%H", eventTime.UTC().Format("15"), + ) + + indexName = dateReplacer.Replace(indexName) + } + + return indexName + +} + +func (a *Elasticsearch) SampleConfig() string { + return sampleConfig +} + +func (a *Elasticsearch) Description() string { + return "Configuration for Elasticsearch to send metrics to." +} + +func (a *Elasticsearch) Close() error { + a.Client = nil + return nil +} + +func init() { + outputs.Add("elasticsearch", func() telegraf.Output { + return &Elasticsearch{ + Timeout: internal.Duration{Duration: time.Second * 5}, + HealthCheckInterval: internal.Duration{Duration: time.Second * 10}, + } + }) +} diff --git a/plugins/outputs/elasticsearch/elasticsearch_test.go b/plugins/outputs/elasticsearch/elasticsearch_test.go new file mode 100644 index 0000000000000..9163a2bbe7f03 --- /dev/null +++ b/plugins/outputs/elasticsearch/elasticsearch_test.go @@ -0,0 +1,126 @@ +package elasticsearch + +import ( + "context" + "testing" + "time" + + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestConnectAndWrite(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + urls := []string{"http://" + testutil.GetLocalHost() + ":9200"} + + e := &Elasticsearch{ + URLs: urls, + IndexName: "test-%Y.%m.%d", + Timeout: internal.Duration{Duration: time.Second * 5}, + ManageTemplate: true, + TemplateName: "telegraf", + OverwriteTemplate: false, + HealthCheckInterval: internal.Duration{Duration: time.Second * 10}, + } + + // Verify that we can connect to Elasticsearch + err := e.Connect() + require.NoError(t, err) + + // Verify that we can successfully write data to Elasticsearch + err = e.Write(testutil.MockMetrics()) + require.NoError(t, err) + +} + +func TestTemplateManagementEmptyTemplate(t *testing.T) { + urls := []string{"http://" + testutil.GetLocalHost() + ":9200"} + + ctx := context.Background() + + e := &Elasticsearch{ + URLs: urls, + IndexName: "test-%Y.%m.%d", + Timeout: internal.Duration{Duration: time.Second * 5}, + ManageTemplate: true, + TemplateName: "", + OverwriteTemplate: true, + } + + err := e.manageTemplate(ctx) + require.Error(t, err) + +} + +func TestTemplateManagement(t *testing.T) { + urls := []string{"http://" + testutil.GetLocalHost() + ":9200"} + + e := &Elasticsearch{ + URLs: urls, + IndexName: "test-%Y.%m.%d", + Timeout: internal.Duration{Duration: time.Second * 5}, + ManageTemplate: true, + TemplateName: "telegraf", + OverwriteTemplate: true, + } + + ctx, cancel := context.WithTimeout(context.Background(), e.Timeout.Duration) + defer cancel() + + err := e.Connect() + require.NoError(t, err) + + err = e.manageTemplate(ctx) + require.NoError(t, err) +} + +func TestGetIndexName(t *testing.T) { + e := &Elasticsearch{} + + var tests = []struct { + EventTime time.Time + IndexName string + Expected string + }{ + { + time.Date(2014, 
12, 01, 23, 30, 00, 00, time.UTC), + "indexname", + "indexname", + }, + { + time.Date(2014, 12, 01, 23, 30, 00, 00, time.UTC), + "indexname-%Y", + "indexname-2014", + }, + { + time.Date(2014, 12, 01, 23, 30, 00, 00, time.UTC), + "indexname-%Y-%m", + "indexname-2014-12", + }, + { + time.Date(2014, 12, 01, 23, 30, 00, 00, time.UTC), + "indexname-%Y-%m-%d", + "indexname-2014-12-01", + }, + { + time.Date(2014, 12, 01, 23, 30, 00, 00, time.UTC), + "indexname-%Y-%m-%d-%H", + "indexname-2014-12-01-23", + }, + { + time.Date(2014, 12, 01, 23, 30, 00, 00, time.UTC), + "indexname-%y-%m", + "indexname-14-12", + }, + } + for _, test := range tests { + indexName := e.GetIndexName(test.IndexName, test.EventTime) + if indexName != test.Expected { + t.Errorf("Expected indexname %s, got %s\n", indexName, test.Expected) + } + } +} From 5c33c760c7acd2bae24894250606a87fc78c8425 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 21 Mar 2017 10:59:41 -0700 Subject: [PATCH 025/201] Fix procstat initialization --- plugins/inputs/procstat/procstat.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index 46b88fbcf80a4..20c5af9d29764 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -235,6 +235,9 @@ func (p *Procstat) findPids() ([]PID, map[string]string, error) { func init() { inputs.Add("procstat", func() telegraf.Input { - return &Procstat{} + return &Procstat{ + createPIDFinder: defaultPIDFinder, + createProcess: defaultProcess, + } }) } From 70a0a848821b9e602f8cb8a3e3dce7e5db8a1f31 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 21 Mar 2017 11:40:51 -0700 Subject: [PATCH 026/201] Really fix procstat initialization --- plugins/inputs/procstat/procstat.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index 20c5af9d29764..d689ecf3ed630 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -62,6 +62,13 @@ func (_ *Procstat) Description() string { } func (p *Procstat) Gather(acc telegraf.Accumulator) error { + if p.createPIDFinder == nil { + p.createPIDFinder = defaultPIDFinder + } + if p.createProcess == nil { + p.createProcess = defaultProcess + } + procs, err := p.updateProcesses(p.procs) if err != nil { return fmt.Errorf( @@ -235,9 +242,6 @@ func (p *Procstat) findPids() ([]PID, map[string]string, error) { func init() { inputs.Add("procstat", func() telegraf.Input { - return &Procstat{ - createPIDFinder: defaultPIDFinder, - createProcess: defaultProcess, - } + return &Procstat{} }) } From 616b66f5cb12ae9b95cd6e101e709a4681a56bb7 Mon Sep 17 00:00:00 2001 From: Oskar Date: Wed, 22 Mar 2017 20:04:58 +0100 Subject: [PATCH 027/201] Multi instances in win_perf_counters (#2352) --- CHANGELOG.md | 2 ++ plugins/inputs/win_perf_counters/win_perf_counters.go | 5 +++++ 2 files changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5ef7f580b0565..fd1ec5136be86 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -82,6 +82,8 @@ be deprecated eventually. - [#2520](https://github.com/influxdata/telegraf/pull/2520): Update default value for Cloudwatch rate limit - [#2513](https://github.com/influxdata/telegraf/issues/2513): create /etc/telegraf/telegraf.d directory in tarball. - [#2541](https://github.com/influxdata/telegraf/issues/2541): Return error on unsupported serializer data format. 
+- [#1827](https://github.com/influxdata/telegraf/issues/1827): Fix Windows Performance Counters multi instance identifier + ## v1.2.1 [2017-02-01] diff --git a/plugins/inputs/win_perf_counters/win_perf_counters.go b/plugins/inputs/win_perf_counters/win_perf_counters.go index da59c3040266f..5365dc68bb043 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters.go @@ -265,6 +265,11 @@ func (m *Win_PerfCounters) Gather(acc telegraf.Accumulator) error { } else if metric.instance == s { // Catch if we set it to total or some form of it add = true + } else if strings.Contains(metric.instance, "#") && strings.HasPrefix(metric.instance, s) { + // If you are using a multiple instance identifier such as "w3wp#1" + // phd.dll returns only the first 2 characters of the identifier. + add = true + s = metric.instance } else if metric.instance == "------" { add = true } From 1402c158b74789132af2b885315d619648003f83 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Fri, 24 Mar 2017 15:03:36 -0400 Subject: [PATCH 028/201] remove sleep from tests (#2555) --- plugins/inputs/cloudwatch/cloudwatch_test.go | 5 +- .../http_listener/http_listener_test.go | 47 ++-------- .../http_response/http_response_test.go | 2 +- .../inputs/kafka_consumer/kafka_consumer.go | 9 +- .../kafka_consumer/kafka_consumer_test.go | 11 ++- plugins/inputs/logparser/logparser_test.go | 10 +-- plugins/inputs/mongodb/mongodb_server_test.go | 4 +- plugins/inputs/mqtt_consumer/mqtt_consumer.go | 10 +-- .../mqtt_consumer/mqtt_consumer_test.go | 15 ++-- plugins/inputs/nats_consumer/nats_consumer.go | 8 +- .../nats_consumer/nats_consumer_test.go | 20 ++--- .../socket_listener/socket_listener_test.go | 49 ++++------- plugins/inputs/tail/tail.go | 23 +++-- plugins/inputs/tail/tail_test.go | 19 ++-- .../inputs/tcp_listener/tcp_listener_test.go | 63 ++++++------- plugins/inputs/udp_listener/udp_listener.go | 25 ++++-- .../inputs/udp_listener/udp_listener_test.go | 57 +++++++----- plugins/outputs/graphite/graphite_test.go | 48 +++++----- plugins/outputs/influxdb/client/udp_test.go | 1 - .../outputs/instrumental/instrumental_test.go | 88 +++++++++---------- testutil/accumulator.go | 26 +++++- 21 files changed, 261 insertions(+), 279 deletions(-) diff --git a/plugins/inputs/cloudwatch/cloudwatch_test.go b/plugins/inputs/cloudwatch/cloudwatch_test.go index f2d58a00ca9fa..3aaab7d455783 100644 --- a/plugins/inputs/cloudwatch/cloudwatch_test.go +++ b/plugins/inputs/cloudwatch/cloudwatch_test.go @@ -207,14 +207,13 @@ func TestGenerateStatisticsInputParams(t *testing.T) { } func TestMetricsCacheTimeout(t *testing.T) { - ttl, _ := time.ParseDuration("5ms") cache := &MetricCache{ Metrics: []*cloudwatch.Metric{}, Fetched: time.Now(), - TTL: ttl, + TTL: time.Minute, } assert.True(t, cache.IsValid()) - time.Sleep(ttl) + cache.Fetched = time.Now().Add(-time.Minute) assert.False(t, cache.IsValid()) } diff --git a/plugins/inputs/http_listener/http_listener_test.go b/plugins/inputs/http_listener/http_listener_test.go index b5f858fdee207..7e6fbc8abfdda 100644 --- a/plugins/inputs/http_listener/http_listener_test.go +++ b/plugins/inputs/http_listener/http_listener_test.go @@ -6,7 +6,6 @@ import ( "net/http" "sync" "testing" - "time" "github.com/influxdata/telegraf/testutil" @@ -43,14 +42,12 @@ func TestWriteHTTP(t *testing.T) { require.NoError(t, listener.Start(acc)) defer listener.Stop() - time.Sleep(time.Millisecond * 25) - // post single message to listener resp, err := 
http.Post("http://localhost:8186/write?db=mydb", "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) require.EqualValues(t, 204, resp.StatusCode) - time.Sleep(time.Millisecond * 15) + acc.Wait(1) acc.AssertContainsTaggedFields(t, "cpu_load_short", map[string]interface{}{"value": float64(12)}, map[string]string{"host": "server01"}, @@ -61,7 +58,7 @@ func TestWriteHTTP(t *testing.T) { require.NoError(t, err) require.EqualValues(t, 204, resp.StatusCode) - time.Sleep(time.Millisecond * 15) + acc.Wait(2) hostTags := []string{"server02", "server03", "server04", "server05", "server06"} for _, hostTag := range hostTags { @@ -76,7 +73,7 @@ func TestWriteHTTP(t *testing.T) { require.NoError(t, err) require.EqualValues(t, 400, resp.StatusCode) - time.Sleep(time.Millisecond * 15) + acc.Wait(3) acc.AssertContainsTaggedFields(t, "cpu_load_short", map[string]interface{}{"value": float64(12)}, map[string]string{"host": "server01"}, @@ -91,14 +88,12 @@ func TestWriteHTTPNoNewline(t *testing.T) { require.NoError(t, listener.Start(acc)) defer listener.Stop() - time.Sleep(time.Millisecond * 25) - // post single message to listener resp, err := http.Post("http://localhost:8186/write?db=mydb", "", bytes.NewBuffer([]byte(testMsgNoNewline))) require.NoError(t, err) require.EqualValues(t, 204, resp.StatusCode) - time.Sleep(time.Millisecond * 15) + acc.Wait(1) acc.AssertContainsTaggedFields(t, "cpu_load_short", map[string]interface{}{"value": float64(12)}, map[string]string{"host": "server01"}, @@ -115,8 +110,6 @@ func TestWriteHTTPMaxLineSizeIncrease(t *testing.T) { require.NoError(t, listener.Start(acc)) defer listener.Stop() - time.Sleep(time.Millisecond * 25) - // Post a gigantic metric to the listener and verify that it writes OK this time: resp, err := http.Post("http://localhost:8296/write?db=mydb", "", bytes.NewBuffer([]byte(hugeMetric))) require.NoError(t, err) @@ -133,8 +126,6 @@ func TestWriteHTTPVerySmallMaxBody(t *testing.T) { require.NoError(t, listener.Start(acc)) defer listener.Stop() - time.Sleep(time.Millisecond * 25) - resp, err := http.Post("http://localhost:8297/write", "", bytes.NewBuffer([]byte(hugeMetric))) require.NoError(t, err) require.EqualValues(t, 413, resp.StatusCode) @@ -150,15 +141,13 @@ func TestWriteHTTPVerySmallMaxLineSize(t *testing.T) { require.NoError(t, listener.Start(acc)) defer listener.Stop() - time.Sleep(time.Millisecond * 25) - resp, err := http.Post("http://localhost:8298/write", "", bytes.NewBuffer([]byte(testMsgs))) require.NoError(t, err) require.EqualValues(t, 204, resp.StatusCode) - time.Sleep(time.Millisecond * 15) hostTags := []string{"server02", "server03", "server04", "server05", "server06"} + acc.Wait(len(hostTags)) for _, hostTag := range hostTags { acc.AssertContainsTaggedFields(t, "cpu_load_short", map[string]interface{}{"value": float64(12)}, @@ -177,15 +166,13 @@ func TestWriteHTTPLargeLinesSkipped(t *testing.T) { require.NoError(t, listener.Start(acc)) defer listener.Stop() - time.Sleep(time.Millisecond * 25) - resp, err := http.Post("http://localhost:8300/write", "", bytes.NewBuffer([]byte(hugeMetric+testMsgs))) require.NoError(t, err) require.EqualValues(t, 400, resp.StatusCode) - time.Sleep(time.Millisecond * 15) hostTags := []string{"server02", "server03", "server04", "server05", "server06"} + acc.Wait(len(hostTags)) for _, hostTag := range hostTags { acc.AssertContainsTaggedFields(t, "cpu_load_short", map[string]interface{}{"value": float64(12)}, @@ -204,8 +191,6 @@ func TestWriteHTTPGzippedData(t *testing.T) { require.NoError(t, 
listener.Start(acc)) defer listener.Stop() - time.Sleep(time.Millisecond * 25) - data, err := ioutil.ReadFile("./testdata/testmsgs.gz") require.NoError(t, err) @@ -218,9 +203,9 @@ func TestWriteHTTPGzippedData(t *testing.T) { require.NoError(t, err) require.EqualValues(t, 204, resp.StatusCode) - time.Sleep(time.Millisecond * 50) hostTags := []string{"server02", "server03", "server04", "server05", "server06"} + acc.Wait(len(hostTags)) for _, hostTag := range hostTags { acc.AssertContainsTaggedFields(t, "cpu_load_short", map[string]interface{}{"value": float64(12)}, @@ -237,8 +222,6 @@ func TestWriteHTTPHighTraffic(t *testing.T) { require.NoError(t, listener.Start(acc)) defer listener.Stop() - time.Sleep(time.Millisecond * 25) - // post many messages to listener var wg sync.WaitGroup for i := 0; i < 10; i++ { @@ -254,9 +237,9 @@ func TestWriteHTTPHighTraffic(t *testing.T) { } wg.Wait() - time.Sleep(time.Millisecond * 250) listener.Gather(acc) + acc.Wait(25000) require.Equal(t, int64(25000), int64(acc.NMetrics())) } @@ -267,8 +250,6 @@ func TestReceive404ForInvalidEndpoint(t *testing.T) { require.NoError(t, listener.Start(acc)) defer listener.Stop() - time.Sleep(time.Millisecond * 25) - // post single message to listener resp, err := http.Post("http://localhost:8186/foobar", "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) @@ -276,16 +257,12 @@ func TestReceive404ForInvalidEndpoint(t *testing.T) { } func TestWriteHTTPInvalid(t *testing.T) { - time.Sleep(time.Millisecond * 250) - listener := newTestHTTPListener() acc := &testutil.Accumulator{} require.NoError(t, listener.Start(acc)) defer listener.Stop() - time.Sleep(time.Millisecond * 25) - // post single message to listener resp, err := http.Post("http://localhost:8186/write?db=mydb", "", bytes.NewBuffer([]byte(badMsg))) require.NoError(t, err) @@ -293,16 +270,12 @@ func TestWriteHTTPInvalid(t *testing.T) { } func TestWriteHTTPEmpty(t *testing.T) { - time.Sleep(time.Millisecond * 250) - listener := newTestHTTPListener() acc := &testutil.Accumulator{} require.NoError(t, listener.Start(acc)) defer listener.Stop() - time.Sleep(time.Millisecond * 25) - // post single message to listener resp, err := http.Post("http://localhost:8186/write?db=mydb", "", bytes.NewBuffer([]byte(emptyMsg))) require.NoError(t, err) @@ -310,16 +283,12 @@ func TestWriteHTTPEmpty(t *testing.T) { } func TestQueryAndPingHTTP(t *testing.T) { - time.Sleep(time.Millisecond * 250) - listener := newTestHTTPListener() acc := &testutil.Accumulator{} require.NoError(t, listener.Start(acc)) defer listener.Stop() - time.Sleep(time.Millisecond * 25) - // post query to listener resp, err := http.Post("http://localhost:8186/query?db=&q=CREATE+DATABASE+IF+NOT+EXISTS+%22mydb%22", "", nil) require.NoError(t, err) diff --git a/plugins/inputs/http_response/http_response_test.go b/plugins/inputs/http_response/http_response_test.go index 236e5d88ba5e6..b65b1f9544d9f 100644 --- a/plugins/inputs/http_response/http_response_test.go +++ b/plugins/inputs/http_response/http_response_test.go @@ -329,7 +329,7 @@ func TestTimeout(t *testing.T) { Address: ts.URL + "/twosecondnap", Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 1}, + ResponseTimeout: internal.Duration{Duration: time.Millisecond}, Headers: map[string]string{ "Content-Type": "application/json", }, diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index f4176edd38b7e..6f1f4020b4633 100644 --- 
a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -1,6 +1,7 @@ package kafka_consumer import ( + "fmt" "log" "strings" "sync" @@ -129,13 +130,13 @@ func (k *Kafka) receiver() { return case err := <-k.errs: if err != nil { - log.Printf("E! Kafka Consumer Error: %s\n", err) + k.acc.AddError(fmt.Errorf("Kafka Consumer Error: %s\n", err)) } case msg := <-k.in: metrics, err := k.parser.Parse(msg.Value) if err != nil { - log.Printf("E! Kafka Message Parse Error\nmessage: %s\nerror: %s", - string(msg.Value), err.Error()) + k.acc.AddError(fmt.Errorf("E! Kafka Message Parse Error\nmessage: %s\nerror: %s", + string(msg.Value), err.Error())) } for _, metric := range metrics { @@ -158,7 +159,7 @@ func (k *Kafka) Stop() { defer k.Unlock() close(k.done) if err := k.Consumer.Close(); err != nil { - log.Printf("E! Error closing kafka consumer: %s\n", err.Error()) + k.acc.AddError(fmt.Errorf("E! Error closing kafka consumer: %s\n", err.Error())) } } diff --git a/plugins/inputs/kafka_consumer/kafka_consumer_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_test.go index c4936974f239f..e1c24adbed1b6 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer_test.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer_test.go @@ -2,7 +2,6 @@ package kafka_consumer import ( "testing" - "time" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" @@ -43,7 +42,7 @@ func TestRunParser(t *testing.T) { k.parser, _ = parsers.NewInfluxParser() go k.receiver() in <- saramaMsg(testMsg) - time.Sleep(time.Millisecond * 5) + acc.Wait(1) assert.Equal(t, acc.NFields(), 1) } @@ -58,7 +57,7 @@ func TestRunParserInvalidMsg(t *testing.T) { k.parser, _ = parsers.NewInfluxParser() go k.receiver() in <- saramaMsg(invalidMsg) - time.Sleep(time.Millisecond * 5) + acc.WaitError(1) assert.Equal(t, acc.NFields(), 0) } @@ -73,7 +72,7 @@ func TestRunParserAndGather(t *testing.T) { k.parser, _ = parsers.NewInfluxParser() go k.receiver() in <- saramaMsg(testMsg) - time.Sleep(time.Millisecond * 5) + acc.Wait(1) k.Gather(&acc) @@ -92,7 +91,7 @@ func TestRunParserAndGatherGraphite(t *testing.T) { k.parser, _ = parsers.NewGraphiteParser("_", []string{}, nil) go k.receiver() in <- saramaMsg(testMsgGraphite) - time.Sleep(time.Millisecond * 5) + acc.Wait(1) k.Gather(&acc) @@ -111,7 +110,7 @@ func TestRunParserAndGatherJSON(t *testing.T) { k.parser, _ = parsers.NewJSONParser("kafka_json_test", []string{}, nil) go k.receiver() in <- saramaMsg(testMsgJSON) - time.Sleep(time.Millisecond * 5) + acc.Wait(1) k.Gather(&acc) diff --git a/plugins/inputs/logparser/logparser_test.go b/plugins/inputs/logparser/logparser_test.go index 059bfd266ac92..db9795f286fa0 100644 --- a/plugins/inputs/logparser/logparser_test.go +++ b/plugins/inputs/logparser/logparser_test.go @@ -6,7 +6,6 @@ import ( "runtime" "strings" "testing" - "time" "github.com/influxdata/telegraf/testutil" @@ -41,7 +40,6 @@ func TestGrokParseLogFilesNonExistPattern(t *testing.T) { acc := testutil.Accumulator{} assert.Error(t, logparser.Start(&acc)) - time.Sleep(time.Millisecond * 500) logparser.Stop() } @@ -61,7 +59,8 @@ func TestGrokParseLogFiles(t *testing.T) { acc := testutil.Accumulator{} assert.NoError(t, logparser.Start(&acc)) - time.Sleep(time.Millisecond * 500) + acc.Wait(2) + logparser.Stop() acc.AssertContainsTaggedFields(t, "logparser_grok", @@ -102,14 +101,13 @@ func TestGrokParseLogFilesAppearLater(t *testing.T) { acc := testutil.Accumulator{} assert.NoError(t, 
logparser.Start(&acc)) - time.Sleep(time.Millisecond * 500) assert.Equal(t, acc.NFields(), 0) os.Symlink( thisdir+"grok/testdata/test_a.log", emptydir+"/test_a.log") assert.NoError(t, logparser.Gather(&acc)) - time.Sleep(time.Millisecond * 500) + acc.Wait(1) logparser.Stop() @@ -143,7 +141,7 @@ func TestGrokParseLogFilesOneBad(t *testing.T) { acc.SetDebug(true) assert.NoError(t, logparser.Start(&acc)) - time.Sleep(time.Millisecond * 500) + acc.Wait(1) logparser.Stop() acc.AssertContainsTaggedFields(t, "logparser_grok", diff --git a/plugins/inputs/mongodb/mongodb_server_test.go b/plugins/inputs/mongodb/mongodb_server_test.go index 7ad0f38a2ef15..e9d1bae9e69bd 100644 --- a/plugins/inputs/mongodb/mongodb_server_test.go +++ b/plugins/inputs/mongodb/mongodb_server_test.go @@ -4,7 +4,6 @@ package mongodb import ( "testing" - "time" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" @@ -32,12 +31,11 @@ func TestAddDefaultStats(t *testing.T) { err := server.gatherData(&acc, false) require.NoError(t, err) - time.Sleep(time.Duration(1) * time.Second) // need to call this twice so it can perform the diff err = server.gatherData(&acc, false) require.NoError(t, err) for key, _ := range DefaultStats { - assert.True(t, acc.HasIntValue(key)) + assert.True(t, acc.HasIntField("mongodb", key)) } } diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index cfade2944d7e1..3ea0480b8478e 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -142,8 +142,8 @@ func (m *MQTTConsumer) onConnect(c mqtt.Client) { subscribeToken := c.SubscribeMultiple(topics, m.recvMessage) subscribeToken.Wait() if subscribeToken.Error() != nil { - log.Printf("E! MQTT Subscribe Error\ntopics: %s\nerror: %s", - strings.Join(m.Topics[:], ","), subscribeToken.Error()) + m.acc.AddError(fmt.Errorf("E! MQTT Subscribe Error\ntopics: %s\nerror: %s", + strings.Join(m.Topics[:], ","), subscribeToken.Error())) } m.started = true } @@ -151,7 +151,7 @@ func (m *MQTTConsumer) onConnect(c mqtt.Client) { } func (m *MQTTConsumer) onConnectionLost(c mqtt.Client, err error) { - log.Printf("E! MQTT Connection lost\nerror: %s\nMQTT Client will try to reconnect", err.Error()) + m.acc.AddError(fmt.Errorf("E! MQTT Connection lost\nerror: %s\nMQTT Client will try to reconnect", err.Error())) return } @@ -166,8 +166,8 @@ func (m *MQTTConsumer) receiver() { topic := msg.Topic() metrics, err := m.parser.Parse(msg.Payload()) if err != nil { - log.Printf("E! MQTT Parse Error\nmessage: %s\nerror: %s", - string(msg.Payload()), err.Error()) + m.acc.AddError(fmt.Errorf("E! 
MQTT Parse Error\nmessage: %s\nerror: %s", + string(msg.Payload()), err.Error())) } for _, metric := range metrics { diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go index 2f5276191fe5c..027e4818b6cde 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go @@ -2,7 +2,6 @@ package mqtt_consumer import ( "testing" - "time" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" @@ -86,7 +85,7 @@ func TestRunParser(t *testing.T) { n.parser, _ = parsers.NewInfluxParser() go n.receiver() in <- mqttMsg(testMsgNeg) - time.Sleep(time.Millisecond * 250) + acc.Wait(1) if a := acc.NFields(); a != 1 { t.Errorf("got %v, expected %v", a, 1) @@ -102,7 +101,7 @@ func TestRunParserNegativeNumber(t *testing.T) { n.parser, _ = parsers.NewInfluxParser() go n.receiver() in <- mqttMsg(testMsg) - time.Sleep(time.Millisecond * 25) + acc.Wait(1) if a := acc.NFields(); a != 1 { t.Errorf("got %v, expected %v", a, 1) @@ -119,11 +118,12 @@ func TestRunParserInvalidMsg(t *testing.T) { n.parser, _ = parsers.NewInfluxParser() go n.receiver() in <- mqttMsg(invalidMsg) - time.Sleep(time.Millisecond * 25) + acc.WaitError(1) if a := acc.NFields(); a != 0 { t.Errorf("got %v, expected %v", a, 0) } + assert.Contains(t, acc.Errors[0].Error(), "MQTT Parse Error") } // Test that the parser parses line format messages into metrics @@ -136,7 +136,7 @@ func TestRunParserAndGather(t *testing.T) { n.parser, _ = parsers.NewInfluxParser() go n.receiver() in <- mqttMsg(testMsg) - time.Sleep(time.Millisecond * 25) + acc.Wait(1) n.Gather(&acc) @@ -154,9 +154,9 @@ func TestRunParserAndGatherGraphite(t *testing.T) { n.parser, _ = parsers.NewGraphiteParser("_", []string{}, nil) go n.receiver() in <- mqttMsg(testMsgGraphite) - time.Sleep(time.Millisecond * 25) n.Gather(&acc) + acc.Wait(1) acc.AssertContainsFields(t, "cpu_load_short_graphite", map[string]interface{}{"value": float64(23422)}) @@ -172,10 +172,11 @@ func TestRunParserAndGatherJSON(t *testing.T) { n.parser, _ = parsers.NewJSONParser("nats_json_test", []string{}, nil) go n.receiver() in <- mqttMsg(testMsgJSON) - time.Sleep(time.Millisecond * 25) n.Gather(&acc) + acc.Wait(1) + acc.AssertContainsFields(t, "nats_json_test", map[string]interface{}{ "a": float64(5), diff --git a/plugins/inputs/nats_consumer/nats_consumer.go b/plugins/inputs/nats_consumer/nats_consumer.go index cbb85e0162e9b..7c9f53941f978 100644 --- a/plugins/inputs/nats_consumer/nats_consumer.go +++ b/plugins/inputs/nats_consumer/nats_consumer.go @@ -162,11 +162,11 @@ func (n *natsConsumer) receiver() { case <-n.done: return case err := <-n.errs: - log.Printf("E! error reading from %s\n", err.Error()) + n.acc.AddError(fmt.Errorf("E! error reading from %s\n", err.Error())) case msg := <-n.in: metrics, err := n.parser.Parse(msg.Data) if err != nil { - log.Printf("E! subject: %s, error: %s", msg.Subject, err.Error()) + n.acc.AddError(fmt.Errorf("E! subject: %s, error: %s", msg.Subject, err.Error())) } for _, metric := range metrics { @@ -179,8 +179,8 @@ func (n *natsConsumer) receiver() { func (n *natsConsumer) clean() { for _, sub := range n.Subs { if err := sub.Unsubscribe(); err != nil { - log.Printf("E! Error unsubscribing from subject %s in queue %s: %s\n", - sub.Subject, sub.Queue, err.Error()) + n.acc.AddError(fmt.Errorf("E! 
Error unsubscribing from subject %s in queue %s: %s\n", + sub.Subject, sub.Queue, err.Error())) } } diff --git a/plugins/inputs/nats_consumer/nats_consumer_test.go b/plugins/inputs/nats_consumer/nats_consumer_test.go index 2f4d14d7337b9..30ba0d2afe95e 100644 --- a/plugins/inputs/nats_consumer/nats_consumer_test.go +++ b/plugins/inputs/nats_consumer/nats_consumer_test.go @@ -2,11 +2,11 @@ package natsconsumer import ( "testing" - "time" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" "github.com/nats-io/nats" + "github.com/stretchr/testify/assert" ) const ( @@ -42,11 +42,8 @@ func TestRunParser(t *testing.T) { n.wg.Add(1) go n.receiver() in <- natsMsg(testMsg) - time.Sleep(time.Millisecond * 25) - if acc.NFields() != 1 { - t.Errorf("got %v, expected %v", acc.NFields(), 1) - } + acc.Wait(1) } // Test that the parser ignores invalid messages @@ -60,11 +57,10 @@ func TestRunParserInvalidMsg(t *testing.T) { n.wg.Add(1) go n.receiver() in <- natsMsg(invalidMsg) - time.Sleep(time.Millisecond * 25) - if acc.NFields() != 0 { - t.Errorf("got %v, expected %v", acc.NFields(), 0) - } + acc.WaitError(1) + assert.Contains(t, acc.Errors[0].Error(), "E! subject: telegraf, error: metric parsing error") + assert.EqualValues(t, 0, acc.NMetrics()) } // Test that the parser parses line format messages into metrics @@ -78,10 +74,10 @@ func TestRunParserAndGather(t *testing.T) { n.wg.Add(1) go n.receiver() in <- natsMsg(testMsg) - time.Sleep(time.Millisecond * 25) n.Gather(&acc) + acc.Wait(1) acc.AssertContainsFields(t, "cpu_load_short", map[string]interface{}{"value": float64(23422)}) } @@ -97,10 +93,10 @@ func TestRunParserAndGatherGraphite(t *testing.T) { n.wg.Add(1) go n.receiver() in <- natsMsg(testMsgGraphite) - time.Sleep(time.Millisecond * 25) n.Gather(&acc) + acc.Wait(1) acc.AssertContainsFields(t, "cpu_load_short_graphite", map[string]interface{}{"value": float64(23422)}) } @@ -116,10 +112,10 @@ func TestRunParserAndGatherJSON(t *testing.T) { n.wg.Add(1) go n.receiver() in <- natsMsg(testMsgJSON) - time.Sleep(time.Millisecond * 25) n.Gather(&acc) + acc.Wait(1) acc.AssertContainsFields(t, "nats_json_test", map[string]interface{}{ "a": float64(5), diff --git a/plugins/inputs/socket_listener/socket_listener_test.go b/plugins/inputs/socket_listener/socket_listener_test.go index 6764b6d2d9733..9fa472809b8e3 100644 --- a/plugins/inputs/socket_listener/socket_listener_test.go +++ b/plugins/inputs/socket_listener/socket_listener_test.go @@ -81,42 +81,25 @@ func testSocketListener(t *testing.T, sl *SocketListener, client net.Conn) { acc := sl.Accumulator.(*testutil.Accumulator) + acc.Wait(3) acc.Lock() - if len(acc.Metrics) < 1 { - acc.Wait() - } - require.True(t, len(acc.Metrics) >= 1) - m := acc.Metrics[0] + m1 := acc.Metrics[0] + m2 := acc.Metrics[1] + m3 := acc.Metrics[2] acc.Unlock() - assert.Equal(t, "test", m.Measurement) - assert.Equal(t, map[string]string{"foo": "bar"}, m.Tags) - assert.Equal(t, map[string]interface{}{"v": int64(1)}, m.Fields) - assert.True(t, time.Unix(0, 123456789).Equal(m.Time)) - - acc.Lock() - if len(acc.Metrics) < 2 { - acc.Wait() - } - require.True(t, len(acc.Metrics) >= 2) - m = acc.Metrics[1] - acc.Unlock() + assert.Equal(t, "test", m1.Measurement) + assert.Equal(t, map[string]string{"foo": "bar"}, m1.Tags) + assert.Equal(t, map[string]interface{}{"v": int64(1)}, m1.Fields) + assert.True(t, time.Unix(0, 123456789).Equal(m1.Time)) - assert.Equal(t, "test", m.Measurement) - assert.Equal(t, 
map[string]string{"foo": "baz"}, m.Tags) - assert.Equal(t, map[string]interface{}{"v": int64(2)}, m.Fields) - assert.True(t, time.Unix(0, 123456790).Equal(m.Time)) - - acc.Lock() - if len(acc.Metrics) < 3 { - acc.Wait() - } - require.True(t, len(acc.Metrics) >= 3) - m = acc.Metrics[2] - acc.Unlock() + assert.Equal(t, "test", m2.Measurement) + assert.Equal(t, map[string]string{"foo": "baz"}, m2.Tags) + assert.Equal(t, map[string]interface{}{"v": int64(2)}, m2.Fields) + assert.True(t, time.Unix(0, 123456790).Equal(m2.Time)) - assert.Equal(t, "test", m.Measurement) - assert.Equal(t, map[string]string{"foo": "zab"}, m.Tags) - assert.Equal(t, map[string]interface{}{"v": int64(3)}, m.Fields) - assert.True(t, time.Unix(0, 123456791).Equal(m.Time)) + assert.Equal(t, "test", m3.Measurement) + assert.Equal(t, map[string]string{"foo": "zab"}, m3.Tags) + assert.Equal(t, map[string]interface{}{"v": int64(3)}, m3.Fields) + assert.True(t, time.Unix(0, 123456791).Equal(m3.Time)) } diff --git a/plugins/inputs/tail/tail.go b/plugins/inputs/tail/tail.go index 508c1e32027ef..0c19f91164b8e 100644 --- a/plugins/inputs/tail/tail.go +++ b/plugins/inputs/tail/tail.go @@ -2,7 +2,6 @@ package tail import ( "fmt" - "log" "sync" "github.com/hpcloud/tail" @@ -86,7 +85,7 @@ func (t *Tail) Start(acc telegraf.Accumulator) error { for _, filepath := range t.Files { g, err := globpath.Compile(filepath) if err != nil { - log.Printf("E! Error Glob %s failed to compile, %s", filepath, err) + t.acc.AddError(fmt.Errorf("E! Error Glob %s failed to compile, %s", filepath, err)) } for file, _ := range g.Match() { tailer, err := tail.TailFile(file, @@ -124,21 +123,21 @@ func (t *Tail) receiver(tailer *tail.Tail) { var line *tail.Line for line = range tailer.Lines { if line.Err != nil { - log.Printf("E! Error tailing file %s, Error: %s\n", - tailer.Filename, err) + t.acc.AddError(fmt.Errorf("E! Error tailing file %s, Error: %s\n", + tailer.Filename, err)) continue } m, err = t.parser.ParseLine(line.Text) if err == nil { t.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time()) } else { - log.Printf("E! Malformed log line in %s: [%s], Error: %s\n", - tailer.Filename, line.Text, err) + t.acc.AddError(fmt.Errorf("E! Malformed log line in %s: [%s], Error: %s\n", + tailer.Filename, line.Text, err)) } } if err := tailer.Err(); err != nil { - log.Printf("E! Error tailing file %s, Error: %s\n", - tailer.Filename, err) + t.acc.AddError(fmt.Errorf("E! Error tailing file %s, Error: %s\n", + tailer.Filename, err)) } } @@ -146,12 +145,12 @@ func (t *Tail) Stop() { t.Lock() defer t.Unlock() - for _, t := range t.tailers { - err := t.Stop() + for _, tailer := range t.tailers { + err := tailer.Stop() if err != nil { - log.Printf("E! Error stopping tail on file %s\n", t.Filename) + t.acc.AddError(fmt.Errorf("E! 
Error stopping tail on file %s\n", tailer.Filename)) } - t.Cleanup() + tailer.Cleanup() } t.wg.Wait() } diff --git a/plugins/inputs/tail/tail_test.go b/plugins/inputs/tail/tail_test.go index 31ecfbf30532a..b927d160c9c80 100644 --- a/plugins/inputs/tail/tail_test.go +++ b/plugins/inputs/tail/tail_test.go @@ -3,6 +3,7 @@ package tail import ( "io/ioutil" "os" + "runtime" "testing" "time" @@ -30,11 +31,9 @@ func TestTailFromBeginning(t *testing.T) { acc := testutil.Accumulator{} require.NoError(t, tt.Start(&acc)) - time.Sleep(time.Millisecond * 100) require.NoError(t, tt.Gather(&acc)) - // arbitrary sleep to wait for message to show up - time.Sleep(time.Millisecond * 150) + acc.Wait(1) acc.AssertContainsTaggedFields(t, "cpu", map[string]interface{}{ "usage_idle": float64(100), @@ -60,13 +59,19 @@ func TestTailFromEnd(t *testing.T) { acc := testutil.Accumulator{} require.NoError(t, tt.Start(&acc)) - time.Sleep(time.Millisecond * 100) + time.Sleep(time.Millisecond * 200) //TODO remove once https://github.com/hpcloud/tail/pull/114 is merged & added to Godeps + for _, tailer := range tt.tailers { + for n, err := tailer.Tell(); err == nil && n == 0; n, err = tailer.Tell() { + // wait for tailer to jump to end + runtime.Gosched() + } + } _, err = tmpfile.WriteString("cpu,othertag=foo usage_idle=100\n") require.NoError(t, err) require.NoError(t, tt.Gather(&acc)) - time.Sleep(time.Millisecond * 50) + acc.Wait(1) acc.AssertContainsTaggedFields(t, "cpu", map[string]interface{}{ "usage_idle": float64(100), @@ -96,7 +101,7 @@ func TestTailBadLine(t *testing.T) { _, err = tmpfile.WriteString("cpu mytag= foo usage_idle= 100\n") require.NoError(t, err) require.NoError(t, tt.Gather(&acc)) - time.Sleep(time.Millisecond * 50) - assert.Len(t, acc.Metrics, 0) + acc.WaitError(1) + assert.Contains(t, acc.Errors[0].Error(), "E! 
Malformed log line") } diff --git a/plugins/inputs/tcp_listener/tcp_listener_test.go b/plugins/inputs/tcp_listener/tcp_listener_test.go index f7e5784d3e2df..27ced791c2968 100644 --- a/plugins/inputs/tcp_listener/tcp_listener_test.go +++ b/plugins/inputs/tcp_listener/tcp_listener_test.go @@ -1,10 +1,15 @@ package tcp_listener import ( + "bufio" + "bytes" "fmt" + "io" + "log" "net" + "os" + "strings" "testing" - "time" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" @@ -54,7 +59,6 @@ func BenchmarkTCP(b *testing.B) { panic(err) } - time.Sleep(time.Millisecond * 25) conn, err := net.Dial("tcp", "127.0.0.1:8198") if err != nil { panic(err) @@ -62,8 +66,10 @@ func BenchmarkTCP(b *testing.B) { for i := 0; i < 100000; i++ { fmt.Fprintf(conn, testMsg) } - // wait for 100,000 metrics to get added to accumulator - time.Sleep(time.Millisecond) + conn.(*net.TCPConn).CloseWrite() + // wait for all 100,000 metrics to be processed + buf := []byte{0} + conn.Read(buf) // will EOF when completed listener.Stop() } } @@ -81,16 +87,18 @@ func TestHighTrafficTCP(t *testing.T) { err := listener.Start(acc) require.NoError(t, err) - time.Sleep(time.Millisecond * 25) conn, err := net.Dial("tcp", "127.0.0.1:8199") require.NoError(t, err) for i := 0; i < 100000; i++ { fmt.Fprintf(conn, testMsg) } - time.Sleep(time.Millisecond) + conn.(*net.TCPConn).CloseWrite() + buf := []byte{0} + _, err = conn.Read(buf) + assert.Equal(t, err, io.EOF) listener.Stop() - assert.Equal(t, 100000, len(acc.Metrics)) + assert.Equal(t, 100000, int(acc.NMetrics())) } func TestConnectTCP(t *testing.T) { @@ -105,13 +113,12 @@ func TestConnectTCP(t *testing.T) { require.NoError(t, listener.Start(acc)) defer listener.Stop() - time.Sleep(time.Millisecond * 25) conn, err := net.Dial("tcp", "127.0.0.1:8194") require.NoError(t, err) // send single message to socket fmt.Fprintf(conn, testMsg) - time.Sleep(time.Millisecond * 15) + acc.Wait(1) acc.AssertContainsTaggedFields(t, "cpu_load_short", map[string]interface{}{"value": float64(12)}, map[string]string{"host": "server01"}, @@ -119,7 +126,7 @@ func TestConnectTCP(t *testing.T) { // send multiple messages to socket fmt.Fprintf(conn, testMsgs) - time.Sleep(time.Millisecond * 15) + acc.Wait(6) hostTags := []string{"server02", "server03", "server04", "server05", "server06"} for _, hostTag := range hostTags { @@ -143,7 +150,6 @@ func TestConcurrentConns(t *testing.T) { require.NoError(t, listener.Start(acc)) defer listener.Stop() - time.Sleep(time.Millisecond * 25) _, err := net.Dial("tcp", "127.0.0.1:8195") assert.NoError(t, err) _, err = net.Dial("tcp", "127.0.0.1:8195") @@ -162,10 +168,8 @@ func TestConcurrentConns(t *testing.T) { " the Telegraf tcp listener configuration.\n", string(buf[:n])) - _, err = conn.Write([]byte(testMsg)) - assert.NoError(t, err) - time.Sleep(time.Millisecond * 10) - assert.Zero(t, acc.NFields()) + _, err = conn.Read(buf) + assert.Equal(t, io.EOF, err) } // Test that MaxTCPConections is respected when max==1 @@ -181,7 +185,6 @@ func TestConcurrentConns1(t *testing.T) { require.NoError(t, listener.Start(acc)) defer listener.Stop() - time.Sleep(time.Millisecond * 25) _, err := net.Dial("tcp", "127.0.0.1:8196") assert.NoError(t, err) @@ -198,10 +201,8 @@ func TestConcurrentConns1(t *testing.T) { " the Telegraf tcp listener configuration.\n", string(buf[:n])) - _, err = conn.Write([]byte(testMsg)) - assert.NoError(t, err) - time.Sleep(time.Millisecond * 10) - assert.Zero(t, acc.NFields()) + _, err = conn.Read(buf) + 
assert.Equal(t, io.EOF, err) } // Test that MaxTCPConections is respected @@ -216,7 +217,6 @@ func TestCloseConcurrentConns(t *testing.T) { acc := &testutil.Accumulator{} require.NoError(t, listener.Start(acc)) - time.Sleep(time.Millisecond * 25) _, err := net.Dial("tcp", "127.0.0.1:8195") assert.NoError(t, err) _, err = net.Dial("tcp", "127.0.0.1:8195") @@ -238,13 +238,9 @@ func TestRunParser(t *testing.T) { go listener.tcpParser() in <- testmsg - time.Sleep(time.Millisecond * 25) listener.Gather(&acc) - if a := acc.NFields(); a != 1 { - t.Errorf("got %v, expected %v", a, 1) - } - + acc.Wait(1) acc.AssertContainsTaggedFields(t, "cpu_load_short", map[string]interface{}{"value": float64(12)}, map[string]string{"host": "server01"}, @@ -263,11 +259,16 @@ func TestRunParserInvalidMsg(t *testing.T) { listener.wg.Add(1) go listener.tcpParser() + buf := bytes.NewBuffer(nil) + log.SetOutput(buf) + defer log.SetOutput(os.Stderr) in <- testmsg - time.Sleep(time.Millisecond * 25) - if a := acc.NFields(); a != 0 { - t.Errorf("got %v, expected %v", a, 0) + scnr := bufio.NewScanner(buf) + for scnr.Scan() { + if strings.Contains(scnr.Text(), fmt.Sprintf(malformedwarn, 1)) { + break + } } } @@ -284,9 +285,9 @@ func TestRunParserGraphiteMsg(t *testing.T) { go listener.tcpParser() in <- testmsg - time.Sleep(time.Millisecond * 25) listener.Gather(&acc) + acc.Wait(1) acc.AssertContainsFields(t, "cpu_load_graphite", map[string]interface{}{"value": float64(12)}) } @@ -304,9 +305,9 @@ func TestRunParserJSONMsg(t *testing.T) { go listener.tcpParser() in <- testmsg - time.Sleep(time.Millisecond * 25) listener.Gather(&acc) + acc.Wait(1) acc.AssertContainsFields(t, "udp_json_test", map[string]interface{}{ "a": float64(5), diff --git a/plugins/inputs/udp_listener/udp_listener.go b/plugins/inputs/udp_listener/udp_listener.go index 53c6a72f5f1cc..d0a728b3c8484 100644 --- a/plugins/inputs/udp_listener/udp_listener.go +++ b/plugins/inputs/udp_listener/udp_listener.go @@ -1,6 +1,7 @@ package udp_listener import ( + "fmt" "log" "net" "sync" @@ -107,8 +108,9 @@ func (u *UdpListener) Start(acc telegraf.Accumulator) error { u.in = make(chan []byte, u.AllowedPendingMessages) u.done = make(chan struct{}) - u.wg.Add(2) - go u.udpListen() + u.udpListen() + + u.wg.Add(1) go u.udpParser() log.Printf("I! Started UDP listener service on %s (ReadBuffer: %d)\n", u.ServiceAddress, u.UDPBufferSize) @@ -126,32 +128,37 @@ func (u *UdpListener) Stop() { } func (u *UdpListener) udpListen() error { - defer u.wg.Done() var err error address, _ := net.ResolveUDPAddr("udp", u.ServiceAddress) u.listener, err = net.ListenUDP("udp", address) if err != nil { - log.Fatalf("E! Error: ListenUDP - %s", err) + return fmt.Errorf("E! Error: ListenUDP - %s", err) } log.Println("I! UDP server listening on: ", u.listener.LocalAddr().String()) - buf := make([]byte, UDP_MAX_PACKET_SIZE) - if u.UDPBufferSize > 0 { err = u.listener.SetReadBuffer(u.UDPBufferSize) // if we want to move away from OS default if err != nil { - log.Printf("E! Failed to set UDP read buffer to %d: %s", u.UDPBufferSize, err) - return err + return fmt.Errorf("E! 
Failed to set UDP read buffer to %d: %s", u.UDPBufferSize, err) } } + u.wg.Add(1) + go u.udpListenLoop() + return nil +} + +func (u *UdpListener) udpListenLoop() { + defer u.wg.Done() + + buf := make([]byte, UDP_MAX_PACKET_SIZE) for { select { case <-u.done: - return nil + return default: u.listener.SetReadDeadline(time.Now().Add(time.Second)) diff --git a/plugins/inputs/udp_listener/udp_listener_test.go b/plugins/inputs/udp_listener/udp_listener_test.go index eefdd593ed31c..4d78a1a42ad9c 100644 --- a/plugins/inputs/udp_listener/udp_listener_test.go +++ b/plugins/inputs/udp_listener/udp_listener_test.go @@ -1,12 +1,16 @@ package udp_listener import ( + "bufio" + "bytes" "fmt" "io/ioutil" "log" "net" + "os" + "runtime" + "strings" "testing" - "time" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" @@ -50,22 +54,27 @@ func TestHighTrafficUDP(t *testing.T) { err := listener.Start(acc) require.NoError(t, err) - time.Sleep(time.Millisecond * 25) conn, err := net.Dial("udp", "127.0.0.1:8126") require.NoError(t, err) + mlen := int64(len(testMsgs)) + var sent int64 for i := 0; i < 20000; i++ { - // arbitrary, just to give the OS buffer some slack handling the - // packet storm. - time.Sleep(time.Microsecond) - fmt.Fprintf(conn, testMsgs) + for sent > listener.BytesRecv.Get()+32000 { + // more than 32kb sitting in OS buffer, let it drain + runtime.Gosched() + } + conn.Write([]byte(testMsgs)) + sent += mlen + } + for sent > listener.BytesRecv.Get() { + runtime.Gosched() + } + for len(listener.in) > 0 { + runtime.Gosched() } - time.Sleep(time.Millisecond) listener.Stop() - // this is not an exact science, since UDP packets can easily get lost or - // dropped, but assume that the OS will be able to - // handle at least 90% of the sent UDP packets. 
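The udp_listener change above splits the old `udpListen` into a synchronous bind, so `ListenUDP` and `SetReadBuffer` failures are returned from `Start` instead of killing the process via `log.Fatalf`, plus a goroutine that only loops on reads. A stripped-down sketch of that shape, with invented names and buffer size:

```go
package main

import (
	"fmt"
	"net"
	"sync"
	"time"
)

type listener struct {
	conn *net.UDPConn
	done chan struct{}
	wg   sync.WaitGroup
}

// Start binds synchronously so configuration errors surface to the
// caller, then hands only the blocking read loop to a goroutine.
func (l *listener) Start(addr string) error {
	ua, err := net.ResolveUDPAddr("udp", addr)
	if err != nil {
		return fmt.Errorf("resolve: %s", err)
	}
	l.conn, err = net.ListenUDP("udp", ua)
	if err != nil {
		return fmt.Errorf("listen: %s", err)
	}
	l.done = make(chan struct{})
	l.wg.Add(1)
	go l.loop()
	return nil
}

func (l *listener) loop() {
	defer l.wg.Done()
	buf := make([]byte, 64*1024)
	for {
		select {
		case <-l.done:
			return
		default:
			// Short deadline keeps the loop responsive to done.
			l.conn.SetReadDeadline(time.Now().Add(time.Second))
			n, _, err := l.conn.ReadFromUDP(buf)
			if err != nil {
				continue // deadline expiries land here
			}
			fmt.Printf("got %d bytes\n", n)
		}
	}
}

// Stop may block up to one read deadline while the loop drains.
func (l *listener) Stop() {
	close(l.done)
	l.wg.Wait()
	l.conn.Close()
}

func main() {
	var l listener
	if err := l.Start("127.0.0.1:0"); err != nil {
		fmt.Println(err)
		return
	}
	l.Stop()
}
```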
- assert.InDelta(t, 100000, len(acc.Metrics), 10000) + assert.Equal(t, uint64(100000), acc.NMetrics()) } func TestConnectUDP(t *testing.T) { @@ -79,13 +88,12 @@ func TestConnectUDP(t *testing.T) { require.NoError(t, listener.Start(acc)) defer listener.Stop() - time.Sleep(time.Millisecond * 25) conn, err := net.Dial("udp", "127.0.0.1:8127") require.NoError(t, err) // send single message to socket fmt.Fprintf(conn, testMsg) - time.Sleep(time.Millisecond * 15) + acc.Wait(1) acc.AssertContainsTaggedFields(t, "cpu_load_short", map[string]interface{}{"value": float64(12)}, map[string]string{"host": "server01"}, @@ -93,7 +101,7 @@ func TestConnectUDP(t *testing.T) { // send multiple messages to socket fmt.Fprintf(conn, testMsgs) - time.Sleep(time.Millisecond * 15) + acc.Wait(6) hostTags := []string{"server02", "server03", "server04", "server05", "server06"} for _, hostTag := range hostTags { @@ -118,13 +126,9 @@ func TestRunParser(t *testing.T) { go listener.udpParser() in <- testmsg - time.Sleep(time.Millisecond * 25) listener.Gather(&acc) - if a := acc.NFields(); a != 1 { - t.Errorf("got %v, expected %v", a, 1) - } - + acc.Wait(1) acc.AssertContainsTaggedFields(t, "cpu_load_short", map[string]interface{}{"value": float64(12)}, map[string]string{"host": "server01"}, @@ -144,11 +148,16 @@ func TestRunParserInvalidMsg(t *testing.T) { listener.wg.Add(1) go listener.udpParser() + buf := bytes.NewBuffer(nil) + log.SetOutput(buf) + defer log.SetOutput(os.Stderr) in <- testmsg - time.Sleep(time.Millisecond * 25) - if a := acc.NFields(); a != 0 { - t.Errorf("got %v, expected %v", a, 0) + scnr := bufio.NewScanner(buf) + for scnr.Scan() { + if strings.Contains(scnr.Text(), fmt.Sprintf(malformedwarn, 1)) { + break + } } } @@ -166,9 +175,9 @@ func TestRunParserGraphiteMsg(t *testing.T) { go listener.udpParser() in <- testmsg - time.Sleep(time.Millisecond * 25) listener.Gather(&acc) + acc.Wait(1) acc.AssertContainsFields(t, "cpu_load_graphite", map[string]interface{}{"value": float64(12)}) } @@ -187,9 +196,9 @@ func TestRunParserJSONMsg(t *testing.T) { go listener.udpParser() in <- testmsg - time.Sleep(time.Millisecond * 25) listener.Gather(&acc) + acc.Wait(1) acc.AssertContainsFields(t, "udp_json_test", map[string]interface{}{ "a": float64(5), diff --git a/plugins/outputs/graphite/graphite_test.go b/plugins/outputs/graphite/graphite_test.go index 4f1f2fef6c8cf..3984728af9dbc 100644 --- a/plugins/outputs/graphite/graphite_test.go +++ b/plugins/outputs/graphite/graphite_test.go @@ -44,9 +44,7 @@ func TestGraphiteOK(t *testing.T) { // Start TCP server wg.Add(1) t.Log("Starting server") - go TCPServer1(t, &wg) - // Give the fake graphite TCP server some time to start: - time.Sleep(time.Millisecond * 100) + TCPServer1(t, &wg) // Init plugin g := Graphite{ @@ -88,10 +86,8 @@ func TestGraphiteOK(t *testing.T) { t.Log("Finished Waiting for first data") var wg2 sync.WaitGroup // Start TCP server - time.Sleep(time.Millisecond * 100) wg2.Add(1) - go TCPServer2(t, &wg2) - time.Sleep(time.Millisecond * 100) + TCPServer2(t, &wg2) //Write but expect an error, but reconnect g.Write(metrics2) err3 := g.Write(metrics2) @@ -105,27 +101,31 @@ func TestGraphiteOK(t *testing.T) { } func TCPServer1(t *testing.T, wg *sync.WaitGroup) { - defer wg.Done() tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003") - conn, _ := (tcpServer).Accept() - reader := bufio.NewReader(conn) - tp := textproto.NewReader(reader) - data1, _ := tp.ReadLine() - assert.Equal(t, "my.prefix.192_168_0_1.mymeasurement.myfield 3.14 1289430000", data1) - 
conn.Close() - tcpServer.Close() + go func() { + defer wg.Done() + conn, _ := (tcpServer).Accept() + reader := bufio.NewReader(conn) + tp := textproto.NewReader(reader) + data1, _ := tp.ReadLine() + assert.Equal(t, "my.prefix.192_168_0_1.mymeasurement.myfield 3.14 1289430000", data1) + conn.Close() + tcpServer.Close() + }() } func TCPServer2(t *testing.T, wg *sync.WaitGroup) { - defer wg.Done() tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003") - conn2, _ := (tcpServer).Accept() - reader := bufio.NewReader(conn2) - tp := textproto.NewReader(reader) - data2, _ := tp.ReadLine() - assert.Equal(t, "my.prefix.192_168_0_1.mymeasurement 3.14 1289430000", data2) - data3, _ := tp.ReadLine() - assert.Equal(t, "my.prefix.192_168_0_1.my_measurement 3.14 1289430000", data3) - conn2.Close() - tcpServer.Close() + go func() { + defer wg.Done() + conn2, _ := (tcpServer).Accept() + reader := bufio.NewReader(conn2) + tp := textproto.NewReader(reader) + data2, _ := tp.ReadLine() + assert.Equal(t, "my.prefix.192_168_0_1.mymeasurement 3.14 1289430000", data2) + data3, _ := tp.ReadLine() + assert.Equal(t, "my.prefix.192_168_0_1.my_measurement 3.14 1289430000", data3) + conn2.Close() + tcpServer.Close() + }() } diff --git a/plugins/outputs/influxdb/client/udp_test.go b/plugins/outputs/influxdb/client/udp_test.go index 31196ddcad79e..84efe0b225493 100644 --- a/plugins/outputs/influxdb/client/udp_test.go +++ b/plugins/outputs/influxdb/client/udp_test.go @@ -66,7 +66,6 @@ func TestUDPClient_Write(t *testing.T) { }() // test sending simple metric - time.Sleep(time.Second) n, err := client.Write([]byte("cpu value=99\n")) assert.Equal(t, n, 13) assert.NoError(t, err) diff --git a/plugins/outputs/instrumental/instrumental_test.go b/plugins/outputs/instrumental/instrumental_test.go index d77d8eb058022..0d3ce904008e6 100644 --- a/plugins/outputs/instrumental/instrumental_test.go +++ b/plugins/outputs/instrumental/instrumental_test.go @@ -16,9 +16,7 @@ import ( func TestWrite(t *testing.T) { var wg sync.WaitGroup wg.Add(1) - go TCPServer(t, &wg) - // Give the fake TCP server some time to start: - time.Sleep(time.Millisecond * 100) + TCPServer(t, &wg) i := Instrumental{ Host: "127.0.0.1", @@ -79,45 +77,47 @@ func TestWrite(t *testing.T) { func TCPServer(t *testing.T, wg *sync.WaitGroup) { tcpServer, _ := net.Listen("tcp", "127.0.0.1:8000") - defer wg.Done() - conn, _ := tcpServer.Accept() - conn.SetDeadline(time.Now().Add(1 * time.Second)) - reader := bufio.NewReader(conn) - tp := textproto.NewReader(reader) - - hello, _ := tp.ReadLine() - assert.Equal(t, "hello version go/telegraf/1.1", hello) - auth, _ := tp.ReadLine() - assert.Equal(t, "authenticate abc123token", auth) - conn.Write([]byte("ok\nok\n")) - - data1, _ := tp.ReadLine() - assert.Equal(t, "gauge my.prefix.192_168_0_1.mymeasurement.myfield 3.14 1289430000", data1) - data2, _ := tp.ReadLine() - assert.Equal(t, "gauge my.prefix.192_168_0_1.mymeasurement 3.14 1289430000", data2) - - conn, _ = tcpServer.Accept() - conn.SetDeadline(time.Now().Add(1 * time.Second)) - reader = bufio.NewReader(conn) - tp = textproto.NewReader(reader) - - hello, _ = tp.ReadLine() - assert.Equal(t, "hello version go/telegraf/1.1", hello) - auth, _ = tp.ReadLine() - assert.Equal(t, "authenticate abc123token", auth) - conn.Write([]byte("ok\nok\n")) - - data3, _ := tp.ReadLine() - assert.Equal(t, "increment my.prefix.192_168_0_1.my_histogram 3.14 1289430000", data3) - - data4, _ := tp.ReadLine() - assert.Equal(t, "increment my.prefix.192_168_0_1_8888_123.bad_metric_name 1 1289430000", 
data4) - - data5, _ := tp.ReadLine() - assert.Equal(t, "increment my.prefix.192_168_0_1.my_counter 3.14 1289430000", data5) - - data6, _ := tp.ReadLine() - assert.Equal(t, "", data6) - - conn.Close() + go func() { + defer wg.Done() + conn, _ := tcpServer.Accept() + conn.SetDeadline(time.Now().Add(1 * time.Second)) + reader := bufio.NewReader(conn) + tp := textproto.NewReader(reader) + + hello, _ := tp.ReadLine() + assert.Equal(t, "hello version go/telegraf/1.1", hello) + auth, _ := tp.ReadLine() + assert.Equal(t, "authenticate abc123token", auth) + conn.Write([]byte("ok\nok\n")) + + data1, _ := tp.ReadLine() + assert.Equal(t, "gauge my.prefix.192_168_0_1.mymeasurement.myfield 3.14 1289430000", data1) + data2, _ := tp.ReadLine() + assert.Equal(t, "gauge my.prefix.192_168_0_1.mymeasurement 3.14 1289430000", data2) + + conn, _ = tcpServer.Accept() + conn.SetDeadline(time.Now().Add(1 * time.Second)) + reader = bufio.NewReader(conn) + tp = textproto.NewReader(reader) + + hello, _ = tp.ReadLine() + assert.Equal(t, "hello version go/telegraf/1.1", hello) + auth, _ = tp.ReadLine() + assert.Equal(t, "authenticate abc123token", auth) + conn.Write([]byte("ok\nok\n")) + + data3, _ := tp.ReadLine() + assert.Equal(t, "increment my.prefix.192_168_0_1.my_histogram 3.14 1289430000", data3) + + data4, _ := tp.ReadLine() + assert.Equal(t, "increment my.prefix.192_168_0_1_8888_123.bad_metric_name 1 1289430000", data4) + + data5, _ := tp.ReadLine() + assert.Equal(t, "increment my.prefix.192_168_0_1.my_counter 3.14 1289430000", data5) + + data6, _ := tp.ReadLine() + assert.Equal(t, "", data6) + + conn.Close() + }() } diff --git a/testutil/accumulator.go b/testutil/accumulator.go index 63dfddd7ada80..02bebf9c8b68c 100644 --- a/testutil/accumulator.go +++ b/testutil/accumulator.go @@ -129,6 +129,9 @@ func (a *Accumulator) AddError(err error) { } a.Lock() a.Errors = append(a.Errors, err) + if a.Cond != nil { + a.Cond.Broadcast() + } a.Unlock() } @@ -198,13 +201,28 @@ func (a *Accumulator) NFields() int { return counter } -// Wait waits for a metric to be added to the accumulator. -// Accumulator must already be locked. -func (a *Accumulator) Wait() { +// Wait waits for the given number of metrics to be added to the accumulator. +func (a *Accumulator) Wait(n int) { + a.Lock() + if a.Cond == nil { + a.Cond = sync.NewCond(&a.Mutex) + } + for int(a.NMetrics()) < n { + a.Cond.Wait() + } + a.Unlock() +} + +// WaitError waits for the given number of errors to be added to the accumulator. +func (a *Accumulator) WaitError(n int) { + a.Lock() if a.Cond == nil { a.Cond = sync.NewCond(&a.Mutex) } - a.Cond.Wait() + for len(a.Errors) < n { + a.Cond.Wait() + } + a.Unlock() } func (a *Accumulator) AssertContainsTaggedFields( From 995546e7c685136924c9d30ba5b49b30d6e2ddb1 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Fri, 24 Mar 2017 15:06:52 -0400 Subject: [PATCH 029/201] snmp: support table indexes as tags (#2366) --- CHANGELOG.md | 1 + plugins/inputs/snmp/README.md | 3 +++ plugins/inputs/snmp/snmp.go | 15 ++++++++++++--- plugins/inputs/snmp/snmp_test.go | 21 ++++++++++++++++----- 4 files changed, 32 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fd1ec5136be86..b1655f77d3b49 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -58,6 +58,7 @@ be deprecated eventually. - [#1678](https://github.com/influxdata/telegraf/pull/1678): Add AMQP consumer input plugin - [#2501](https://github.com/influxdata/telegraf/pull/2501): Support DEAD(X) state in system input plugin. 
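`Wait` and `WaitError` above replace the old single-shot `Cond.Wait` with loops that block until n metrics or errors have arrived, which is what lets the tests in this patch drop their arbitrary sleeps. The core pattern, reduced to a counter guarded by a condition variable (the `Collector` type is a toy stand-in for `testutil.Accumulator`, not its real API):

```go
package main

import (
	"fmt"
	"sync"
)

type Collector struct {
	mu    sync.Mutex
	cond  *sync.Cond
	count int
}

func NewCollector() *Collector {
	c := &Collector{}
	c.cond = sync.NewCond(&c.mu)
	return c
}

// Add records one item and wakes every waiter so each can re-check
// its own threshold.
func (c *Collector) Add() {
	c.mu.Lock()
	c.count++
	c.cond.Broadcast()
	c.mu.Unlock()
}

// Wait blocks until at least n items have been added. The condition
// is re-tested in a loop because Cond.Wait can wake before the
// threshold is reached (other waiters, broadcasts per item).
func (c *Collector) Wait(n int) {
	c.mu.Lock()
	for c.count < n {
		c.cond.Wait()
	}
	c.mu.Unlock()
}

func main() {
	c := NewCollector()
	go func() {
		for i := 0; i < 3; i++ {
			c.Add()
		}
	}()
	c.Wait(3)
	fmt.Println("saw 3 items")
}
```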
- [#2522](https://github.com/influxdata/telegraf/pull/2522): Add support for mongodb client certificates. +- [#1948](https://github.com/influxdata/telegraf/pull/1948): Support adding SNMP table indexes as tags. ### Bugfixes diff --git a/plugins/inputs/snmp/README.md b/plugins/inputs/snmp/README.md index 473f2a52bd0c0..27b1f75712d04 100644 --- a/plugins/inputs/snmp/README.md +++ b/plugins/inputs/snmp/README.md @@ -168,6 +168,9 @@ If not specified, it defaults to the value of `oid`. If `oid` is numeric, an at * `inherit_tags`: Which tags to inherit from the top-level config and to use in the output of this table's measurement. +* `index_as_tag`: +Adds each row's index within the table as a tag. + ### MIB lookups If the plugin is configured such that it needs to perform lookups from the MIB, it will use the net-snmp utilities `snmptranslate` and `snmptable`. diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index 9296bc0434896..5394e57db5634 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -168,6 +168,9 @@ type Table struct { // Which tags to inherit from the top-level config. InheritTags []string + // Adds each row's table index as a tag. + IndexAsTag bool + // Fields is the tags and values to look up. Fields []Field `toml:"field"` @@ -464,13 +467,19 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) { } } - for i, v := range ifv { - rtr, ok := rows[i] + for idx, v := range ifv { + rtr, ok := rows[idx] if !ok { rtr = RTableRow{} rtr.Tags = map[string]string{} rtr.Fields = map[string]interface{}{} - rows[i] = rtr + rows[idx] = rtr + } + if t.IndexAsTag && idx != "" { + if idx[0] == '.' { + idx = idx[1:] + } + rtr.Tags["index"] = idx } // don't add an empty string if vs, ok := v.(string); !ok || vs != "" { diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index 62b19fcea1b36..07fdeddc1cbfe 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -413,7 +413,8 @@ func TestGosnmpWrapper_get_retry(t *testing.T) { func TestTableBuild_walk(t *testing.T) { tbl := Table{ - Name: "mytable", + Name: "mytable", + IndexAsTag: true, Fields: []Field{ { Name: "myfield1", @@ -442,7 +443,10 @@ func TestTableBuild_walk(t *testing.T) { assert.Equal(t, tb.Name, "mytable") rtr1 := RTableRow{ - Tags: map[string]string{"myfield1": "foo"}, + Tags: map[string]string{ + "myfield1": "foo", + "index": "0", + }, Fields: map[string]interface{}{ "myfield2": 1, "myfield3": float64(0.123), @@ -450,7 +454,10 @@ func TestTableBuild_walk(t *testing.T) { }, } rtr2 := RTableRow{ - Tags: map[string]string{"myfield1": "bar"}, + Tags: map[string]string{ + "myfield1": "bar", + "index": "1", + }, Fields: map[string]interface{}{ "myfield2": 2, "myfield3": float64(0.456), @@ -458,14 +465,18 @@ func TestTableBuild_walk(t *testing.T) { }, } rtr3 := RTableRow{ - Tags: map[string]string{}, + Tags: map[string]string{ + "index": "2", + }, Fields: map[string]interface{}{ "myfield2": 0, "myfield3": float64(0.0), }, } rtr4 := RTableRow{ - Tags: map[string]string{}, + Tags: map[string]string{ + "index": "3", + }, Fields: map[string]interface{}{ "myfield3": float64(9.999), }, From 0fa90014532e57139bf2987b0b908889da712a63 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 24 Mar 2017 16:01:35 -0700 Subject: [PATCH 030/201] Clarify influxdb output url format closes #2568 --- plugins/outputs/influxdb/README.md | 5 ++++- plugins/outputs/influxdb/influxdb.go | 5 ++++- 2 files changed, 8 insertions(+), 
2 deletions(-) diff --git a/plugins/outputs/influxdb/README.md b/plugins/outputs/influxdb/README.md index 864177a36e0dd..5acac6cca9b3f 100644 --- a/plugins/outputs/influxdb/README.md +++ b/plugins/outputs/influxdb/README.md @@ -7,7 +7,10 @@ This plugin writes to [InfluxDB](https://www.influxdb.com) via HTTP or UDP. ```toml # Configuration for influxdb server to send metrics to [[outputs.influxdb]] - ## The full HTTP or UDP endpoint URL for your InfluxDB instance. + ## The HTTP or UDP URL for your InfluxDB instance. Each item should be + ## of the form: + ## scheme "://" host [ ":" port] + ## ## Multiple urls can be specified as part of the same cluster, ## this means that only ONE of the urls will be written to each interval. # urls = ["udp://localhost:8089"] # UDP endpoint example diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go index 6c19a35fc8312..6419d43ea997a 100644 --- a/plugins/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -44,7 +44,10 @@ type InfluxDB struct { } var sampleConfig = ` - ## The full HTTP or UDP endpoint URL for your InfluxDB instance. + ## The HTTP or UDP URL for your InfluxDB instance. Each item should be + ## of the form: + ## scheme "://" host [ ":" port] + ## ## Multiple urls can be specified as part of the same cluster, ## this means that only ONE of the urls will be written to each interval. # urls = ["udp://localhost:8089"] # UDP endpoint example From 5612df48f961face923bfd3bba9bf4c174a9da97 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 27 Mar 2017 14:49:04 -0700 Subject: [PATCH 031/201] Update telegraf.conf --- etc/telegraf.conf | 378 ++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 334 insertions(+), 44 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index aabdf180effc2..63e41d7bbcd45 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -81,7 +81,10 @@ # Configuration for influxdb server to send metrics to [[outputs.influxdb]] - ## The full HTTP or UDP endpoint URL for your InfluxDB instance. + ## The HTTP or UDP URL for your InfluxDB instance. Each item should be + ## of the form: + ## scheme "://" host [ ":" port] + ## ## Multiple urls can be specified as part of the same cluster, ## this means that only ONE of the urls will be written to each interval. # urls = ["udp://localhost:8089"] # UDP endpoint example @@ -131,6 +134,8 @@ # ## AMQP exchange # exchange = "telegraf" # ## Auth method. PLAIN and EXTERNAL are supported +# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as +# ## described here: https://www.rabbitmq.com/plugins.html # # auth_method = "PLAIN" # ## Telegraf tag to use as a routing key # ## ie, if this tag exists, it's value will be used as the routing key @@ -193,6 +198,45 @@ # # no configuration +# # Configuration for Elasticsearch to send metrics to. +# [[outputs.elasticsearch]] +# ## The full HTTP endpoint URL for your Elasticsearch instance +# ## Multiple urls can be specified as part of the same cluster, +# ## this means that only ONE of the urls will be written to each interval. +# urls = [ "http://node1.es.example.com:9200" ] # required. +# ## Elasticsearch client timeout, defaults to "5s" if not set. +# timeout = "5s" +# ## Set to true to ask Elasticsearch a list of all cluster nodes, +# ## thus it is not necessary to list all nodes in the urls config option. 
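The clarified url format above, scheme "://" host [ ":" port], maps directly onto net/url. A quick sketch of validating a configured endpoint against that shape; the accepted scheme list and function name are illustrative, not the plugin's exact validation logic:

```go
package main

import (
	"fmt"
	"net/url"
)

// checkEndpoint accepts urls of the form scheme://host[:port].
func checkEndpoint(raw string) error {
	u, err := url.Parse(raw)
	if err != nil {
		return fmt.Errorf("parse %q: %s", raw, err)
	}
	switch u.Scheme {
	case "http", "https", "udp":
	default:
		return fmt.Errorf("unsupported scheme %q in %q", u.Scheme, raw)
	}
	if u.Host == "" {
		return fmt.Errorf("missing host in %q", raw)
	}
	return nil
}

func main() {
	for _, raw := range []string{
		"udp://localhost:8089",
		"http://127.0.0.1:8086",
		"localhost:8086", // rejected: "localhost" is parsed as the scheme
	} {
		fmt.Println(raw, "->", checkEndpoint(raw))
	}
}
```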
+# enable_sniffer = false +# ## Set the interval to check if the Elasticsearch nodes are available +# ## Setting to "0s" will disable the health check (not recommended in production) +# health_check_interval = "10s" +# ## HTTP basic authentication details (eg. when using Shield) +# # username = "telegraf" +# # password = "mypassword" +# +# ## Index Config +# ## The target index for metrics (Elasticsearch will create if it not exists). +# ## You can use the date specifiers below to create indexes per time frame. +# ## The metric timestamp will be used to decide the destination index name +# # %Y - year (2016) +# # %y - last two digits of year (00..99) +# # %m - month (01..12) +# # %d - day of month (e.g., 01) +# # %H - hour (00..23) +# index_name = "telegraf-%Y.%m.%d" # required. +# +# ## Template Config +# ## Set to true if you want telegraf to manage its index template. +# ## If enabled it will create a recommended index template for telegraf indexes +# manage_template = true +# ## The template name used for telegraf indexes +# template_name = "telegraf" +# ## Set to true if you want telegraf to overwrite an existing template +# overwrite_template = false + + # # Send telegraf metrics to file(s) # [[outputs.file]] # ## Files to write to, "stdout" is a specially handled file. @@ -443,7 +487,7 @@ # # expiration_interval = "60s" -# # Configuration for Riemann server to send metrics to +# # Configuration for the Riemann server to send metrics to # [[outputs.riemann]] # ## The full TCP or UDP URL of the Riemann server # url = "tcp://localhost:5555" @@ -472,9 +516,12 @@ # # ## Description for Riemann event # # description_text = "metrics collected from telegraf" +# +# ## Riemann client write timeout, defaults to "5s" if not set. +# # timeout = "5s" -# # Configuration for the legacy Riemann plugin +# # Configuration for the Riemann server to send metrics to # [[outputs.riemann_legacy]] # ## URL of server # url = "localhost:5555" @@ -484,6 +531,27 @@ # separator = " " +# # Generic socket writer capable of handling multiple socket types. +# [[outputs.socket_writer]] +# ## URL to connect to +# # address = "tcp://127.0.0.1:8094" +# # address = "tcp://example.com:http" +# # address = "tcp4://127.0.0.1:8094" +# # address = "tcp6://127.0.0.1:8094" +# # address = "tcp6://[2001:db8::1]:8094" +# # address = "udp://127.0.0.1:8094" +# # address = "udp4://127.0.0.1:8094" +# # address = "udp6://127.0.0.1:8094" +# # address = "unix:///tmp/telegraf.sock" +# # address = "unixgram:///tmp/telegraf.sock" +# +# ## Data format to generate. +# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# # data_format = "influx" + + ############################################################################### # PROCESSOR PLUGINS # @@ -531,7 +599,7 @@ ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually ## present on /run, /var/run, /dev/shm or /dev). - ignore_fs = ["tmpfs", "devtmpfs"] + ignore_fs = ["tmpfs", "devtmpfs", "devfs"] # Read metrics about disk IO by device @@ -542,6 +610,23 @@ # devices = ["sda", "sdb"] ## Uncomment the following line if you need disk serial numbers. # skip_serial_number = false + # + ## On systems which support it, device metadata can be added in the form of + ## tags. + ## Currently only Linux is supported via udev properties. 
You can view + ## available properties for a device by running: + ## 'udevadm info -q property -n /dev/sda' + # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"] + # + ## Using the same metadata source as device_tags, you can also customize the + ## name of the device via templates. + ## The 'name_templates' parameter is a list of templates to try and apply to + ## the device. The template may contain variables in the form of '$PROPERTY' or + ## '${PROPERTY}'. The first template which does not contain any variables not + ## present for the device is used as the device name tag. + ## The typical use case is for LVM volumes, to get the VG/LV name instead of + ## the near-meaningless DM-0 name. + # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"] # Get kernel statistics from /proc/stat @@ -658,7 +743,7 @@ # gather_admin_socket_stats = true # # ## Whether to gather statistics via ceph commands -# gather_cluster_stats = true +# gather_cluster_stats = false # # Read specific statistics per cgroup @@ -677,6 +762,12 @@ # # files = ["memory.*usage*", "memory.limit_in_bytes"] +# # Get standard chrony metrics, requires chronyc executable. +# [[inputs.chrony]] +# ## If true, chronyc tries to perform a DNS lookup for the time server. +# # dns_lookup = false + + # # Pull Metric Statistics from Amazon CloudWatch # [[inputs.cloudwatch]] # ## Amazon Region @@ -722,9 +813,10 @@ # namespace = "AWS/ELB" # # ## Maximum requests per second. Note that the global default AWS rate limit is -# ## 10 reqs/sec, so if you define multiple namespaces, these should add up to a -# ## maximum of 10. Optional - default value is 10. -# ratelimit = 10 +# ## 400 reqs/sec, so if you define multiple namespaces, these should add up to a +# ## maximum of 400. Optional - default value is 200. +# ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html +# ratelimit = 200 # # ## Metrics to Pull (optional) # ## Defaults to all Metrics in Namespace if nothing is provided @@ -738,6 +830,22 @@ # # value = "p-example" +# # Collects conntrack stats from the configured directories and files. +# [[inputs.conntrack]] +# ## The following defaults would work with multiple versions of conntrack. +# ## Note the nf_ and ip_ filename prefixes are mutually exclusive across +# ## kernel versions, as are the directory locations. +# +# ## Superset of filenames to look for within the conntrack dirs. +# ## Missing files will be ignored. +# files = ["ip_conntrack_count","ip_conntrack_max", +# "nf_conntrack_count","nf_conntrack_max"] +# +# ## Directories to search within for the conntrack files above. +# ## Missing directrories will be ignored. +# dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"] + + # # Gather health check statuses from services registered in Consul # [[inputs.consul]] # ## Most of these values defaults to the one configured on a Consul's agent level. @@ -957,6 +1065,24 @@ # ## Server address not starting with 'http' will be treated as a possible # ## socket, so both examples below are valid. # ## servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"] +# # +# ## By default, some of the fields are renamed from what haproxy calls them. +# ## Setting this option to true results in the plugin keeping the original +# ## field names. +# ## keep_field_names = true + + +# # Monitor disks' temperatures using hddtemp +# [[inputs.hddtemp]] +# ## By default, telegraf gathers temps data from all disks detected by the +# ## hddtemp. 
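+# ## (hddtemp must already be running in daemon mode, e.g. started with
+# ## `hddtemp -d`, and listening on the address configured below.)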
+# ## +# ## Only collect temps from the selected disks. +# ## +# ## A * as the device name will return the temperature values of all disks. +# ## +# # address = "127.0.0.1:7634" +# # devices = ["sda", "*"] # # HTTP/HTTPS request given an address a method and a timeout @@ -977,6 +1103,11 @@ # # {'fake':'data'} # # ''' # +# ## Optional substring or regex match in body of the response +# ## response_string_match = "\"service_status\": \"up\"" +# ## response_string_match = "ok" +# ## response_string_match = "\".*_status\".?:.?\"up\"" +# # ## Optional SSL Config # # ssl_ca = "/etc/telegraf/ca.pem" # # ssl_cert = "/etc/telegraf/cert.pem" @@ -1050,14 +1181,37 @@ # # collect_memstats = true -# # Read metrics from one or many bare metal servers +# # Read metrics from the bare metal servers via IPMI # [[inputs.ipmi_sensor]] -# ## specify servers via a url matching: +# ## optionally specify the path to the ipmitool executable +# # path = "/usr/bin/ipmitool" +# # +# ## optionally specify one or more servers via a url matching # ## [username[:password]@][protocol[(address)]] # ## e.g. # ## root:passwd@lan(127.0.0.1) # ## -# servers = ["USERID:PASSW0RD@lan(192.168.1.1)"] +# ## if no servers are specified, local machine sensor stats will be queried +# ## +# # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"] + + +# # Gather packets and bytes throughput from iptables +# [[inputs.iptables]] +# ## iptables require root access on most systems. +# ## Setting 'use_sudo' to true will make use of sudo to run iptables. +# ## Users must configure sudo to allow telegraf user to run iptables with no password. +# ## iptables can be restricted to only list command "iptables -nvL". +# use_sudo = false +# ## Setting 'use_lock' to true runs iptables with the "-w" option. +# ## Adjust your sudo settings appropriately if using this option ("iptables -wnvl") +# use_lock = false +# ## defines the table to monitor: +# table = "filter" +# ## defines the chains to monitor. +# ## NOTE: iptables rules without a comment will not be monitored. +# ## Read the plugin documentation for more information. +# chains = [ "INPUT" ] # # Read JMX metrics through Jolokia @@ -1087,6 +1241,13 @@ # ## Includes connection time, any redirects, and reading the response body. # # client_timeout = "4s" # +# ## Attribute delimiter +# ## +# ## When multiple attributes are returned for a single +# ## [inputs.jolokia.metrics], the field name is a concatenation of the metric +# ## name, and the attribute name, separated by the given delimiter. +# # delimiter = "_" +# # ## List of servers exposing jolokia read service # [[inputs.jolokia.servers]] # name = "as-server-01" @@ -1117,6 +1278,11 @@ # attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount" +# # Get kernel statistics from /proc/vmstat +# [[inputs.kernel_vmstat]] +# # no configuration + + # # Read metrics from the kubernetes kubelet api # [[inputs.kubernetes]] # ## URL for the kubelet @@ -1216,6 +1382,13 @@ # ## 10.0.0.1:10000, etc. 
# servers = ["127.0.0.1:27017"] # gather_perdb_stats = false +# +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false # # Read metrics from one or many mysql servers @@ -1243,9 +1416,15 @@ # ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST # gather_process_list = true # # +# ## gather thread state counts from INFORMATION_SCHEMA.USER_STATISTICS +# gather_user_statistics = true +# # # ## gather auto_increment columns and max values from information schema # gather_info_schema_auto_inc = true # # +# ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS +# gather_innodb_metrics = true +# # # ## gather metrics from SHOW SLAVE STATUS command output # gather_slave_status = true # # @@ -1417,7 +1596,7 @@ # # ignored_databases = ["postgres", "template0", "template1"] # # ## A list of databases to pull metrics about. If not specified, metrics for all -# ## databases are gathered. Do NOT use with the 'ignore_databases' option. +# ## databases are gathered. Do NOT use with the 'ignored_databases' option. # # databases = ["app_production", "testing"] @@ -1599,6 +1778,13 @@ # servers = ["http://localhost:8098"] +# # Monitor sensors, requires lm-sensors package +# [[inputs.sensors]] +# ## Remove numbers from field names. +# ## If true, a field name like 'temp1_input' will be changed to 'temp_input'. +# # remove_numbers = true + + # # Retrieves SNMP values from remote agents # [[inputs.snmp]] # agents = [ "127.0.0.1:161" ] @@ -1775,6 +1961,68 @@ # # ] +# # Sysstat metrics collector +# [[inputs.sysstat]] +# ## Path to the sadc command. +# # +# ## Common Defaults: +# ## Debian/Ubuntu: /usr/lib/sysstat/sadc +# ## Arch: /usr/lib/sa/sadc +# ## RHEL/CentOS: /usr/lib64/sa/sadc +# sadc_path = "/usr/lib/sa/sadc" # required +# # +# # +# ## Path to the sadf command, if it is not in PATH +# # sadf_path = "/usr/bin/sadf" +# # +# # +# ## Activities is a list of activities, that are passed as argument to the +# ## sadc collector utility (e.g: DISK, SNMP etc...) +# ## The more activities that are added, the more data is collected. +# # activities = ["DISK"] +# # +# # +# ## Group metrics to measurements. +# ## +# ## If group is false each metric will be prefixed with a description +# ## and represents itself a measurement. +# ## +# ## If Group is true, corresponding metrics are grouped to a single measurement. +# # group = true +# # +# # +# ## Options for the sadf command. The values on the left represent the sadf +# ## options and the values on the right their description (wich are used for +# ## grouping and prefixing metrics). +# ## +# ## Run 'sar -h' or 'man sar' to find out the supported options for your +# ## sysstat version. +# [inputs.sysstat.options] +# -C = "cpu" +# -B = "paging" +# -b = "io" +# -d = "disk" # requires DISK activity +# "-n ALL" = "network" +# "-P ALL" = "per_cpu" +# -q = "queue" +# -R = "mem" +# -r = "mem_util" +# -S = "swap_util" +# -u = "cpu_util" +# -v = "inode" +# -W = "swap" +# -w = "task" +# # -H = "hugepages" # only available for newer linux distributions +# # "-I ALL" = "interrupts" # requires INT activity +# # +# # +# ## Device tags can be used to add additional tags for devices. +# ## For example the configuration below adds a tag vg with value rootvg for +# ## all metrics with sda devices. 
+# # [[inputs.sysstat.device_tags.sda]] +# # vg = "rootvg" + + # # Inserts sine and cosine waves for demonstration purposes # [[inputs.trig]] # ## Set the amplitude @@ -1830,6 +2078,39 @@ # SERVICE INPUT PLUGINS # ############################################################################### +# # AMQP consumer plugin +# [[inputs.amqp_consumer]] +# ## AMQP url +# url = "amqp://localhost:5672/influxdb" +# ## AMQP exchange +# exchange = "telegraf" +# ## AMQP queue name +# queue = "telegraf" +# ## Binding Key +# binding_key = "#" +# +# ## Maximum number of messages server should give to the worker. +# prefetch_count = 50 +# +# ## Auth method. PLAIN and EXTERNAL are supported +# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as +# ## described here: https://www.rabbitmq.com/plugins.html +# # auth_method = "PLAIN" +# +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Data format to output. +# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + # # Influx HTTP write listener # [[inputs.http_listener]] # ## Address and port to host HTTP listener on @@ -1878,7 +2159,9 @@ # ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log # ## /var/log/apache.log -> only tail the apache log file # files = ["/var/log/apache/access.log"] -# ## Read file from beginning. +# ## Read files that currently exist from the beginning. Files that are created +# ## while telegraf is running (and that match the "files" globs) will always +# ## be read from the beginning. # from_beginning = false # # ## Parse logstash-style "grok" patterns: @@ -1976,6 +2259,38 @@ # data_format = "influx" +# # Generic socket listener capable of handling multiple socket types. +# [[inputs.socket_listener]] +# ## URL to listen on +# # service_address = "tcp://:8094" +# # service_address = "tcp://127.0.0.1:http" +# # service_address = "tcp4://:8094" +# # service_address = "tcp6://:8094" +# # service_address = "tcp6://[2001:db8::1]:8094" +# # service_address = "udp://:8094" +# # service_address = "udp4://:8094" +# # service_address = "udp6://:8094" +# # service_address = "unix:///tmp/telegraf.sock" +# # service_address = "unixgram:///tmp/telegraf.sock" +# +# ## Maximum number of concurrent connections. +# ## Only applies to stream sockets (e.g. TCP). +# ## 0 (default) is unlimited. +# # max_connections = 1024 +# +# ## Maximum socket buffer size in bytes. +# ## For stream sockets, once the buffer fills up, the sender will start backing up. +# ## For datagram sockets, once the buffer fills up, metrics will start dropping. +# ## Defaults to the OS default. +# # read_buffer_size = 65535 +# +# ## Data format to consume. +# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# # data_format = "influx" + + # # Statsd Server # [[inputs.statsd]] # ## Address and port to host UDP listener on @@ -2045,41 +2360,16 @@ # # Generic TCP listener # [[inputs.tcp_listener]] -# ## Address and port to host TCP listener on -# # service_address = ":8094" -# -# ## Number of TCP messages allowed to queue up. 
Once filled, the
-# ## TCP listener will start dropping packets.
-# # allowed_pending_messages = 10000
-#
-# ## Maximum number of concurrent TCP connections to allow
-# # max_tcp_connections = 250
-#
-# ## Data format to consume.
-# ## Each data format has it's own unique set of configuration options, read
-# ## more about them here:
-# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
-# data_format = "influx"
+# # DEPRECATED: the TCP listener plugin has been deprecated in favor of the
+# # socket_listener plugin
+# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener


# # Generic UDP listener
# [[inputs.udp_listener]]
-# ## Address and port to host UDP listener on
-# # service_address = ":8092"
-#
-# ## Number of UDP messages allowed to queue up. Once filled, the
-# ## UDP listener will start dropping packets.
-# # allowed_pending_messages = 10000
-#
-# ## Set the buffer size of the UDP connection outside of OS default (in bytes)
-# ## If set to 0, take OS default
-# udp_buffer_size = 16777216
-#
-# ## Data format to consume.
-# ## Each data format has it's own unique set of configuration options, read
-# ## more about them here:
-# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
-# data_format = "influx"
+# # DEPRECATED: the UDP listener plugin has been deprecated in favor of the
+# # socket_listener plugin
+# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener


# # A Webhooks Event collector
From 84a9f91f5c090561b93fb505a87970a9165a532e Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Mon, 27 Mar 2017 15:05:06 -0700
Subject: [PATCH 032/201] Skip elasticsearch output integration test in short mode

---
 plugins/outputs/elasticsearch/elasticsearch_test.go | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/plugins/outputs/elasticsearch/elasticsearch_test.go b/plugins/outputs/elasticsearch/elasticsearch_test.go
index 9163a2bbe7f03..9000676d9130c 100644
--- a/plugins/outputs/elasticsearch/elasticsearch_test.go
+++ b/plugins/outputs/elasticsearch/elasticsearch_test.go
@@ -57,6 +57,10 @@ func TestTemplateManagementEmptyTemplate(t *testing.T) {
 }
 
 func TestTemplateManagement(t *testing.T) {
+	if testing.Short() {
+		t.Skip("Skipping integration test in short mode")
+	}
+
 	urls := []string{"http://" + testutil.GetLocalHost() + ":9200"}
 
 	e := &Elasticsearch{

From 78c7f4e4af827a96cacdea3593b85293e30ff745 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Mon, 27 Mar 2017 15:49:45 -0700
Subject: [PATCH 033/201] Add write timeout to Riemann output (#2576)

---
 CHANGELOG.md                       |  1 +
 plugins/outputs/riemann/README.md  |  3 +++
 plugins/outputs/riemann/riemann.go | 12 ++++++++++--
 3 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index b1655f77d3b49..5da830d322f49 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -84,6 +84,7 @@ be deprecated eventually.
 - [#2513](https://github.com/influxdata/telegraf/issues/2513): create /etc/telegraf/telegraf.d directory in tarball.
 - [#2541](https://github.com/influxdata/telegraf/issues/2541): Return error on unsupported serializer data format.
- [#1827](https://github.com/influxdata/telegraf/issues/1827): Fix Windows Performance Counters multi instance identifier +- [#2576](https://github.com/influxdata/telegraf/pull/2576): Add write timeout to Riemann output ## v1.2.1 [2017-02-01] diff --git a/plugins/outputs/riemann/README.md b/plugins/outputs/riemann/README.md index 2338a00dc9d64..82615728cbabe 100644 --- a/plugins/outputs/riemann/README.md +++ b/plugins/outputs/riemann/README.md @@ -34,6 +34,9 @@ This plugin writes to [Riemann](http://riemann.io/) via TCP or UDP. ## Description for Riemann event # description_text = "metrics collected from telegraf" + + ## Riemann client write timeout, defaults to "5s" if not set. + # timeout = "5s" ``` ### Required parameters: diff --git a/plugins/outputs/riemann/riemann.go b/plugins/outputs/riemann/riemann.go index 25cf3011a1edf..1738ca537bab0 100644 --- a/plugins/outputs/riemann/riemann.go +++ b/plugins/outputs/riemann/riemann.go @@ -7,9 +7,11 @@ import ( "os" "sort" "strings" + "time" "github.com/amir/raidman" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/outputs" ) @@ -22,6 +24,7 @@ type Riemann struct { TagKeys []string Tags []string DescriptionText string + Timeout internal.Duration client *raidman.Client } @@ -54,6 +57,9 @@ var sampleConfig = ` ## Description for Riemann event # description_text = "metrics collected from telegraf" + + ## Riemann client write timeout, defaults to "5s" if not set. + # timeout = "5s" ` func (r *Riemann) Connect() error { @@ -62,7 +68,7 @@ func (r *Riemann) Connect() error { return err } - client, err := raidman.Dial(parsed_url.Scheme, parsed_url.Host) + client, err := raidman.DialWithTimeout(parsed_url.Scheme, parsed_url.Host, r.Timeout.Duration) if err != nil { r.client = nil return err @@ -212,6 +218,8 @@ func (r *Riemann) tags(tags map[string]string) []string { func init() { outputs.Add("riemann", func() telegraf.Output { - return &Riemann{} + return &Riemann{ + Timeout: internal.Duration{Duration: time.Second * 5}, + } }) } From 37689f4df60872948425b57baaada82e99872f6e Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 28 Mar 2017 10:22:28 -0700 Subject: [PATCH 034/201] Add elasticsearch output to changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5da830d322f49..2650e5716b236 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -59,6 +59,7 @@ be deprecated eventually. - [#2501](https://github.com/influxdata/telegraf/pull/2501): Support DEAD(X) state in system input plugin. - [#2522](https://github.com/influxdata/telegraf/pull/2522): Add support for mongodb client certificates. - [#1948](https://github.com/influxdata/telegraf/pull/1948): Support adding SNMP table indexes as tags. 
+- [#2332](https://github.com/influxdata/telegraf/pull/2332): Add Elasticsearch 5.x output ### Bugfixes From 1100a98f11b3a90ee6aacf7294331caa3d3d919c Mon Sep 17 00:00:00 2001 From: mgresser Date: Tue, 28 Mar 2017 13:47:00 -0400 Subject: [PATCH 035/201] Removed duplicate evictions metric (#2577) --- plugins/inputs/memcached/memcached.go | 1 - plugins/inputs/memcached/memcached_test.go | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/plugins/inputs/memcached/memcached.go b/plugins/inputs/memcached/memcached.go index 5ee538e936095..d174abedafbdb 100644 --- a/plugins/inputs/memcached/memcached.go +++ b/plugins/inputs/memcached/memcached.go @@ -51,7 +51,6 @@ var sendMetrics = []string{ "decr_misses", "cas_hits", "cas_misses", - "evictions", "bytes_read", "bytes_written", "threads", diff --git a/plugins/inputs/memcached/memcached_test.go b/plugins/inputs/memcached/memcached_test.go index 210adffdba587..436c978f7be92 100644 --- a/plugins/inputs/memcached/memcached_test.go +++ b/plugins/inputs/memcached/memcached_test.go @@ -28,7 +28,7 @@ func TestMemcachedGeneratesMetrics(t *testing.T) { "limit_maxbytes", "bytes", "uptime", "curr_items", "total_items", "curr_connections", "total_connections", "connection_structures", "cmd_get", "cmd_set", "delete_hits", "delete_misses", "incr_hits", "incr_misses", - "decr_hits", "decr_misses", "cas_hits", "cas_misses", "evictions", + "decr_hits", "decr_misses", "cas_hits", "cas_misses", "bytes_read", "bytes_written", "threads", "conn_yields"} for _, metric := range intMetrics { From 9e036b2d6553b47c8d556a29b96e3b3215f4a338 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 28 Mar 2017 12:31:36 -0700 Subject: [PATCH 036/201] Remove wait loop in riemann tests This testcase still has a race condition but I believe it is when the test does not complete quickly enough. --- plugins/outputs/riemann/riemann_test.go | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/plugins/outputs/riemann/riemann_test.go b/plugins/outputs/riemann/riemann_test.go index 67a161be5ab1d..0b7c85403ece9 100644 --- a/plugins/outputs/riemann/riemann_test.go +++ b/plugins/outputs/riemann/riemann_test.go @@ -193,17 +193,6 @@ func TestConnectAndWrite(t *testing.T) { err = r.Write(metrics) require.NoError(t, err) - start := time.Now() - for true { - events, _ := r.client.Query(`tagged "docker"`) - if len(events) > 0 { - break - } - if time.Since(start) > time.Second { - break - } - } - // are there any "docker" tagged events in Riemann? 
events, err := r.client.Query(`tagged "docker"`) require.NoError(t, err) From 2d7f612bd70b8832e464dd908a7d976cb3a408c4 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 29 Mar 2017 14:25:33 -0700 Subject: [PATCH 037/201] Use fork of hpcloud/tail (#2595) --- Godeps | 2 +- plugins/inputs/logparser/logparser.go | 2 +- plugins/inputs/tail/tail.go | 2 +- plugins/inputs/tail/tail_test.go | 2 -- 4 files changed, 3 insertions(+), 5 deletions(-) diff --git a/Godeps b/Godeps index 6cbe9efa72c19..9717cec2fd5f1 100644 --- a/Godeps +++ b/Godeps @@ -21,7 +21,7 @@ github.com/golang/snappy 7db9049039a047d955fe8c19b83c8ff5abd765c7 github.com/gorilla/mux 392c28fe23e1c45ddba891b0320b3b5df220beea github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478 github.com/hashicorp/consul 63d2fc68239b996096a1c55a0d4b400ea4c2583f -github.com/hpcloud/tail 915e5feba042395f5fda4dbe9c0e99aeab3088b3 +github.com/influxdata/tail e9ef7e826dafcb3093b40b989fefa90eeb9a8ca1 github.com/influxdata/toml 5d1d907f22ead1cd47adde17ceec5bda9cacaf8f github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec github.com/jackc/pgx c8080fc4a1bfa44bf90383ad0fdce2f68b7d313c diff --git a/plugins/inputs/logparser/logparser.go b/plugins/inputs/logparser/logparser.go index a228322772c13..c5641ba28ad05 100644 --- a/plugins/inputs/logparser/logparser.go +++ b/plugins/inputs/logparser/logparser.go @@ -6,7 +6,7 @@ import ( "reflect" "sync" - "github.com/hpcloud/tail" + "github.com/influxdata/tail" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal/errchan" diff --git a/plugins/inputs/tail/tail.go b/plugins/inputs/tail/tail.go index 0c19f91164b8e..f57d970cf3a56 100644 --- a/plugins/inputs/tail/tail.go +++ b/plugins/inputs/tail/tail.go @@ -4,7 +4,7 @@ import ( "fmt" "sync" - "github.com/hpcloud/tail" + "github.com/influxdata/tail" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal/globpath" diff --git a/plugins/inputs/tail/tail_test.go b/plugins/inputs/tail/tail_test.go index b927d160c9c80..7ddb502f946dc 100644 --- a/plugins/inputs/tail/tail_test.go +++ b/plugins/inputs/tail/tail_test.go @@ -5,7 +5,6 @@ import ( "os" "runtime" "testing" - "time" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" @@ -59,7 +58,6 @@ func TestTailFromEnd(t *testing.T) { acc := testutil.Accumulator{} require.NoError(t, tt.Start(&acc)) - time.Sleep(time.Millisecond * 200) //TODO remove once https://github.com/hpcloud/tail/pull/114 is merged & added to Godeps for _, tailer := range tt.tailers { for n, err := tailer.Tell(); err == nil && n == 0; n, err = tailer.Tell() { // wait for tailer to jump to end From cc5b2f68b69ec76886555cbae9c08b75155a6ffc Mon Sep 17 00:00:00 2001 From: djjorjinho Date: Wed, 29 Mar 2017 23:04:29 +0100 Subject: [PATCH 038/201] fix timestamp parsing on prometheus plugin (#2596) --- CHANGELOG.md | 1 + plugins/inputs/prometheus/prometheus.go | 10 +++++----- plugins/inputs/prometheus/prometheus_test.go | 7 +++++++ testutil/accumulator.go | 13 +++++++++++++ 4 files changed, 26 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2650e5716b236..f9a29d075b5d4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -86,6 +86,7 @@ be deprecated eventually. - [#2541](https://github.com/influxdata/telegraf/issues/2541): Return error on unsupported serializer data format. 
- [#1827](https://github.com/influxdata/telegraf/issues/1827): Fix Windows Performance Counters multi instance identifier - [#2576](https://github.com/influxdata/telegraf/pull/2576): Add write timeout to Riemann output +- [#2596](https://github.com/influxdata/telegraf/pull/2596): fix timestamp parsing on prometheus plugin ## v1.2.1 [2017-02-01] diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index 97da17f04751f..c1212796534d5 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -3,14 +3,15 @@ package prometheus import ( "errors" "fmt" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/plugins/inputs" "io/ioutil" "net" "net/http" "sync" "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" ) const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3` @@ -91,7 +92,6 @@ var client = &http.Client{ } func (p *Prometheus) gatherURL(url string, acc telegraf.Accumulator) error { - collectDate := time.Now() var req, err = http.NewRequest("GET", url, nil) req.Header.Add("Accept", acceptHeader) var token []byte @@ -145,7 +145,7 @@ func (p *Prometheus) gatherURL(url string, acc telegraf.Accumulator) error { for _, metric := range metrics { tags := metric.Tags() tags["url"] = url - acc.AddFields(metric.Name(), metric.Fields(), tags, collectDate) + acc.AddFields(metric.Name(), metric.Fields(), tags, metric.Time()) } return nil diff --git a/plugins/inputs/prometheus/prometheus_test.go b/plugins/inputs/prometheus/prometheus_test.go index 8a8fea9e39d1b..4b316a3b48c24 100644 --- a/plugins/inputs/prometheus/prometheus_test.go +++ b/plugins/inputs/prometheus/prometheus_test.go @@ -5,6 +5,7 @@ import ( "net/http" "net/http/httptest" "testing" + "time" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" @@ -23,6 +24,9 @@ go_gc_duration_seconds_count 7 # HELP go_goroutines Number of goroutines that currently exist. 
# TYPE go_goroutines gauge go_goroutines 15 +# HELP test_metric An untyped metric with a timestamp +# TYPE test_metric untyped +test_metric{label="value"} 1.0 1490802350000 ` func TestPrometheusGeneratesMetrics(t *testing.T) { @@ -42,4 +46,7 @@ func TestPrometheusGeneratesMetrics(t *testing.T) { assert.True(t, acc.HasFloatField("go_gc_duration_seconds", "count")) assert.True(t, acc.HasFloatField("go_goroutines", "gauge")) + assert.True(t, acc.HasFloatField("test_metric", "value")) + assert.True(t, acc.HasTimestamp("test_metric", time.Unix(1490802350, 0))) + } diff --git a/testutil/accumulator.go b/testutil/accumulator.go index 02bebf9c8b68c..b958e8cc9e974 100644 --- a/testutil/accumulator.go +++ b/testutil/accumulator.go @@ -275,6 +275,19 @@ func (a *Accumulator) AssertDoesNotContainMeasurement(t *testing.T, measurement } } +// HasTimestamp returns true if the measurement has a matching Time value +func (a *Accumulator) HasTimestamp(measurement string, timestamp time.Time) bool { + a.Lock() + defer a.Unlock() + for _, p := range a.Metrics { + if p.Measurement == measurement { + return timestamp.Equal(p.Time) + } + } + + return false +} + // HasIntField returns true if the measurement has an Int value func (a *Accumulator) HasIntField(measurement string, field string) bool { a.Lock() From 03ee6022f305dd0d73bb03dbb2fd3d81c9909a1f Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Wed, 29 Mar 2017 20:03:06 -0400 Subject: [PATCH 039/201] fix race in testutil Accumulator.Wait() (#2598) --- testutil/accumulator.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testutil/accumulator.go b/testutil/accumulator.go index b958e8cc9e974..9ebf77cf729c5 100644 --- a/testutil/accumulator.go +++ b/testutil/accumulator.go @@ -43,9 +43,9 @@ func (a *Accumulator) NMetrics() uint64 { } func (a *Accumulator) ClearMetrics() { - atomic.StoreUint64(&a.nMetrics, 0) a.Lock() defer a.Unlock() + atomic.StoreUint64(&a.nMetrics, 0) a.Metrics = make([]*Metric, 0) } @@ -56,9 +56,9 @@ func (a *Accumulator) AddFields( tags map[string]string, timestamp ...time.Time, ) { - atomic.AddUint64(&a.nMetrics, 1) a.Lock() defer a.Unlock() + atomic.AddUint64(&a.nMetrics, 1) if a.Cond != nil { a.Cond.Broadcast() } From fb1c7d01547bcb82ffd5583b1f7c38565c487335 Mon Sep 17 00:00:00 2001 From: tjmcs Date: Wed, 29 Mar 2017 17:12:29 -0700 Subject: [PATCH 040/201] Adds a new json_timestamp_units configuration parameter (#2587) --- docs/DATA_FORMATS_OUTPUT.md | 10 ++++++++++ internal/config/config.go | 20 +++++++++++++++++++- plugins/serializers/json/json.go | 10 +++++++++- plugins/serializers/registry.go | 12 ++++++++---- 4 files changed, 46 insertions(+), 6 deletions(-) diff --git a/docs/DATA_FORMATS_OUTPUT.md b/docs/DATA_FORMATS_OUTPUT.md index 177734d16eda1..633460846fab9 100644 --- a/docs/DATA_FORMATS_OUTPUT.md +++ b/docs/DATA_FORMATS_OUTPUT.md @@ -147,4 +147,14 @@ The JSON data format serialized Telegraf metrics in json format. The format is: ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md data_format = "json" + json_timestamp_units = "1ns" ``` + +By default, the timestamp that is output in JSON data format serialized Telegraf +metrics is in seconds. The precision of this timestamp can be adjusted for any output +by adding the optional `json_timestamp_units` parameter to the configuration for +that output. This parameter can be used to set the timestamp units to nanoseconds (`ns`), +microseconds (`us` or `µs`), milliseconds (`ms`), or seconds (`s`). 
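+For example, `json_timestamp_units = "1ms"` makes the serialized `timestamp`
+field a count of milliseconds since the Unix epoch.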
Note that this +parameter will be truncated to the nearest power of 10 that, so if the `json_timestamp_units` +are set to `15ms` the timestamps for the JSON format serialized Telegraf metrics will be +output in hundredths of a second (`10ms`). diff --git a/internal/config/config.go b/internal/config/config.go index 651c4e9ef4bf3..013e81c1259a8 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -6,6 +6,7 @@ import ( "fmt" "io/ioutil" "log" + "math" "os" "path/filepath" "regexp" @@ -1244,7 +1245,7 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) { // a serializers.Serializer object, and creates it, which can then be added onto // an Output object. func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error) { - c := &serializers.Config{} + c := &serializers.Config{TimestampUnits: time.Duration(1 * time.Second)} if node, ok := tbl.Fields["data_format"]; ok { if kv, ok := node.(*ast.KeyValue); ok { @@ -1274,9 +1275,26 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error } } + if node, ok := tbl.Fields["json_timestamp_units"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + timestampVal, err := time.ParseDuration(str.Value) + if err != nil { + return nil, fmt.Errorf("Unable to parse json_timestamp_units as a duration, %s", err) + } + // now that we have a duration, truncate it to the nearest + // power of ten (just in case) + nearest_exponent := int64(math.Log10(float64(timestampVal.Nanoseconds()))) + new_nanoseconds := int64(math.Pow(10.0, float64(nearest_exponent))) + c.TimestampUnits = time.Duration(new_nanoseconds) + } + } + } + delete(tbl.Fields, "data_format") delete(tbl.Fields, "prefix") delete(tbl.Fields, "template") + delete(tbl.Fields, "json_timestamp_units") return serializers.NewSerializer(c) } diff --git a/plugins/serializers/json/json.go b/plugins/serializers/json/json.go index 3e259fafd3b43..452364c950a14 100644 --- a/plugins/serializers/json/json.go +++ b/plugins/serializers/json/json.go @@ -2,19 +2,27 @@ package json import ( ejson "encoding/json" + "time" "github.com/influxdata/telegraf" ) type JsonSerializer struct { + TimestampUnits time.Duration } func (s *JsonSerializer) Serialize(metric telegraf.Metric) ([]byte, error) { m := make(map[string]interface{}) + units_nanoseconds := s.TimestampUnits.Nanoseconds() + // if the units passed in were less than or equal to zero, + // then serialize the timestamp in seconds (the default) + if units_nanoseconds <= 0 { + units_nanoseconds = 1000000000 + } m["tags"] = metric.Tags() m["fields"] = metric.Fields() m["name"] = metric.Name() - m["timestamp"] = metric.UnixNano() / 1000000000 + m["timestamp"] = metric.UnixNano() / units_nanoseconds serialized, err := ejson.Marshal(m) if err != nil { return []byte{}, err diff --git a/plugins/serializers/registry.go b/plugins/serializers/registry.go index cb1e03b46a6ea..368f6f4494694 100644 --- a/plugins/serializers/registry.go +++ b/plugins/serializers/registry.go @@ -2,6 +2,7 @@ package serializers import ( "fmt" + "time" "github.com/influxdata/telegraf" @@ -29,7 +30,7 @@ type Serializer interface { // Config is a struct that covers the data types needed for all serializer types, // and can be used to instantiate _any_ of the serializers. 
type Config struct { - // Dataformat can be one of: influx, graphite + // Dataformat can be one of: influx, graphite, or json DataFormat string // Prefix to add to all measurements, only supports Graphite @@ -38,6 +39,9 @@ type Config struct { // Template for converting telegraf metrics into Graphite // only supports Graphite Template string + + // Timestamp units to use for JSON formatted output + TimestampUnits time.Duration } // NewSerializer a Serializer interface based on the given config. @@ -50,15 +54,15 @@ func NewSerializer(config *Config) (Serializer, error) { case "graphite": serializer, err = NewGraphiteSerializer(config.Prefix, config.Template) case "json": - serializer, err = NewJsonSerializer() + serializer, err = NewJsonSerializer(config.TimestampUnits) default: err = fmt.Errorf("Invalid data format: %s", config.DataFormat) } return serializer, err } -func NewJsonSerializer() (Serializer, error) { - return &json.JsonSerializer{}, nil +func NewJsonSerializer(timestampUnits time.Duration) (Serializer, error) { + return &json.JsonSerializer{TimestampUnits: timestampUnits}, nil } func NewInfluxSerializer() (Serializer, error) { From 9495b615f55a937e0f1a79670451227d3df5166d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 29 Mar 2017 17:14:57 -0700 Subject: [PATCH 041/201] Update changelog for #2587 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f9a29d075b5d4..2ab69960065b8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -60,6 +60,7 @@ be deprecated eventually. - [#2522](https://github.com/influxdata/telegraf/pull/2522): Add support for mongodb client certificates. - [#1948](https://github.com/influxdata/telegraf/pull/1948): Support adding SNMP table indexes as tags. - [#2332](https://github.com/influxdata/telegraf/pull/2332): Add Elasticsearch 5.x output +- [#2587](https://github.com/influxdata/telegraf/pull/2587): Add json timestamp units configurability ### Bugfixes From c980c92cd5d7f5d4d4b3f3c81401c70a65eb5a0a Mon Sep 17 00:00:00 2001 From: Dmitry Ulyanov Date: Thu, 30 Mar 2017 04:28:43 +0300 Subject: [PATCH 042/201] Added pprof tool (#2512) --- CHANGELOG.md | 1 + cmd/telegraf/telegraf.go | 25 +++++++++++++++++++++++++ docs/PROFILING.md | 24 ++++++++++++++++++++++++ 3 files changed, 50 insertions(+) create mode 100644 docs/PROFILING.md diff --git a/CHANGELOG.md b/CHANGELOG.md index 2ab69960065b8..9f90157f752f6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -56,6 +56,7 @@ be deprecated eventually. - [#2339](https://github.com/influxdata/telegraf/pull/2339): Increment gather_errors for all errors emitted by inputs. - [#2071](https://github.com/influxdata/telegraf/issues/2071): Use official docker SDK. - [#1678](https://github.com/influxdata/telegraf/pull/1678): Add AMQP consumer input plugin +- [#2512](https://github.com/influxdata/telegraf/pull/2512): Added pprof tool. - [#2501](https://github.com/influxdata/telegraf/pull/2501): Support DEAD(X) state in system input plugin. - [#2522](https://github.com/influxdata/telegraf/pull/2522): Add support for mongodb client certificates. - [#1948](https://github.com/influxdata/telegraf/pull/1948): Support adding SNMP table indexes as tags. 
diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 16f7845d0507a..40e90a1ec6b6b 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -4,6 +4,8 @@ import ( "flag" "fmt" "log" + "net/http" + _ "net/http/pprof" // Comment this line to disable pprof endpoint. "os" "os/signal" "runtime" @@ -24,6 +26,8 @@ import ( var fDebug = flag.Bool("debug", false, "turn on debug logging") +var pprofAddr = flag.String("pprof-addr", "", + "pprof address to listen on, not activate pprof if empty") var fQuiet = flag.Bool("quiet", false, "run in quiet mode") var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit") @@ -87,6 +91,7 @@ The commands & flags are: --output-filter filter the output plugins to enable, separator is : --usage print usage for a plugin, ie, 'telegraf --usage mysql' --debug print metrics as they're generated to stdout + --pprof-addr pprof address to listen on, format: localhost:6060 or :6060 --quiet run in quiet mode Examples: @@ -105,6 +110,9 @@ Examples: # run telegraf, enabling the cpu & memory input, and influxdb output plugins telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb + + # run telegraf with pprof + telegraf --config telegraf.conf --pprof-addr localhost:6060 ` var stop chan struct{} @@ -267,6 +275,23 @@ func main() { processorFilters = strings.Split(":"+strings.TrimSpace(*fProcessorFilters)+":", ":") } + if *pprofAddr != "" { + go func() { + pprofHostPort := *pprofAddr + parts := strings.Split(pprofHostPort, ":") + if len(parts) == 2 && parts[0] == "" { + pprofHostPort = fmt.Sprintf("localhost:%s", parts[1]) + } + pprofHostPort = "http://" + pprofHostPort + "/debug/pprof" + + log.Printf("I! Starting pprof HTTP server at: %s", pprofHostPort) + + if err := http.ListenAndServe(*pprofAddr, nil); err != nil { + log.Fatal("E! " + err.Error()) + } + }() + } + if len(args) > 0 { switch args[0] { case "version": diff --git a/docs/PROFILING.md b/docs/PROFILING.md new file mode 100644 index 0000000000000..a0851c8f18b12 --- /dev/null +++ b/docs/PROFILING.md @@ -0,0 +1,24 @@ +# Telegraf profiling + +Telegraf uses the standard package `net/http/pprof`. This package serves via its HTTP server runtime profiling data in the format expected by the pprof visualization tool. + +By default, the profiling is turned off. + +To enable profiling you need to specify address to config parameter `pprof-addr`, for example: + +``` +telegraf --config telegraf.conf --pprof-addr localhost:6060 +``` + +There are several paths to get different profiling information: + +To look at the heap profile: + +`go tool pprof http://localhost:6060/debug/pprof/heap` + +or to look at a 30-second CPU profile: + +`go tool pprof http://localhost:6060/debug/pprof/profile?seconds=30` + +To view all available profiles, open `http://localhost:6060/debug/pprof/` in your browser. + From 540f98e2280f5fa85b0cdbd33b16cb95691cf2a4 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 31 Mar 2017 12:45:28 -0700 Subject: [PATCH 043/201] Fix possible deadlock when output cannot write. (#2610) --- CHANGELOG.md | 1 + internal/buffer/buffer.go | 2 ++ 2 files changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9f90157f752f6..cf7c31c4b841f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -89,6 +89,7 @@ be deprecated eventually. 
- [#1827](https://github.com/influxdata/telegraf/issues/1827): Fix Windows Performance Counters multi instance identifier - [#2576](https://github.com/influxdata/telegraf/pull/2576): Add write timeout to Riemann output - [#2596](https://github.com/influxdata/telegraf/pull/2596): fix timestamp parsing on prometheus plugin +- [#2610](https://github.com/influxdata/telegraf/pull/2610): Fix deadlock when output cannot write ## v1.2.1 [2017-02-01] diff --git a/internal/buffer/buffer.go b/internal/buffer/buffer.go index 5e7818ef12740..cdc81fed304d6 100644 --- a/internal/buffer/buffer.go +++ b/internal/buffer/buffer.go @@ -45,9 +45,11 @@ func (b *Buffer) Add(metrics ...telegraf.Metric) { select { case b.buf <- metrics[i]: default: + b.mu.Lock() MetricsDropped.Incr(1) <-b.buf b.buf <- metrics[i] + b.mu.Unlock() } } } From 51c99d5b67f7a6d58d801e4dfcb041f3ade74fb6 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Fri, 31 Mar 2017 17:01:02 -0400 Subject: [PATCH 044/201] add support for linux sysctl fs metrics (#2609) --- CHANGELOG.md | 1 + README.md | 1 + .../inputs/system/LINUX_SYSCTL_FS_README.md | 9 ++ plugins/inputs/system/linux_sysctl_fs.go | 88 +++++++++++++++++++ plugins/inputs/system/linux_sysctl_fs_test.go | 41 +++++++++ 5 files changed, 140 insertions(+) create mode 100644 plugins/inputs/system/LINUX_SYSCTL_FS_README.md create mode 100644 plugins/inputs/system/linux_sysctl_fs.go create mode 100644 plugins/inputs/system/linux_sysctl_fs_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index cf7c31c4b841f..fa4b820c7e341 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -62,6 +62,7 @@ be deprecated eventually. - [#1948](https://github.com/influxdata/telegraf/pull/1948): Support adding SNMP table indexes as tags. - [#2332](https://github.com/influxdata/telegraf/pull/2332): Add Elasticsearch 5.x output - [#2587](https://github.com/influxdata/telegraf/pull/2587): Add json timestamp units configurability +- [#2597](https://github.com/influxdata/telegraf/issues/2597): Add support for Linux sysctl-fs metrics. ### Bugfixes diff --git a/README.md b/README.md index 90686271439aa..55154e36a8994 100644 --- a/README.md +++ b/README.md @@ -174,6 +174,7 @@ configuration options. * processes * kernel (/proc/stat) * kernel (/proc/vmstat) + * linux_sysctl_fs (/proc/sys/fs) Telegraf can also collect metrics via the following service plugins: diff --git a/plugins/inputs/system/LINUX_SYSCTL_FS_README.md b/plugins/inputs/system/LINUX_SYSCTL_FS_README.md new file mode 100644 index 0000000000000..e9341c322d05d --- /dev/null +++ b/plugins/inputs/system/LINUX_SYSCTL_FS_README.md @@ -0,0 +1,9 @@ +# Linux Sysctl FS Input + +The linux_sysctl_fs input provides Linux system level file metrics. The documentation on these fields can be found at https://www.kernel.org/doc/Documentation/sysctl/fs.txt. 
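+
+A minimal configuration stanza; the plugin takes no options as of this patch
+and reads the files under `/proc/sys/fs` directly:
+
+```
+[[inputs.linux_sysctl_fs]]
+```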
+ +Example output: + +``` +> linux_sysctl_fs,host=foo dentry-want-pages=0i,file-max=44222i,aio-max-nr=65536i,inode-preshrink-nr=0i,dentry-nr=64340i,dentry-unused-nr=55274i,file-nr=1568i,aio-nr=0i,inode-nr=35952i,inode-free-nr=12957i,dentry-age-limit=45i 1490982022000000000 +``` diff --git a/plugins/inputs/system/linux_sysctl_fs.go b/plugins/inputs/system/linux_sysctl_fs.go new file mode 100644 index 0000000000000..93e426e759af6 --- /dev/null +++ b/plugins/inputs/system/linux_sysctl_fs.go @@ -0,0 +1,88 @@ +package system + +import ( + "bytes" + "io/ioutil" + "strconv" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// https://www.kernel.org/doc/Documentation/sysctl/fs.txt +type SysctlFS struct { + path string +} + +var sysctlFSDescription = `Provides Linux sysctl fs metrics` +var sysctlFSSampleConfig = `` + +func (_ SysctlFS) Description() string { + return sysctlFSDescription +} +func (_ SysctlFS) SampleConfig() string { + return sysctlFSSampleConfig +} + +func (sfs *SysctlFS) gatherList(file string, fields map[string]interface{}, fieldNames ...string) error { + bs, err := ioutil.ReadFile(sfs.path + "/" + file) + if err != nil { + return err + } + + bsplit := bytes.Split(bytes.TrimRight(bs, "\n"), []byte{'\t'}) + for i, name := range fieldNames { + if i >= len(bsplit) { + break + } + if name == "" { + continue + } + + v, err := strconv.ParseUint(string(bsplit[i]), 10, 64) + if err != nil { + return err + } + fields[name] = v + } + + return nil +} + +func (sfs *SysctlFS) gatherOne(name string, fields map[string]interface{}) error { + bs, err := ioutil.ReadFile(sfs.path + "/" + name) + if err != nil { + return err + } + + v, err := strconv.ParseUint(string(bytes.TrimRight(bs, "\n")), 10, 64) + if err != nil { + return err + } + + fields[name] = v + return nil +} + +func (sfs *SysctlFS) Gather(acc telegraf.Accumulator) error { + fields := map[string]interface{}{} + + for _, n := range []string{"aio-nr", "aio-max-nr", "dquot-nr", "dquot-max", "super-nr", "super-max"} { + sfs.gatherOne(n, fields) + } + + sfs.gatherList("inode-state", fields, "inode-nr", "inode-free-nr", "inode-preshrink-nr") + sfs.gatherList("dentry-state", fields, "dentry-nr", "dentry-unused-nr", "dentry-age-limit", "dentry-want-pages") + sfs.gatherList("file-nr", fields, "file-nr", "", "file-max") + + acc.AddFields("linux_sysctl_fs", fields, nil) + return nil +} + +func init() { + inputs.Add("linux_sysctl_fs", func() telegraf.Input { + return &SysctlFS{ + path: "/proc/sys/fs", + } + }) +} diff --git a/plugins/inputs/system/linux_sysctl_fs_test.go b/plugins/inputs/system/linux_sysctl_fs_test.go new file mode 100644 index 0000000000000..6561465cb1452 --- /dev/null +++ b/plugins/inputs/system/linux_sysctl_fs_test.go @@ -0,0 +1,41 @@ +package system + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestSysctlFSGather(t *testing.T) { + td, err := ioutil.TempDir("", "") + require.NoError(t, err) + defer os.RemoveAll(td) + + require.NoError(t, ioutil.WriteFile(td+"/aio-nr", []byte("100\n"), 0644)) + require.NoError(t, ioutil.WriteFile(td+"/aio-max-nr", []byte("101\n"), 0644)) + require.NoError(t, ioutil.WriteFile(td+"/super-nr", []byte("102\n"), 0644)) + require.NoError(t, ioutil.WriteFile(td+"/super-max", []byte("103\n"), 0644)) + require.NoError(t, ioutil.WriteFile(td+"/file-nr", []byte("104\t0\t106\n"), 0644)) + require.NoError(t, 
ioutil.WriteFile(td+"/inode-state", []byte("107\t108\t109\t0\t0\t0\t0\n"), 0644)) + + sfs := &SysctlFS{ + path: td, + } + var acc testutil.Accumulator + require.NoError(t, sfs.Gather(&acc)) + + acc.AssertContainsFields(t, "linux_sysctl_fs", map[string]interface{}{ + "aio-nr": uint64(100), + "aio-max-nr": uint64(101), + "super-nr": uint64(102), + "super-max": uint64(103), + "file-nr": uint64(104), + "file-max": uint64(106), + "inode-nr": uint64(107), + "inode-free-nr": uint64(108), + "inode-preshrink-nr": uint64(109), + }) +} From 35e439016825b49d6b41223b7494f433c21eceb4 Mon Sep 17 00:00:00 2001 From: Shakeel Sorathia Date: Mon, 3 Apr 2017 13:43:15 -0700 Subject: [PATCH 045/201] Docker: optionally add labels as tags (#2425) --- CHANGELOG.md | 1 + plugins/inputs/docker/README.md | 24 +++++++---- plugins/inputs/docker/docker.go | 61 ++++++++++++++++++++++++---- plugins/inputs/docker/docker_test.go | 55 +++++++++++++++++++++++++ plugins/inputs/docker/fake_client.go | 8 ++++ 5 files changed, 134 insertions(+), 15 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fa4b820c7e341..e7da095d4eba7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -63,6 +63,7 @@ be deprecated eventually. - [#2332](https://github.com/influxdata/telegraf/pull/2332): Add Elasticsearch 5.x output - [#2587](https://github.com/influxdata/telegraf/pull/2587): Add json timestamp units configurability - [#2597](https://github.com/influxdata/telegraf/issues/2597): Add support for Linux sysctl-fs metrics. +- [#2425](https://github.com/influxdata/telegraf/pull/2425): Support to include/exclude docker container labels as tags ### Bugfixes diff --git a/plugins/inputs/docker/README.md b/plugins/inputs/docker/README.md index 94965213ff3c6..849450b33cd40 100644 --- a/plugins/inputs/docker/README.md +++ b/plugins/inputs/docker/README.md @@ -30,6 +30,12 @@ for the stat structure can be found perdevice = true ## Whether to report for each container total blkio and network stats or not total = false + + ## docker labels to include and exclude as tags. Globs accepted. + ## Note that an empty array for both will include all labels as tags + docker_label_include = [] + docker_label_exclude = [] + ``` ### Measurements & Fields: @@ -130,30 +136,32 @@ based on the availability of per-cpu stats on your system. 
### Tags: - +#### Docker Engine tags - docker (memory_total) - unit=bytes + - engine_host - docker (pool_blocksize) - unit=bytes + - engine_host - docker_data - unit=bytes + - engine_host - docker_metadata - unit=bytes + - engine_host -- docker_container_mem specific: +#### Docker Container tags +- Tags on all containers: + - engine_host - container_image - container_name + - container_version +- docker_container_mem specific: - docker_container_cpu specific: - - container_image - - container_name - cpu - docker_container_net specific: - - container_image - - container_name - network - docker_container_blkio specific: - - container_image - - container_name - device ### Example Output: diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index ec192efd5e5d3..47d1db14b09ec 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -14,24 +14,34 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/client" - "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) +type DockerLabelFilter struct { + labelInclude filter.Filter + labelExclude filter.Filter +} + // Docker object type Docker struct { Endpoint string ContainerNames []string Timeout internal.Duration - PerDevice bool `toml:"perdevice"` - Total bool `toml:"total"` + PerDevice bool `toml:"perdevice"` + Total bool `toml:"total"` + LabelInclude []string `toml:"docker_label_include"` + LabelExclude []string `toml:"docker_label_exclude"` + + LabelFilter DockerLabelFilter client *client.Client engine_host string - testing bool + testing bool + labelFiltersCreated bool } // infoWrapper wraps client.Client.List for testing. @@ -99,6 +109,10 @@ var sampleConfig = ` ## Whether to report for each container total blkio and network stats or not total = false + ## docker labels to include and exclude as tags. Globs accepted. 
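+  ## For example, to keep only compose-style labels (hypothetical values):
+  ##   docker_label_include = ["com.docker.compose.*"]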
+ ## Note that an empty array for both will include all labels as tags + docker_label_include = [] + docker_label_exclude = [] ` // Description returns input description @@ -133,6 +147,14 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error { } d.client = c } + // Create label filters if not already created + if !d.labelFiltersCreated { + err := d.createLabelFilters() + if err != nil { + return err + } + d.labelFiltersCreated = true + } // Get daemon info err := d.gatherInfo(acc) @@ -293,7 +315,11 @@ func (d *Docker) gatherContainer( // Add labels to tags for k, label := range container.Labels { - tags[k] = label + if len(d.LabelInclude) == 0 || d.LabelFilter.labelInclude.Match(k) { + if len(d.LabelExclude) == 0 || !d.LabelFilter.labelExclude.Match(k) { + tags[k] = label + } + } } gatherContainerStats(v, acc, tags, container.ID, d.PerDevice, d.Total) @@ -599,11 +625,32 @@ func parseSize(sizeStr string) (int64, error) { return int64(size), nil } +func (d *Docker) createLabelFilters() error { + if len(d.LabelInclude) != 0 && d.LabelFilter.labelInclude == nil { + var err error + d.LabelFilter.labelInclude, err = filter.Compile(d.LabelInclude) + if err != nil { + return err + } + } + + if len(d.LabelExclude) != 0 && d.LabelFilter.labelExclude == nil { + var err error + d.LabelFilter.labelExclude, err = filter.Compile(d.LabelExclude) + if err != nil { + return err + } + } + + return nil +} + func init() { inputs.Add("docker", func() telegraf.Input { return &Docker{ - PerDevice: true, - Timeout: internal.Duration{Duration: time.Second * 5}, + PerDevice: true, + Timeout: internal.Duration{Duration: time.Second * 5}, + labelFiltersCreated: false, } }) } diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go index f0add03ea20c4..3e2e1607b4c08 100644 --- a/plugins/inputs/docker/docker_test.go +++ b/plugins/inputs/docker/docker_test.go @@ -244,6 +244,57 @@ func testStats() *types.StatsJSON { return stats } +var gatherLabelsTests = []struct { + include []string + exclude []string + expected []string + notexpected []string +}{ + {[]string{}, []string{}, []string{"label1", "label2"}, []string{}}, + {[]string{"*"}, []string{}, []string{"label1", "label2"}, []string{}}, + {[]string{"lab*"}, []string{}, []string{"label1", "label2"}, []string{}}, + {[]string{"label1"}, []string{}, []string{"label1"}, []string{"label2"}}, + {[]string{"label1*"}, []string{}, []string{"label1"}, []string{"label2"}}, + {[]string{}, []string{"*"}, []string{}, []string{"label1", "label2"}}, + {[]string{}, []string{"lab*"}, []string{}, []string{"label1", "label2"}}, + {[]string{}, []string{"label1"}, []string{"label2"}, []string{"label1"}}, + {[]string{"*"}, []string{"*"}, []string{}, []string{"label1", "label2"}}, +} + +func TestDockerGatherLabels(t *testing.T) { + for _, tt := range gatherLabelsTests { + var acc testutil.Accumulator + d := Docker{ + client: nil, + testing: true, + } + + for _, label := range tt.include { + d.LabelInclude = append(d.LabelInclude, label) + } + for _, label := range tt.exclude { + d.LabelExclude = append(d.LabelExclude, label) + } + + err := d.Gather(&acc) + require.NoError(t, err) + + for _, label := range tt.expected { + if !acc.HasTag("docker_container_cpu", label) { + t.Errorf("Didn't get expected label of %s. Test was: Include: %s Exclude %s", + label, tt.include, tt.exclude) + } + } + + for _, label := range tt.notexpected { + if acc.HasTag("docker_container_cpu", label) { + t.Errorf("Got unexpected label of %s. 
Test was: Include: %s Exclude %s", + label, tt.include, tt.exclude) + } + } + } +} + func TestDockerGatherInfo(t *testing.T) { var acc testutil.Accumulator d := Docker{ @@ -294,6 +345,8 @@ func TestDockerGatherInfo(t *testing.T) { "cpu": "cpu3", "container_version": "v2.2.2", "engine_host": "absol", + "label1": "test_value_1", + "label2": "test_value_2", }, ) acc.AssertContainsTaggedFields(t, @@ -340,6 +393,8 @@ func TestDockerGatherInfo(t *testing.T) { "container_name": "etcd2", "container_image": "quay.io:4443/coreos/etcd", "container_version": "v2.2.2", + "label1": "test_value_1", + "label2": "test_value_2", }, ) diff --git a/plugins/inputs/docker/fake_client.go b/plugins/inputs/docker/fake_client.go index 03da23198a890..dcca6f235ac25 100644 --- a/plugins/inputs/docker/fake_client.go +++ b/plugins/inputs/docker/fake_client.go @@ -92,6 +92,10 @@ func (d FakeDockerClient) ContainerList(octx context.Context, options types.Cont IP: "0.0.0.0", }, }, + Labels: map[string]string{ + "label1": "test_value_1", + "label2": "test_value_2", + }, SizeRw: 0, SizeRootFs: 0, } @@ -125,6 +129,10 @@ func (d FakeDockerClient) ContainerList(octx context.Context, options types.Cont IP: "0.0.0.0", }, }, + Labels: map[string]string{ + "label1": "test_value_1", + "label2": "test_value_2", + }, SizeRw: 0, SizeRootFs: 0, } From f2805fd4aa8eaba02127e68291427592bb3b0d68 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Mon, 3 Apr 2017 21:06:51 -0400 Subject: [PATCH 046/201] socket_listener: clean up unix socket file on start & stop (#2618) --- .../inputs/socket_listener/socket_listener.go | 35 +++++++++++++++++-- .../socket_listener/socket_listener_test.go | 8 +++-- .../socket_writer/socket_writer_test.go | 2 ++ 3 files changed, 40 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/socket_listener/socket_listener.go b/plugins/inputs/socket_listener/socket_listener.go index 9d3a8e1fe4ece..b5c0202cc1219 100644 --- a/plugins/inputs/socket_listener/socket_listener.go +++ b/plugins/inputs/socket_listener/socket_listener.go @@ -6,6 +6,7 @@ import ( "io" "log" "net" + "os" "strings" "sync" @@ -32,7 +33,9 @@ func (ssl *streamSocketListener) listen() { for { c, err := ssl.Accept() if err != nil { - ssl.AddError(err) + if !strings.HasSuffix(err.Error(), ": use of closed network connection") { + ssl.AddError(err) + } break } @@ -78,7 +81,9 @@ func (ssl *streamSocketListener) read(c net.Conn) { } if err := scnr.Err(); err != nil { - ssl.AddError(err) + if !strings.HasSuffix(err.Error(), ": use of closed network connection") { + ssl.AddError(err) + } } } @@ -92,7 +97,9 @@ func (psl *packetSocketListener) listen() { for { n, _, err := psl.ReadFrom(buf) if err != nil { - psl.AddError(err) + if !strings.HasSuffix(err.Error(), ": use of closed network connection") { + psl.AddError(err) + } break } @@ -170,6 +177,13 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error { return fmt.Errorf("invalid service address: %s", sl.ServiceAddress) } + if spl[0] == "unix" || spl[0] == "unixpacket" || spl[0] == "unixgram" { + // no good way of testing for "file does not exist". + // Instead just ignore error and blow up when we try to listen, which will + // indicate "address already in use" if file existed and we couldn't remove. 
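+		// Only the unix-domain socket types leave an entry on the filesystem,
+		// which is why this cleanup is limited to those address schemes.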
+ os.Remove(spl[1]) + } + switch spl[0] { case "tcp", "tcp4", "tcp6", "unix", "unixpacket": l, err := net.Listen(spl[0], spl[1]) @@ -217,6 +231,10 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error { return fmt.Errorf("unknown protocol '%s' in '%s'", spl[0], sl.ServiceAddress) } + if spl[0] == "unix" || spl[0] == "unixpacket" || spl[0] == "unixgram" { + sl.Closer = unixCloser{path: spl[1], closer: sl.Closer} + } + return nil } @@ -235,6 +253,17 @@ func newSocketListener() *SocketListener { } } +type unixCloser struct { + path string + closer io.Closer +} + +func (uc unixCloser) Close() error { + err := uc.closer.Close() + os.Remove(uc.path) // ignore error + return err +} + func init() { inputs.Add("socket_listener", func() telegraf.Input { return newSocketListener() }) } diff --git a/plugins/inputs/socket_listener/socket_listener_test.go b/plugins/inputs/socket_listener/socket_listener_test.go index 9fa472809b8e3..b263e5082c6da 100644 --- a/plugins/inputs/socket_listener/socket_listener_test.go +++ b/plugins/inputs/socket_listener/socket_listener_test.go @@ -18,6 +18,7 @@ func TestSocketListener_tcp(t *testing.T) { acc := &testutil.Accumulator{} err := sl.Start(acc) require.NoError(t, err) + defer sl.Stop() client, err := net.Dial("tcp", sl.Closer.(net.Listener).Addr().String()) require.NoError(t, err) @@ -32,6 +33,7 @@ func TestSocketListener_udp(t *testing.T) { acc := &testutil.Accumulator{} err := sl.Start(acc) require.NoError(t, err) + defer sl.Stop() client, err := net.Dial("udp", sl.Closer.(net.PacketConn).LocalAddr().String()) require.NoError(t, err) @@ -40,13 +42,14 @@ func TestSocketListener_udp(t *testing.T) { } func TestSocketListener_unix(t *testing.T) { - defer os.Remove("/tmp/telegraf_test.sock") + os.Create("/tmp/telegraf_test.sock") sl := newSocketListener() sl.ServiceAddress = "unix:///tmp/telegraf_test.sock" acc := &testutil.Accumulator{} err := sl.Start(acc) require.NoError(t, err) + defer sl.Stop() client, err := net.Dial("unix", "/tmp/telegraf_test.sock") require.NoError(t, err) @@ -55,13 +58,14 @@ func TestSocketListener_unix(t *testing.T) { } func TestSocketListener_unixgram(t *testing.T) { - defer os.Remove("/tmp/telegraf_test.sock") + os.Create("/tmp/telegraf_test.sock") sl := newSocketListener() sl.ServiceAddress = "unixgram:///tmp/telegraf_test.sock" acc := &testutil.Accumulator{} err := sl.Start(acc) require.NoError(t, err) + defer sl.Stop() client, err := net.Dial("unixgram", "/tmp/telegraf_test.sock") require.NoError(t, err) diff --git a/plugins/outputs/socket_writer/socket_writer_test.go b/plugins/outputs/socket_writer/socket_writer_test.go index 3ab9d1e3432bc..6be2b0905f7a6 100644 --- a/plugins/outputs/socket_writer/socket_writer_test.go +++ b/plugins/outputs/socket_writer/socket_writer_test.go @@ -44,6 +44,7 @@ func TestSocketWriter_udp(t *testing.T) { } func TestSocketWriter_unix(t *testing.T) { + os.Remove("/tmp/telegraf_test.sock") defer os.Remove("/tmp/telegraf_test.sock") listener, err := net.Listen("unix", "/tmp/telegraf_test.sock") require.NoError(t, err) @@ -61,6 +62,7 @@ func TestSocketWriter_unix(t *testing.T) { } func TestSocketWriter_unixgram(t *testing.T) { + os.Remove("/tmp/telegraf_test.sock") defer os.Remove("/tmp/telegraf_test.sock") listener, err := net.ListenPacket("unixgram", "/tmp/telegraf_test.sock") require.NoError(t, err) From 8bf193dc064f27f917d3aeb0549d792e41d8f013 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 3 Apr 2017 18:34:04 -0700 Subject: [PATCH 047/201] Update httpjson documentation (#2619) closes 
#2536 --- plugins/inputs/EXAMPLE_README.md | 2 +- plugins/inputs/httpjson/README.md | 201 +++++++++------------------- plugins/inputs/httpjson/httpjson.go | 17 ++- 3 files changed, 77 insertions(+), 143 deletions(-) diff --git a/plugins/inputs/EXAMPLE_README.md b/plugins/inputs/EXAMPLE_README.md index d6fcfdb918523..a38064a7ad5f2 100644 --- a/plugins/inputs/EXAMPLE_README.md +++ b/plugins/inputs/EXAMPLE_README.md @@ -27,7 +27,7 @@ The example plugin gathers metrics about example things - tag2 - measurement2 has the following tags: - tag3 - + ### Sample Queries: These are some useful queries (to generate dashboards or other) to run against data from this plugin: diff --git a/plugins/inputs/httpjson/README.md b/plugins/inputs/httpjson/README.md index c7c0e67976505..1aa1ad1a4fa91 100644 --- a/plugins/inputs/httpjson/README.md +++ b/plugins/inputs/httpjson/README.md @@ -1,174 +1,110 @@ -# HTTP JSON Plugin +# HTTP JSON Input Plugin -The httpjson plugin can collect data from remote URLs which respond with JSON. Then it flattens JSON and finds all numeric values, treating them as floats. +The httpjson plugin collects data from HTTP URLs which respond with JSON. It flattens the JSON and finds all numeric values, treating them as floats. -For example, if you have a service called _mycollector_, which has HTTP endpoint for gathering stats at http://my.service.com/_stats, you would configure the HTTP JSON plugin like this: +### Configuration: -``` +```toml [[inputs.httpjson]] - name = "mycollector" + ## NOTE This plugin only reads numerical measurements, strings and booleans + ## will be ignored. + + ## Name for the service being polled. Will be appended to the name of the + ## measurement e.g. "httpjson_webserver_stats". + ## + ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead. + name = "webserver_stats" + ## URL of each server in the service's cluster servers = [ - "http://my.service.com/_stats" + "http://localhost:9999/stats/", + "http://localhost:9998/stats/", ] - - # HTTP method to use (case-sensitive) - method = "GET" - - # Set response_timeout (default 5 seconds) + ## Set response_timeout (default 5 seconds) response_timeout = "5s" -``` - -`name` is used as a prefix for the measurements. -`method` specifies HTTP method to use for requests. + ## HTTP method to use: GET or POST (case-sensitive) + method = "GET" -`response_timeout` specifies timeout to wait to get the response + ## Tags to extract from top-level of JSON server response. + # tag_keys = [ + # "my_tag_1", + # "my_tag_2" + # ] -You can also specify which keys from server response should be considered tags: + ## HTTP Request Parameters (all values must be strings). For "GET" requests, data + ## will be included in the query. For "POST" requests, data will be included + ## in the request body as "x-www-form-urlencoded". + # [inputs.httpjson.parameters] + # event_type = "cpu_spike" + # threshold = "0.75" -``` -[[inputs.httpjson]] - ... + ## HTTP Request Headers (all values must be strings). + # [inputs.httpjson.headers] + # X-Auth-Token = "my-xauth-token" + # apiVersion = "v1" - tag_keys = [ - "role", - "version" - ] + ## Optional SSL Config + # ssl_ca = "/etc/telegraf/ca.pem" + # ssl_cert = "/etc/telegraf/cert.pem" + # ssl_key = "/etc/telegraf/key.pem" + ## Use SSL but skip chain & host verification + # insecure_skip_verify = false ``` -If the JSON response is an array of objects, then each object will be parsed with the same configuration. 
+### Measurements & Fields: -You can also specify additional request parameters for the service: +- httpjson + - response_time (float): Response time in seconds -``` -[[inputs.httpjson]] - ... +Additional fields are dependant on the response of the remote service being polled. - [inputs.httpjson.parameters] - event_type = "cpu_spike" - threshold = "0.75" +### Tags: -``` +- All measurements have the following tags: + - server: HTTP origin as defined in configuration as `servers`. -You can also specify additional request header parameters for the service: +Any top level keys listed under `tag_keys` in the configuration are added as tags. Top level keys are defined as keys in the root level of the object in a single object response, or in the root level of each object within an array of objects. -``` -[[inputs.httpjson]] - ... - [inputs.httpjson.headers] - X-Auth-Token = "my-xauth-token" - apiVersion = "v1" -``` +### Examples Output: -# Example: +This plugin understands responses containing a single JSON object, or a JSON Array of Objects. -Let's say that we have a service named "mycollector" configured like this: - -``` -[[inputs.httpjson]] - name = "mycollector" - servers = [ - "http://my.service.com/_stats" - ] - # HTTP method to use (case-sensitive) - method = "GET" - tag_keys = ["service"] -``` - -which responds with the following JSON: +**Object Output:** +Given the following response body: ```json { - "service": "service01", "a": 0.5, "b": { "c": "some text", "d": 0.1, "e": 5 - } + }, + "service": "service01" } ``` +The following metric is produced: -The collected metrics will be: -``` -httpjson_mycollector_a,service='service01',server='http://my.service.com/_stats' value=0.5 -httpjson_mycollector_b_d,service='service01',server='http://my.service.com/_stats' value=0.1 -httpjson_mycollector_b_e,service='service01',server='http://my.service.com/_stats' value=5 -``` - -# Example 2, Multiple Services: +`httpjson,server=http://localhost:9999/stats/ b_d=0.1,a=0.5,b_e=5,response_time=0.001` -There is also the option to collect JSON from multiple services, here is an example doing that. +Note that only numerical values are extracted and the type is float. -``` -[[inputs.httpjson]] - name = "mycollector1" - servers = [ - "http://my.service1.com/_stats" - ] - # HTTP method to use (case-sensitive) - method = "GET" +If `tag_keys` is included in the configuration: +```toml [[inputs.httpjson]] - name = "mycollector2" - servers = [ - "http://service.net/json/stats" - ] - # HTTP method to use (case-sensitive) - method = "POST" -``` - -The services respond with the following JSON: - -mycollector1: -```json -{ - "a": 0.5, - "b": { - "c": "some text", - "d": 0.1, - "e": 5 - } -} -``` - -mycollector2: -```json -{ - "load": 100, - "users": 1335 -} + tag_keys = ["service"] ``` -The collected metrics will be: +Then the `service` tag will also be added: -``` -httpjson_mycollector1_a,server='http://my.service.com/_stats' value=0.5 -httpjson_mycollector1_b_d,server='http://my.service.com/_stats' value=0.1 -httpjson_mycollector1_b_e,server='http://my.service.com/_stats' value=5 +`httpjson,server=http://localhost:9999/stats/,service=service01 b_d=0.1,a=0.5,b_e=5,response_time=0.001` -httpjson_mycollector2_load,server='http://service.net/json/stats' value=100 -httpjson_mycollector2_users,server='http://service.net/json/stats' value=1335 -``` +**Array Output:** -# Example 3, Multiple Metrics in Response: - -The response JSON can be treated as an array of data points that are all parsed with the same configuration. 
- -``` -[[inputs.httpjson]] - name = "mycollector" - servers = [ - "http://my.service.com/_stats" - ] - # HTTP method to use (case-sensitive) - method = "GET" - tag_keys = ["service"] -``` - -which responds with the following JSON: +If the service returns an array of objects, one metric is be created for each object: ```json [ @@ -193,12 +129,5 @@ which responds with the following JSON: ] ``` -The collected metrics will be: -``` -httpjson_mycollector_a,service='service01',server='http://my.service.com/_stats' value=0.5 -httpjson_mycollector_b_d,service='service01',server='http://my.service.com/_stats' value=0.1 -httpjson_mycollector_b_e,service='service01',server='http://my.service.com/_stats' value=5 -httpjson_mycollector_a,service='service02',server='http://my.service.com/_stats' value=0.6 -httpjson_mycollector_b_d,service='service02',server='http://my.service.com/_stats' value=0.2 -httpjson_mycollector_b_e,service='service02',server='http://my.service.com/_stats' value=6 -``` +`httpjson,server=http://localhost:9999/stats/,service=service01 a=0.5,b_d=0.1,b_e=5,response_time=0.003` +`httpjson,server=http://localhost:9999/stats/,service=service02 a=0.6,b_d=0.2,b_e=6,response_time=0.003` diff --git a/plugins/inputs/httpjson/httpjson.go b/plugins/inputs/httpjson/httpjson.go index 89bfccf77a31d..8bfe22bff9ab4 100644 --- a/plugins/inputs/httpjson/httpjson.go +++ b/plugins/inputs/httpjson/httpjson.go @@ -73,7 +73,10 @@ var sampleConfig = ` ## NOTE This plugin only reads numerical measurements, strings and booleans ## will be ignored. - ## a name for the service being polled + ## Name for the service being polled. Will be appended to the name of the + ## measurement e.g. httpjson_webserver_stats + ## + ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead. name = "webserver_stats" ## URL of each server in the service's cluster @@ -93,12 +96,14 @@ var sampleConfig = ` # "my_tag_2" # ] - ## HTTP parameters (all values must be strings) - [inputs.httpjson.parameters] - event_type = "cpu_spike" - threshold = "0.75" + ## HTTP parameters (all values must be strings). For "GET" requests, data + ## will be included in the query. For "POST" requests, data will be included + ## in the request body as "x-www-form-urlencoded". + # [inputs.httpjson.parameters] + # event_type = "cpu_spike" + # threshold = "0.75" - ## HTTP Header parameters (all values must be strings) + ## HTTP Headers (all values must be strings) # [inputs.httpjson.headers] # X-Auth-Token = "my-xauth-token" # apiVersion = "v1" From 5ffc9fd379bcce9185346decf89fd1eab3b64d5e Mon Sep 17 00:00:00 2001 From: James Date: Tue, 4 Apr 2017 20:37:44 -0400 Subject: [PATCH 048/201] fix postgresql connection leak (#2611) --- CHANGELOG.md | 2 +- Godeps | 2 +- plugins/inputs/postgresql/connect.go | 22 ---------------- plugins/inputs/postgresql/postgresql.go | 16 ++++++++---- .../postgresql_extensible.go | 25 +++++++++++-------- 5 files changed, 28 insertions(+), 39 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e7da095d4eba7..043a51a69dea0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -92,7 +92,7 @@ be deprecated eventually. 
- [#2576](https://github.com/influxdata/telegraf/pull/2576): Add write timeout to Riemann output - [#2596](https://github.com/influxdata/telegraf/pull/2596): fix timestamp parsing on prometheus plugin - [#2610](https://github.com/influxdata/telegraf/pull/2610): Fix deadlock when output cannot write - +- [#2410](https://github.com/influxdata/telegraf/issues/2410): Fix connection leak in postgresql. ## v1.2.1 [2017-02-01] diff --git a/Godeps b/Godeps index 9717cec2fd5f1..ab72be8f2d314 100644 --- a/Godeps +++ b/Godeps @@ -24,7 +24,7 @@ github.com/hashicorp/consul 63d2fc68239b996096a1c55a0d4b400ea4c2583f github.com/influxdata/tail e9ef7e826dafcb3093b40b989fefa90eeb9a8ca1 github.com/influxdata/toml 5d1d907f22ead1cd47adde17ceec5bda9cacaf8f github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec -github.com/jackc/pgx c8080fc4a1bfa44bf90383ad0fdce2f68b7d313c +github.com/jackc/pgx b84338d7d62598f75859b2b146d830b22f1b9ec8 github.com/kardianos/osext c2c54e542fb797ad986b31721e1baedf214ca413 github.com/kardianos/service 6d3a0ee7d3425d9d835debc51a0ca1ffa28f4893 github.com/kballard/go-shellquote d8ec1a69a250a17bb0e419c386eac1f3711dc142 diff --git a/plugins/inputs/postgresql/connect.go b/plugins/inputs/postgresql/connect.go index 77858cda2298a..011ae32e003b2 100644 --- a/plugins/inputs/postgresql/connect.go +++ b/plugins/inputs/postgresql/connect.go @@ -1,15 +1,11 @@ package postgresql import ( - "database/sql" "fmt" "net" "net/url" "sort" "strings" - - "github.com/jackc/pgx" - "github.com/jackc/pgx/stdlib" ) // pulled from lib/pq @@ -79,21 +75,3 @@ func ParseURL(uri string) (string, error) { sort.Strings(kvs) // Makes testing easier (not a performance concern) return strings.Join(kvs, " "), nil } - -func Connect(address string) (*sql.DB, error) { - if strings.HasPrefix(address, "postgres://") || strings.HasPrefix(address, "postgresql://") { - return sql.Open("pgx", address) - } - - config, err := pgx.ParseDSN(address) - if err != nil { - return nil, err - } - - pool, err := pgx.NewConnPool(pgx.ConnPoolConfig{ConnConfig: config}) - if err != nil { - return nil, err - } - - return stdlib.OpenFromConnPool(pool) -} diff --git a/plugins/inputs/postgresql/postgresql.go b/plugins/inputs/postgresql/postgresql.go index 7c854dfd3885a..832c433ed9fdf 100644 --- a/plugins/inputs/postgresql/postgresql.go +++ b/plugins/inputs/postgresql/postgresql.go @@ -2,11 +2,15 @@ package postgresql import ( "bytes" + "database/sql" "fmt" "regexp" "sort" "strings" + // register in driver. 
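+	// The blank import below registers the "pgx" driver with database/sql,
+	// so sql.Open("pgx", ...) can be used instead of managing a pgx
+	// connection pool by hand.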
+ _ "github.com/jackc/pgx/stdlib" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -62,17 +66,19 @@ func (p *Postgresql) IgnoredColumns() map[string]bool { var localhost = "host=localhost sslmode=disable" func (p *Postgresql) Gather(acc telegraf.Accumulator) error { - var query string + var ( + err error + db *sql.DB + query string + ) if p.Address == "" || p.Address == "localhost" { p.Address = localhost } - db, err := Connect(p.Address) - if err != nil { + if db, err = sql.Open("pgx", p.Address); err != nil { return err } - defer db.Close() if len(p.Databases) == 0 && len(p.IgnoredDatabases) == 0 { @@ -107,7 +113,7 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error { return err } } - //return rows.Err() + query = `SELECT * FROM pg_stat_bgwriter` bg_writer_row, err := db.Query(query) diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible.go b/plugins/inputs/postgresql_extensible/postgresql_extensible.go index 00729bf7531d5..b8d3be625c8e5 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible.go @@ -2,11 +2,15 @@ package postgresql_extensible import ( "bytes" + "database/sql" "fmt" "log" "regexp" "strings" + // register in driver. + _ "github.com/jackc/pgx/stdlib" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs/postgresql" @@ -112,23 +116,24 @@ func (p *Postgresql) IgnoredColumns() map[string]bool { var localhost = "host=localhost sslmode=disable" func (p *Postgresql) Gather(acc telegraf.Accumulator) error { - - var sql_query string - var query_addon string - var db_version int - var query string - var tag_value string - var meas_name string + var ( + err error + db *sql.DB + sql_query string + query_addon string + db_version int + query string + tag_value string + meas_name string + ) if p.Address == "" || p.Address == "localhost" { p.Address = localhost } - db, err := postgresql.Connect(p.Address) - if err != nil { + if db, err = sql.Open("pgx", p.Address); err != nil { return err } - defer db.Close() // Retreiving the database version From c9f8308f27aa849332f08acf1930a0781849d882 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 6 Apr 2017 12:06:08 -0700 Subject: [PATCH 049/201] Update filtering documentation (#2631) --- docs/CONFIGURATION.md | 59 +++++++++++++++++++++++++------------------ 1 file changed, 34 insertions(+), 25 deletions(-) diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index ff4814b822f12..ad6e903fcf011 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -124,31 +124,40 @@ is not specified then processor execution order will be random. Filters can be configured per input, output, processor, or aggregator, see below for examples. -* **namepass**: An array of strings that is used to filter metrics generated by the -current input. Each string in the array is tested as a glob match against -measurement names and if it matches, the field is emitted. -* **namedrop**: The inverse of pass, if a measurement name matches, it is not emitted. -* **fieldpass**: An array of strings that is used to filter metrics generated by the -current input. Each string in the array is tested as a glob match against field names -and if it matches, the field is emitted. fieldpass is not available for outputs. 
-* **fielddrop**: The inverse of pass, if a field name matches, it is not emitted. -fielddrop is not available for outputs. -* **tagpass**: tag names and arrays of strings that are used to filter -measurements by the current input. Each string in the array is tested as a glob -match against the tag name, and if it matches the measurement is emitted. -* **tagdrop**: The inverse of tagpass. If a tag matches, the measurement is not -emitted. This is tested on measurements that have passed the tagpass test. -* **tagexclude**: tagexclude can be used to exclude a tag from measurement(s). -As opposed to tagdrop, which will drop an entire measurement based on it's -tags, tagexclude simply strips the given tag keys from the measurement. This -can be used on inputs & outputs, but it is _recommended_ to be used on inputs, -as it is more efficient to filter out tags at the ingestion point. -* **taginclude**: taginclude is the inverse of tagexclude. It will only include -the tag keys in the final measurement. - -**NOTE** `tagpass` and `tagdrop` parameters must be defined at the _end_ of -the plugin definition, otherwise subsequent plugin config options will be -interpreted as part of the tagpass/tagdrop map. +* **namepass**: +An array of glob pattern strings. Only points whose measurement name matches +a pattern in this list are emitted. +* **namedrop**: +The inverse of `namepass`. If a match is found the point is discarded. This +is tested on points after they have passed the `namepass` test. +* **fieldpass**: +An array of glob pattern strings. Only fields whose field key matches a +pattern in this list are emitted. Not available for outputs. +* **fielddrop**: +The inverse of `fieldpass`. Fields with a field key matching one of the +patterns will be discarded from the point. Not available for outputs. +* **tagpass**: +A table mapping tag keys to arrays of glob pattern strings. Only points +that contain a tag key in the table and a tag value matching one of its +patterns is emitted. +* **tagdrop**: +The inverse of `tagpass`. If a match is found the point is discarded. This +is tested on points after they have passed the `tagpass` test. +* **taginclude**: +An array of glob pattern strings. Only tags with a tag key matching one of +the patterns are emitted. In contrast to `tagpass`, which will pass an entire +point based on its tag, `taginclude` removes all non matching tags from the +point. This filter can be used on both inputs & outputs, but it is +_recommended_ to be used on inputs, as it is more efficient to filter out tags +at the ingestion point. +* **tagexclude**: +The inverse of `taginclude`. Tags with a tag key matching one of the patterns +will be discarded from the point. + +**NOTE** Due to the way TOML is parsed, `tagpass` and `tagdrop` parameters +must be defined at the _end_ of the plugin definition, otherwise subsequent +plugin config options will be interpreted as part of the tagpass/tagdrop +tables. 
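+
+For example, a single illustrative input definition (the `cpu` input is only a
+stand-in here) combining several of these options might look like:
+
+```toml
+[[inputs.cpu]]
+  percpu = true
+  totalcpu = false
+  fielddrop = ["time_*"]
+  taginclude = ["cpu", "host"]
+  # tagpass/tagdrop tables must come last
+  [inputs.cpu.tagpass]
+    cpu = ["cpu0", "cpu1"]
+```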
#### Input Configuration Examples From 92fa20cef2da6c502c93bf5b1607c7ef9bca5742 Mon Sep 17 00:00:00 2001 From: Victor Yunevich Date: Fri, 7 Apr 2017 00:40:34 +0300 Subject: [PATCH 050/201] ipmi_sensor: allow @ symbol in password (#2633) --- CHANGELOG.md | 1 + plugins/inputs/ipmi_sensor/connection.go | 2 +- plugins/inputs/ipmi_sensor/connection_test.go | 42 +++++++++++++++++++ 3 files changed, 44 insertions(+), 1 deletion(-) create mode 100644 plugins/inputs/ipmi_sensor/connection_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 043a51a69dea0..333963bd59f85 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -67,6 +67,7 @@ be deprecated eventually. ### Bugfixes +- [#2633](https://github.com/influxdata/telegraf/pull/2633): ipmi_sensor: allow @ symbol in password - [#2077](https://github.com/influxdata/telegraf/issues/2077): SQL Server Input - Arithmetic overflow error converting numeric to data type int. - [#2262](https://github.com/influxdata/telegraf/issues/2262): Flush jitter can inhibit metric collection. - [#2318](https://github.com/influxdata/telegraf/issues/2318): haproxy input - Add missing fields. diff --git a/plugins/inputs/ipmi_sensor/connection.go b/plugins/inputs/ipmi_sensor/connection.go index 432b4aa02aad3..b93cda7d4763f 100644 --- a/plugins/inputs/ipmi_sensor/connection.go +++ b/plugins/inputs/ipmi_sensor/connection.go @@ -18,7 +18,7 @@ type Connection struct { func NewConnection(server string) *Connection { conn := &Connection{} - inx1 := strings.Index(server, "@") + inx1 := strings.LastIndex(server, "@") inx2 := strings.Index(server, "(") inx3 := strings.Index(server, ")") diff --git a/plugins/inputs/ipmi_sensor/connection_test.go b/plugins/inputs/ipmi_sensor/connection_test.go new file mode 100644 index 0000000000000..13a62061daa6a --- /dev/null +++ b/plugins/inputs/ipmi_sensor/connection_test.go @@ -0,0 +1,42 @@ +package ipmi_sensor + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +type conTest struct { + Got string + Want *Connection +} + +func TestNewConnection(t *testing.T) { + testData := []struct { + addr string + con *Connection + }{ + { + "USERID:PASSW0RD@lan(192.168.1.1)", + &Connection{ + Hostname: "192.168.1.1", + Username: "USERID", + Password: "PASSW0RD", + Interface: "lan", + }, + }, + { + "USERID:PASS:!@#$%^&*(234)_+W0RD@lan(192.168.1.1)", + &Connection{ + Hostname: "192.168.1.1", + Username: "USERID", + Password: "PASS:!@#$%^&*(234)_+W0RD", + Interface: "lan", + }, + }, + } + + for _, v := range testData { + assert.Equal(t, v.con, NewConnection(v.addr)) + } +} From 7cc4ca23418b2ccb8caa96dc1f10125d7b6a0e8d Mon Sep 17 00:00:00 2001 From: Rajaseelan Ganeswaran Date: Fri, 7 Apr 2017 05:44:02 +0800 Subject: [PATCH 051/201] Add sample config stanza for CPU (#2620) --- plugins/inputs/system/CPU_README.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/plugins/inputs/system/CPU_README.md b/plugins/inputs/system/CPU_README.md index 26eb7ffbe2fac..01d57855bdf99 100644 --- a/plugins/inputs/system/CPU_README.md +++ b/plugins/inputs/system/CPU_README.md @@ -4,6 +4,18 @@ - **totalcpu** boolean: If true, include `cpu-total` data - **percpu** boolean: If true, include data on a per-cpu basis `cpu0, cpu1, etc.` + +##### Configuration: +``` +[[inputs.cpu]] + ## Whether to report per-cpu stats or not + percpu = true + ## Whether to report total system cpu stats or not + totalcpu = true + ## If true, collect raw CPU time metrics. 
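+  ## (adds time_user, time_system, time_idle, etc. alongside the usage_* fields)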
+ collect_cpu_time = false +``` + #### Description The CPU plugin collects standard CPU metrics as defined in `man proc`. All From aa722fac9b6585069d405a9bc1772d4900d971b0 Mon Sep 17 00:00:00 2001 From: Vladimir S Date: Sat, 8 Apr 2017 01:39:43 +0300 Subject: [PATCH 052/201] Add dmcache input plugin (#1667) --- CHANGELOG.md | 1 + plugins/inputs/all/all.go | 1 + plugins/inputs/dmcache/README.md | 47 +++++ plugins/inputs/dmcache/dmcache.go | 33 ++++ plugins/inputs/dmcache/dmcache_linux.go | 190 +++++++++++++++++++++ plugins/inputs/dmcache/dmcache_notlinux.go | 15 ++ plugins/inputs/dmcache/dmcache_test.go | 169 ++++++++++++++++++ 7 files changed, 456 insertions(+) create mode 100644 plugins/inputs/dmcache/README.md create mode 100644 plugins/inputs/dmcache/dmcache.go create mode 100644 plugins/inputs/dmcache/dmcache_linux.go create mode 100644 plugins/inputs/dmcache/dmcache_notlinux.go create mode 100644 plugins/inputs/dmcache/dmcache_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 333963bd59f85..46d8b57d5b1d6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -64,6 +64,7 @@ be deprecated eventually. - [#2587](https://github.com/influxdata/telegraf/pull/2587): Add json timestamp units configurability - [#2597](https://github.com/influxdata/telegraf/issues/2597): Add support for Linux sysctl-fs metrics. - [#2425](https://github.com/influxdata/telegraf/pull/2425): Support to include/exclude docker container labels as tags +- [#1667](https://github.com/influxdata/telegraf/pull/1667): dmcache input plugin ### Bugfixes diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index a9147c53ed153..983179e903bdc 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -15,6 +15,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/couchbase" _ "github.com/influxdata/telegraf/plugins/inputs/couchdb" _ "github.com/influxdata/telegraf/plugins/inputs/disque" + _ "github.com/influxdata/telegraf/plugins/inputs/dmcache" _ "github.com/influxdata/telegraf/plugins/inputs/dns_query" _ "github.com/influxdata/telegraf/plugins/inputs/docker" _ "github.com/influxdata/telegraf/plugins/inputs/dovecot" diff --git a/plugins/inputs/dmcache/README.md b/plugins/inputs/dmcache/README.md new file mode 100644 index 0000000000000..536d3f518bcaa --- /dev/null +++ b/plugins/inputs/dmcache/README.md @@ -0,0 +1,47 @@ +# DMCache Input Plugin + +This plugin provide a native collection for dmsetup based statistics for dm-cache. + +This plugin requires sudo, that is why you should setup and be sure that the telegraf is able to execute sudo without a password. + +`sudo /sbin/dmsetup status --target cache` is the full command that telegraf will run for debugging purposes. 
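+
+For example, a sudoers entry along these lines (adjust the user and the path to
+dmsetup for your system) grants that single command without a password:
+
+```
+telegraf ALL=(root) NOPASSWD: /sbin/dmsetup status --target cache
+```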
+ +### Configuration + +```toml +[[inputs.dmcache]] + ## Whether to report per-device stats or not + per_device = true +``` + +### Measurements & Fields: + +- dmcache + - length + - target + - metadata_blocksize + - metadata_used + - metadata_total + - cache_blocksize + - cache_used + - cache_total + - read_hits + - read_misses + - write_hits + - write_misses + - demotions + - promotions + - dirty + +### Tags: + +- All measurements have the following tags: + - device + +### Example Output: + +``` +$ ./telegraf --test --config /etc/telegraf/telegraf.conf --input-filter dmcache +* Plugin: inputs.dmcache, Collection 1 +> dmcache,device=example cache_blocksize=0i,read_hits=995134034411520i,read_misses=916807089127424i,write_hits=195107267543040i,metadata_used=12861440i,write_misses=563725346013184i,promotions=3265223720960i,dirty=0i,metadata_blocksize=0i,cache_used=1099511627776ii,cache_total=0i,length=0i,metadata_total=1073741824i,demotions=3265223720960i 1491482035000000000 +``` diff --git a/plugins/inputs/dmcache/dmcache.go b/plugins/inputs/dmcache/dmcache.go new file mode 100644 index 0000000000000..25a398194edf8 --- /dev/null +++ b/plugins/inputs/dmcache/dmcache.go @@ -0,0 +1,33 @@ +package dmcache + +import ( + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +type DMCache struct { + PerDevice bool `toml:"per_device"` + getCurrentStatus func() ([]string, error) +} + +var sampleConfig = ` + ## Whether to report per-device stats or not + per_device = true +` + +func (c *DMCache) SampleConfig() string { + return sampleConfig +} + +func (c *DMCache) Description() string { + return "Provide a native collection for dmsetup based statistics for dm-cache" +} + +func init() { + inputs.Add("dmcache", func() telegraf.Input { + return &DMCache{ + PerDevice: true, + getCurrentStatus: dmSetupStatus, + } + }) +} diff --git a/plugins/inputs/dmcache/dmcache_linux.go b/plugins/inputs/dmcache/dmcache_linux.go new file mode 100644 index 0000000000000..7ac1c96cae0f1 --- /dev/null +++ b/plugins/inputs/dmcache/dmcache_linux.go @@ -0,0 +1,190 @@ +// +build linux + +package dmcache + +import ( + "os/exec" + "strconv" + "strings" + + "errors" + + "github.com/influxdata/telegraf" +) + +const metricName = "dmcache" + +type cacheStatus struct { + device string + length int + target string + metadataBlocksize int + metadataUsed int + metadataTotal int + cacheBlocksize int + cacheUsed int + cacheTotal int + readHits int + readMisses int + writeHits int + writeMisses int + demotions int + promotions int + dirty int +} + +func (c *DMCache) Gather(acc telegraf.Accumulator) error { + outputLines, err := c.getCurrentStatus() + if err != nil { + return err + } + + totalStatus := cacheStatus{} + + for _, s := range outputLines { + status, err := parseDMSetupStatus(s) + if err != nil { + return err + } + + if c.PerDevice { + tags := map[string]string{"device": status.device} + acc.AddFields(metricName, toFields(status), tags) + } + aggregateStats(&totalStatus, status) + } + + acc.AddFields(metricName, toFields(totalStatus), map[string]string{"device": "all"}) + + return nil +} + +func parseDMSetupStatus(line string) (cacheStatus, error) { + var err error + parseError := errors.New("Output from dmsetup could not be parsed") + status := cacheStatus{} + values := strings.Fields(line) + if len(values) < 15 { + return cacheStatus{}, parseError + } + + status.device = strings.TrimRight(values[0], ":") + status.length, err = strconv.Atoi(values[2]) + if err != nil { + 
return cacheStatus{}, err + } + status.target = values[3] + status.metadataBlocksize, err = strconv.Atoi(values[4]) + if err != nil { + return cacheStatus{}, err + } + metadata := strings.Split(values[5], "/") + if len(metadata) != 2 { + return cacheStatus{}, parseError + } + status.metadataUsed, err = strconv.Atoi(metadata[0]) + if err != nil { + return cacheStatus{}, err + } + status.metadataTotal, err = strconv.Atoi(metadata[1]) + if err != nil { + return cacheStatus{}, err + } + status.cacheBlocksize, err = strconv.Atoi(values[6]) + if err != nil { + return cacheStatus{}, err + } + cache := strings.Split(values[7], "/") + if len(cache) != 2 { + return cacheStatus{}, parseError + } + status.cacheUsed, err = strconv.Atoi(cache[0]) + if err != nil { + return cacheStatus{}, err + } + status.cacheTotal, err = strconv.Atoi(cache[1]) + if err != nil { + return cacheStatus{}, err + } + status.readHits, err = strconv.Atoi(values[8]) + if err != nil { + return cacheStatus{}, err + } + status.readMisses, err = strconv.Atoi(values[9]) + if err != nil { + return cacheStatus{}, err + } + status.writeHits, err = strconv.Atoi(values[10]) + if err != nil { + return cacheStatus{}, err + } + status.writeMisses, err = strconv.Atoi(values[11]) + if err != nil { + return cacheStatus{}, err + } + status.demotions, err = strconv.Atoi(values[12]) + if err != nil { + return cacheStatus{}, err + } + status.promotions, err = strconv.Atoi(values[13]) + if err != nil { + return cacheStatus{}, err + } + status.dirty, err = strconv.Atoi(values[14]) + if err != nil { + return cacheStatus{}, err + } + + return status, nil +} + +func aggregateStats(totalStatus *cacheStatus, status cacheStatus) { + totalStatus.length += status.length + totalStatus.metadataBlocksize += status.metadataBlocksize + totalStatus.metadataUsed += status.metadataUsed + totalStatus.metadataTotal += status.metadataTotal + totalStatus.cacheBlocksize += status.cacheBlocksize + totalStatus.cacheUsed += status.cacheUsed + totalStatus.cacheTotal += status.cacheTotal + totalStatus.readHits += status.readHits + totalStatus.readMisses += status.readMisses + totalStatus.writeHits += status.writeHits + totalStatus.writeMisses += status.writeMisses + totalStatus.demotions += status.demotions + totalStatus.promotions += status.promotions + totalStatus.dirty += status.dirty +} + +func toFields(status cacheStatus) map[string]interface{} { + fields := make(map[string]interface{}) + fields["length"] = status.length + fields["metadata_blocksize"] = status.metadataBlocksize + fields["metadata_used"] = status.metadataUsed + fields["metadata_total"] = status.metadataTotal + fields["cache_blocksize"] = status.cacheBlocksize + fields["cache_used"] = status.cacheUsed + fields["cache_total"] = status.cacheTotal + fields["read_hits"] = status.readHits + fields["read_misses"] = status.readMisses + fields["write_hits"] = status.writeHits + fields["write_misses"] = status.writeMisses + fields["demotions"] = status.demotions + fields["promotions"] = status.promotions + fields["dirty"] = status.dirty + return fields +} + +func dmSetupStatus() ([]string, error) { + out, err := exec.Command("/bin/sh", "-c", "sudo /sbin/dmsetup status --target cache").Output() + if err != nil { + return nil, err + } + if string(out) == "No devices found\n" { + return []string{}, nil + } + + outString := strings.TrimRight(string(out), "\n") + status := strings.Split(outString, "\n") + + return status, nil +} diff --git a/plugins/inputs/dmcache/dmcache_notlinux.go 
b/plugins/inputs/dmcache/dmcache_notlinux.go new file mode 100644 index 0000000000000..ee1065638cab7 --- /dev/null +++ b/plugins/inputs/dmcache/dmcache_notlinux.go @@ -0,0 +1,15 @@ +// +build !linux + +package dmcache + +import ( + "github.com/influxdata/telegraf" +) + +func (c *DMCache) Gather(acc telegraf.Accumulator) error { + return nil +} + +func dmSetupStatus() ([]string, error) { + return []string{}, nil +} diff --git a/plugins/inputs/dmcache/dmcache_test.go b/plugins/inputs/dmcache/dmcache_test.go new file mode 100644 index 0000000000000..c5989c413d9c0 --- /dev/null +++ b/plugins/inputs/dmcache/dmcache_test.go @@ -0,0 +1,169 @@ +package dmcache + +import ( + "errors" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +var ( + measurement = "dmcache" + badFormatOutput = []string{"cs-1: 0 4883791872 cache 8 1018/1501122 512 7/464962 139 352643 "} + good2DevicesFormatOutput = []string{ + "cs-1: 0 4883791872 cache 8 1018/1501122 512 7/464962 139 352643 15 46 0 7 0 1 writeback 2 migration_threshold 2048 mq 10 random_threshold 4 sequential_threshold 512 discard_promote_adjustment 1 read_promote_adjustment 4 write_promote_adjustment 8", + "cs-2: 0 4294967296 cache 8 72352/1310720 128 26/24327168 2409 286 265 524682 0 0 0 1 writethrough 2 migration_threshold 2048 mq 10 random_threshold 4 sequential_threshold 512 discard_promote_adjustment 1 read_promote_adjustment 4 write_promote_adjustment 8", + } +) + +func TestPerDeviceGoodOutput(t *testing.T) { + var acc testutil.Accumulator + var plugin = &DMCache{ + PerDevice: true, + getCurrentStatus: func() ([]string, error) { + return good2DevicesFormatOutput, nil + }, + } + + err := plugin.Gather(&acc) + require.NoError(t, err) + + tags1 := map[string]string{ + "device": "cs-1", + } + fields1 := map[string]interface{}{ + "length": 4883791872, + "metadata_blocksize": 8, + "metadata_used": 1018, + "metadata_total": 1501122, + "cache_blocksize": 512, + "cache_used": 7, + "cache_total": 464962, + "read_hits": 139, + "read_misses": 352643, + "write_hits": 15, + "write_misses": 46, + "demotions": 0, + "promotions": 7, + "dirty": 0, + } + acc.AssertContainsTaggedFields(t, measurement, fields1, tags1) + + tags2 := map[string]string{ + "device": "cs-2", + } + fields2 := map[string]interface{}{ + "length": 4294967296, + "metadata_blocksize": 8, + "metadata_used": 72352, + "metadata_total": 1310720, + "cache_blocksize": 128, + "cache_used": 26, + "cache_total": 24327168, + "read_hits": 2409, + "read_misses": 286, + "write_hits": 265, + "write_misses": 524682, + "demotions": 0, + "promotions": 0, + "dirty": 0, + } + acc.AssertContainsTaggedFields(t, measurement, fields2, tags2) + + tags3 := map[string]string{ + "device": "all", + } + + fields3 := map[string]interface{}{ + "length": 9178759168, + "metadata_blocksize": 16, + "metadata_used": 73370, + "metadata_total": 2811842, + "cache_blocksize": 640, + "cache_used": 33, + "cache_total": 24792130, + "read_hits": 2548, + "read_misses": 352929, + "write_hits": 280, + "write_misses": 524728, + "demotions": 0, + "promotions": 7, + "dirty": 0, + } + acc.AssertContainsTaggedFields(t, measurement, fields3, tags3) +} + +func TestNotPerDeviceGoodOutput(t *testing.T) { + var acc testutil.Accumulator + var plugin = &DMCache{ + PerDevice: false, + getCurrentStatus: func() ([]string, error) { + return good2DevicesFormatOutput, nil + }, + } + + err := plugin.Gather(&acc) + require.NoError(t, err) + + tags := map[string]string{ + "device": "all", + 
} + + fields := map[string]interface{}{ + "length": 9178759168, + "metadata_blocksize": 16, + "metadata_used": 73370, + "metadata_total": 2811842, + "cache_blocksize": 640, + "cache_used": 33, + "cache_total": 24792130, + "read_hits": 2548, + "read_misses": 352929, + "write_hits": 280, + "write_misses": 524728, + "demotions": 0, + "promotions": 7, + "dirty": 0, + } + acc.AssertContainsTaggedFields(t, measurement, fields, tags) +} + +func TestNoDevicesOutput(t *testing.T) { + var acc testutil.Accumulator + var plugin = &DMCache{ + PerDevice: true, + getCurrentStatus: func() ([]string, error) { + return []string{}, nil + }, + } + + err := plugin.Gather(&acc) + require.NoError(t, err) +} + +func TestErrorDuringGettingStatus(t *testing.T) { + var acc testutil.Accumulator + var plugin = &DMCache{ + PerDevice: true, + getCurrentStatus: func() ([]string, error) { + return nil, errors.New("dmsetup doesn't exist") + }, + } + + err := plugin.Gather(&acc) + require.Error(t, err) +} + +func TestBadFormatOfStatus(t *testing.T) { + var acc testutil.Accumulator + var plugin = &DMCache{ + PerDevice: true, + getCurrentStatus: func() ([]string, error) { + return badFormatOutput, nil + }, + } + + err := plugin.Gather(&acc) + require.Error(t, err) +} From 07c428ef89e225a480a7835f32890f4db3cd534d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 10 Apr 2017 14:33:17 -0700 Subject: [PATCH 053/201] Use random port in http_listener tests --- plugins/inputs/http_listener/http_listener.go | 2 + .../http_listener/http_listener_test.go | 59 +++++++++++-------- 2 files changed, 37 insertions(+), 24 deletions(-) diff --git a/plugins/inputs/http_listener/http_listener.go b/plugins/inputs/http_listener/http_listener.go index 0f426f8093cf5..f0ad5752e6bf2 100644 --- a/plugins/inputs/http_listener/http_listener.go +++ b/plugins/inputs/http_listener/http_listener.go @@ -35,6 +35,7 @@ type HTTPListener struct { WriteTimeout internal.Duration MaxBodySize int64 MaxLineSize int + Port int mu sync.Mutex wg sync.WaitGroup @@ -124,6 +125,7 @@ func (h *HTTPListener) Start(acc telegraf.Accumulator) error { return err } h.listener = listener + h.Port = listener.Addr().(*net.TCPAddr).Port h.wg.Add(1) go func() { diff --git a/plugins/inputs/http_listener/http_listener_test.go b/plugins/inputs/http_listener/http_listener_test.go index 7e6fbc8abfdda..41c0e9db8f0b7 100644 --- a/plugins/inputs/http_listener/http_listener_test.go +++ b/plugins/inputs/http_listener/http_listener_test.go @@ -4,6 +4,8 @@ import ( "bytes" "io/ioutil" "net/http" + "net/url" + "strconv" "sync" "testing" @@ -30,11 +32,21 @@ cpu_load_short,host=server06 value=12.0 1422568543702900257 func newTestHTTPListener() *HTTPListener { listener := &HTTPListener{ - ServiceAddress: ":8186", + ServiceAddress: ":0", } return listener } +func createURL(listener *HTTPListener, path string, rawquery string) string { + u := url.URL{ + Scheme: "http", + Host: "localhost:" + strconv.Itoa(listener.Port), + Path: path, + RawQuery: rawquery, + } + return u.String() +} + func TestWriteHTTP(t *testing.T) { listener := newTestHTTPListener() @@ -43,7 +55,7 @@ func TestWriteHTTP(t *testing.T) { defer listener.Stop() // post single message to listener - resp, err := http.Post("http://localhost:8186/write?db=mydb", "", bytes.NewBuffer([]byte(testMsg))) + resp, err := http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) require.EqualValues(t, 204, resp.StatusCode) @@ -54,7 +66,7 @@ func TestWriteHTTP(t *testing.T) { ) // post 
multiple message to listener - resp, err = http.Post("http://localhost:8186/write?db=mydb", "", bytes.NewBuffer([]byte(testMsgs))) + resp, err = http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs))) require.NoError(t, err) require.EqualValues(t, 204, resp.StatusCode) @@ -69,7 +81,7 @@ func TestWriteHTTP(t *testing.T) { } // Post a gigantic metric to the listener and verify that an error is returned: - resp, err = http.Post("http://localhost:8186/write?db=mydb", "", bytes.NewBuffer([]byte(hugeMetric))) + resp, err = http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(hugeMetric))) require.NoError(t, err) require.EqualValues(t, 400, resp.StatusCode) @@ -89,7 +101,7 @@ func TestWriteHTTPNoNewline(t *testing.T) { defer listener.Stop() // post single message to listener - resp, err := http.Post("http://localhost:8186/write?db=mydb", "", bytes.NewBuffer([]byte(testMsgNoNewline))) + resp, err := http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgNoNewline))) require.NoError(t, err) require.EqualValues(t, 204, resp.StatusCode) @@ -102,7 +114,7 @@ func TestWriteHTTPNoNewline(t *testing.T) { func TestWriteHTTPMaxLineSizeIncrease(t *testing.T) { listener := &HTTPListener{ - ServiceAddress: ":8296", + ServiceAddress: ":0", MaxLineSize: 128 * 1000, } @@ -111,14 +123,14 @@ func TestWriteHTTPMaxLineSizeIncrease(t *testing.T) { defer listener.Stop() // Post a gigantic metric to the listener and verify that it writes OK this time: - resp, err := http.Post("http://localhost:8296/write?db=mydb", "", bytes.NewBuffer([]byte(hugeMetric))) + resp, err := http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(hugeMetric))) require.NoError(t, err) require.EqualValues(t, 204, resp.StatusCode) } func TestWriteHTTPVerySmallMaxBody(t *testing.T) { listener := &HTTPListener{ - ServiceAddress: ":8297", + ServiceAddress: ":0", MaxBodySize: 4096, } @@ -126,14 +138,14 @@ func TestWriteHTTPVerySmallMaxBody(t *testing.T) { require.NoError(t, listener.Start(acc)) defer listener.Stop() - resp, err := http.Post("http://localhost:8297/write", "", bytes.NewBuffer([]byte(hugeMetric))) + resp, err := http.Post(createURL(listener, "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric))) require.NoError(t, err) require.EqualValues(t, 413, resp.StatusCode) } func TestWriteHTTPVerySmallMaxLineSize(t *testing.T) { listener := &HTTPListener{ - ServiceAddress: ":8298", + ServiceAddress: ":0", MaxLineSize: 70, } @@ -141,7 +153,7 @@ func TestWriteHTTPVerySmallMaxLineSize(t *testing.T) { require.NoError(t, listener.Start(acc)) defer listener.Stop() - resp, err := http.Post("http://localhost:8298/write", "", bytes.NewBuffer([]byte(testMsgs))) + resp, err := http.Post(createURL(listener, "/write", ""), "", bytes.NewBuffer([]byte(testMsgs))) require.NoError(t, err) require.EqualValues(t, 204, resp.StatusCode) @@ -158,7 +170,7 @@ func TestWriteHTTPVerySmallMaxLineSize(t *testing.T) { func TestWriteHTTPLargeLinesSkipped(t *testing.T) { listener := &HTTPListener{ - ServiceAddress: ":8300", + ServiceAddress: ":0", MaxLineSize: 100, } @@ -166,7 +178,7 @@ func TestWriteHTTPLargeLinesSkipped(t *testing.T) { require.NoError(t, listener.Start(acc)) defer listener.Stop() - resp, err := http.Post("http://localhost:8300/write", "", bytes.NewBuffer([]byte(hugeMetric+testMsgs))) + resp, err := http.Post(createURL(listener, "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric+testMsgs))) require.NoError(t, err) require.EqualValues(t, 400, 
resp.StatusCode) @@ -183,9 +195,7 @@ func TestWriteHTTPLargeLinesSkipped(t *testing.T) { // test that writing gzipped data works func TestWriteHTTPGzippedData(t *testing.T) { - listener := &HTTPListener{ - ServiceAddress: ":8299", - } + listener := newTestHTTPListener() acc := &testutil.Accumulator{} require.NoError(t, listener.Start(acc)) @@ -194,7 +204,7 @@ func TestWriteHTTPGzippedData(t *testing.T) { data, err := ioutil.ReadFile("./testdata/testmsgs.gz") require.NoError(t, err) - req, err := http.NewRequest("POST", "http://localhost:8299/write", bytes.NewBuffer(data)) + req, err := http.NewRequest("POST", createURL(listener, "/write", ""), bytes.NewBuffer(data)) require.NoError(t, err) req.Header.Set("Content-Encoding", "gzip") @@ -216,7 +226,7 @@ func TestWriteHTTPGzippedData(t *testing.T) { // writes 25,000 metrics to the listener with 10 different writers func TestWriteHTTPHighTraffic(t *testing.T) { - listener := &HTTPListener{ServiceAddress: ":8286"} + listener := newTestHTTPListener() acc := &testutil.Accumulator{} require.NoError(t, listener.Start(acc)) @@ -229,7 +239,7 @@ func TestWriteHTTPHighTraffic(t *testing.T) { go func(innerwg *sync.WaitGroup) { defer innerwg.Done() for i := 0; i < 500; i++ { - resp, err := http.Post("http://localhost:8286/write?db=mydb", "", bytes.NewBuffer([]byte(testMsgs))) + resp, err := http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs))) require.NoError(t, err) require.EqualValues(t, 204, resp.StatusCode) } @@ -251,7 +261,7 @@ func TestReceive404ForInvalidEndpoint(t *testing.T) { defer listener.Stop() // post single message to listener - resp, err := http.Post("http://localhost:8186/foobar", "", bytes.NewBuffer([]byte(testMsg))) + resp, err := http.Post(createURL(listener, "/foobar", ""), "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) require.EqualValues(t, 404, resp.StatusCode) } @@ -264,7 +274,7 @@ func TestWriteHTTPInvalid(t *testing.T) { defer listener.Stop() // post single message to listener - resp, err := http.Post("http://localhost:8186/write?db=mydb", "", bytes.NewBuffer([]byte(badMsg))) + resp, err := http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(badMsg))) require.NoError(t, err) require.EqualValues(t, 400, resp.StatusCode) } @@ -277,7 +287,7 @@ func TestWriteHTTPEmpty(t *testing.T) { defer listener.Stop() // post single message to listener - resp, err := http.Post("http://localhost:8186/write?db=mydb", "", bytes.NewBuffer([]byte(emptyMsg))) + resp, err := http.Post(createURL(listener, "/write", "db=mydb"), "", bytes.NewBuffer([]byte(emptyMsg))) require.NoError(t, err) require.EqualValues(t, 204, resp.StatusCode) } @@ -290,12 +300,13 @@ func TestQueryAndPingHTTP(t *testing.T) { defer listener.Stop() // post query to listener - resp, err := http.Post("http://localhost:8186/query?db=&q=CREATE+DATABASE+IF+NOT+EXISTS+%22mydb%22", "", nil) + resp, err := http.Post( + createURL(listener, "/query", "db=&q=CREATE+DATABASE+IF+NOT+EXISTS+%22mydb%22"), "", nil) require.NoError(t, err) require.EqualValues(t, 200, resp.StatusCode) // post ping to listener - resp, err = http.Post("http://localhost:8186/ping", "", nil) + resp, err = http.Post(createURL(listener, "/ping", ""), "", nil) require.NoError(t, err) require.EqualValues(t, 204, resp.StatusCode) } From 62b5c1f7e765c80a1c1d95c91b964207ef4d5dde Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 10 Apr 2017 16:39:40 -0700 Subject: [PATCH 054/201] Add support for precision in http_listener (#2644) --- 
CHANGELOG.md | 1 + metric/parse.go | 50 +++++++++++++++++-- metric/parse_test.go | 21 ++++++++ plugins/inputs/http_listener/README.md | 11 +++- plugins/inputs/http_listener/http_listener.go | 12 +++-- .../http_listener/http_listener_test.go | 19 +++++++ plugins/parsers/influx/parser.go | 6 +-- 7 files changed, 107 insertions(+), 13 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 46d8b57d5b1d6..9aab18fcd9181 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -65,6 +65,7 @@ be deprecated eventually. - [#2597](https://github.com/influxdata/telegraf/issues/2597): Add support for Linux sysctl-fs metrics. - [#2425](https://github.com/influxdata/telegraf/pull/2425): Support to include/exclude docker container labels as tags - [#1667](https://github.com/influxdata/telegraf/pull/1667): dmcache input plugin +- [#2637](https://github.com/influxdata/telegraf/issues/2637): Add support for precision in http_listener ### Bugfixes diff --git a/metric/parse.go b/metric/parse.go index 15b88e5528e1a..92dc4918bb19f 100644 --- a/metric/parse.go +++ b/metric/parse.go @@ -4,6 +4,7 @@ import ( "bytes" "errors" "fmt" + "strconv" "time" "github.com/influxdata/telegraf" @@ -40,10 +41,18 @@ const ( ) func Parse(buf []byte) ([]telegraf.Metric, error) { - return ParseWithDefaultTime(buf, time.Now()) + return ParseWithDefaultTimePrecision(buf, time.Now(), "") } func ParseWithDefaultTime(buf []byte, t time.Time) ([]telegraf.Metric, error) { + return ParseWithDefaultTimePrecision(buf, t, "") +} + +func ParseWithDefaultTimePrecision( + buf []byte, + t time.Time, + precision string, +) ([]telegraf.Metric, error) { if len(buf) == 0 { return []telegraf.Metric{}, nil } @@ -63,7 +72,7 @@ func ParseWithDefaultTime(buf []byte, t time.Time) ([]telegraf.Metric, error) { continue } - m, err := parseMetric(buf[i:i+j], t) + m, err := parseMetric(buf[i:i+j], t, precision) if err != nil { i += j + 1 // increment i past the previous newline errStr += " " + err.Error() @@ -80,7 +89,10 @@ func ParseWithDefaultTime(buf []byte, t time.Time) ([]telegraf.Metric, error) { return metrics, nil } -func parseMetric(buf []byte, defaultTime time.Time) (telegraf.Metric, error) { +func parseMetric(buf []byte, + defaultTime time.Time, + precision string, +) (telegraf.Metric, error) { var dTime string // scan the first block which is measurement[,tag1=value1,tag2=value=2...] pos, key, err := scanKey(buf, 0) @@ -114,9 +126,23 @@ func parseMetric(buf []byte, defaultTime time.Time) (telegraf.Metric, error) { return nil, err } + // apply precision multiplier + var nsec int64 + multiplier := getPrecisionMultiplier(precision) + if multiplier > 1 { + tsint, err := parseIntBytes(ts, 10, 64) + if err != nil { + return nil, err + } + + nsec := multiplier * tsint + ts = []byte(strconv.FormatInt(nsec, 10)) + } + m := &metric{ fields: fields, t: ts, + nsec: nsec, } // parse out the measurement name @@ -628,3 +654,21 @@ func makeError(reason string, buf []byte, i int) error { return fmt.Errorf("metric parsing error, reason: [%s], buffer: [%s], index: [%d]", reason, buf, i) } + +// getPrecisionMultiplier will return a multiplier for the precision specified. 
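+// For example, "ms" yields int64(time.Millisecond) == 1e6, so a millisecond
+// timestamp such as 1491847420123 becomes 1491847420123000000 nanoseconds.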
+func getPrecisionMultiplier(precision string) int64 { + d := time.Nanosecond + switch precision { + case "u": + d = time.Microsecond + case "ms": + d = time.Millisecond + case "s": + d = time.Second + case "m": + d = time.Minute + case "h": + d = time.Hour + } + return int64(d) +} diff --git a/metric/parse_test.go b/metric/parse_test.go index 40bcf60b8daf1..89ade9f56da80 100644 --- a/metric/parse_test.go +++ b/metric/parse_test.go @@ -364,6 +364,27 @@ func TestParseNegativeTimestamps(t *testing.T) { } } +func TestParsePrecision(t *testing.T) { + for _, tt := range []struct { + line string + precision string + expected int64 + }{ + {"test v=42 1491847420", "s", 1491847420000000000}, + {"test v=42 1491847420123", "ms", 1491847420123000000}, + {"test v=42 1491847420123456", "u", 1491847420123456000}, + {"test v=42 1491847420123456789", "ns", 1491847420123456789}, + + {"test v=42 1491847420123456789", "1s", 1491847420123456789}, + {"test v=42 1491847420123456789", "asdf", 1491847420123456789}, + } { + metrics, err := ParseWithDefaultTimePrecision( + []byte(tt.line+"\n"), time.Now(), tt.precision) + assert.NoError(t, err, tt) + assert.Equal(t, tt.expected, metrics[0].UnixNano()) + } +} + func TestParseMaxKeyLength(t *testing.T) { key := "" for { diff --git a/plugins/inputs/http_listener/README.md b/plugins/inputs/http_listener/README.md index 9643f6a2ed8e5..994df654abc23 100644 --- a/plugins/inputs/http_listener/README.md +++ b/plugins/inputs/http_listener/README.md @@ -2,11 +2,18 @@ The HTTP listener is a service input plugin that listens for messages sent via HTTP POST. The plugin expects messages in the InfluxDB line-protocol ONLY, other Telegraf input data formats are not supported. -The intent of the plugin is to allow Telegraf to serve as a proxy/router for the /write endpoint of the InfluxDB HTTP API. +The intent of the plugin is to allow Telegraf to serve as a proxy/router for the `/write` endpoint of the InfluxDB HTTP API. + +The `/write` endpoint supports the `precision` query parameter and can be set to one of `ns`, `u`, `ms`, `s`, `m`, `h`. All other parameters are ignored and defer to the output plugins configuration. + When chaining Telegraf instances using this plugin, CREATE DATABASE requests receive a 200 OK response with message body `{"results":[]}` but they are not relayed. The output configuration of the Telegraf instance which ultimately submits data to InfluxDB determines the destination database. See: [Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#influx). 
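+
+For example, metrics with seconds-precision timestamps can be written by adding
+`precision=s` to the URL (an illustrative request; the port is whatever
+`service_address` is configured to):
+
+```
+curl -i -XPOST 'http://localhost:8186/write?precision=s' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562'
+```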
-Example: curl -i -XPOST 'http://localhost:8186/write' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000' + +**Example:** +``` +curl -i -XPOST 'http://localhost:8186/write' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000' +``` ### Configuration: diff --git a/plugins/inputs/http_listener/http_listener.go b/plugins/inputs/http_listener/http_listener.go index f0ad5752e6bf2..5ef2603043612 100644 --- a/plugins/inputs/http_listener/http_listener.go +++ b/plugins/inputs/http_listener/http_listener.go @@ -207,10 +207,12 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) { } now := time.Now() + precision := req.URL.Query().Get("precision") + // Handle gzip request bodies body := req.Body - var err error if req.Header.Get("Content-Encoding") == "gzip" { + var err error body, err = gzip.NewReader(req.Body) defer body.Close() if err != nil { @@ -263,7 +265,7 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) { if err == io.ErrUnexpectedEOF { // finished reading the request body - if err := h.parse(buf[:n+bufStart], now); err != nil { + if err := h.parse(buf[:n+bufStart], now, precision); err != nil { log.Println("E! " + err.Error()) return400 = true } @@ -288,7 +290,7 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) { bufStart = 0 continue } - if err := h.parse(buf[:i+1], now); err != nil { + if err := h.parse(buf[:i+1], now, precision); err != nil { log.Println("E! " + err.Error()) return400 = true } @@ -301,8 +303,8 @@ func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) { } } -func (h *HTTPListener) parse(b []byte, t time.Time) error { - metrics, err := h.parser.ParseWithDefaultTime(b, t) +func (h *HTTPListener) parse(b []byte, t time.Time, precision string) error { + metrics, err := h.parser.ParseWithDefaultTimePrecision(b, t, precision) for _, m := range metrics { h.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time()) diff --git a/plugins/inputs/http_listener/http_listener_test.go b/plugins/inputs/http_listener/http_listener_test.go index 41c0e9db8f0b7..654f2f83d7283 100644 --- a/plugins/inputs/http_listener/http_listener_test.go +++ b/plugins/inputs/http_listener/http_listener_test.go @@ -8,6 +8,7 @@ import ( "strconv" "sync" "testing" + "time" "github.com/influxdata/telegraf/testutil" @@ -311,5 +312,23 @@ func TestQueryAndPingHTTP(t *testing.T) { require.EqualValues(t, 204, resp.StatusCode) } +func TestWriteWithPrecision(t *testing.T) { + listener := newTestHTTPListener() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + msg := "xyzzy value=42 1422568543\n" + resp, err := http.Post( + createURL(listener, "/write", "precision=s"), "", bytes.NewBuffer([]byte(msg))) + require.NoError(t, err) + require.EqualValues(t, 204, resp.StatusCode) + + acc.Wait(1) + require.Equal(t, 1, len(acc.Metrics)) + require.Equal(t, time.Unix(0, 1422568543000000000), acc.Metrics[0].Time) +} + const hugeMetric = `super_long_metric,foo=bar 
clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patter
ns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07
,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=
0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_
backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,u
sed_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl
_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_
processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connec
ted_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes
_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_chi
ldren=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitra
te=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048
576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33
792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fr
agmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,tota
l_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evi
cted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=
0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_
user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hit
s=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i ` diff --git a/plugins/parsers/influx/parser.go b/plugins/parsers/influx/parser.go index c15c503f7a06a..0abb330e87d39 100644 --- a/plugins/parsers/influx/parser.go +++ b/plugins/parsers/influx/parser.go @@ -15,13 +15,13 @@ type InfluxParser struct { DefaultTags map[string]string } -func (p *InfluxParser) ParseWithDefaultTime(buf []byte, t time.Time) ([]telegraf.Metric, 
error) { +func (p *InfluxParser) ParseWithDefaultTimePrecision(buf []byte, t time.Time, precision string) ([]telegraf.Metric, error) { if !bytes.HasSuffix(buf, []byte("\n")) { buf = append(buf, '\n') } // parse even if the buffer begins with a newline buf = bytes.TrimPrefix(buf, []byte("\n")) - metrics, err := metric.ParseWithDefaultTime(buf, t) + metrics, err := metric.ParseWithDefaultTimePrecision(buf, t, precision) if len(p.DefaultTags) > 0 { for _, m := range metrics { for k, v := range p.DefaultTags { @@ -41,7 +41,7 @@ func (p *InfluxParser) ParseWithDefaultTime(buf []byte, t time.Time) ([]telegraf // a non-nil error will be returned in addition to the metrics that parsed // successfully. func (p *InfluxParser) Parse(buf []byte) ([]telegraf.Metric, error) { - return p.ParseWithDefaultTime(buf, time.Now()) + return p.ParseWithDefaultTimePrecision(buf, time.Now(), "") } func (p *InfluxParser) ParseLine(line string) (telegraf.Metric, error) { From 516dffa4c4579fffd6a2f757e866707e22272f6f Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Mon, 10 Apr 2017 19:45:02 -0400 Subject: [PATCH 055/201] set default measurement name on snmp input (#2639) --- CHANGELOG.md | 1 + plugins/inputs/snmp/snmp.go | 1 + 2 files changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9aab18fcd9181..3a16982148ebd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -96,6 +96,7 @@ be deprecated eventually. - [#2596](https://github.com/influxdata/telegraf/pull/2596): fix timestamp parsing on prometheus plugin - [#2610](https://github.com/influxdata/telegraf/pull/2610): Fix deadlock when output cannot write - [#2410](https://github.com/influxdata/telegraf/issues/2410): Fix connection leak in postgresql. +- [#2628](https://github.com/influxdata/telegraf/issues/2628): Set default measurement name for snmp input. ## v1.2.1 [2017-02-01] diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index 5394e57db5634..2aef729b31090 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -314,6 +314,7 @@ func Errorf(err error, msg string, format ...interface{}) error { func init() { inputs.Add("snmp", func() telegraf.Input { return &Snmp{ + Name: "snmp", Retries: 3, MaxRepetitions: 10, Timeout: internal.Duration{Duration: 5 * time.Second}, From f55af7d21f7e6f124f69009fb4b9a8ff2c91d52a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 11 Apr 2017 11:41:09 -0700 Subject: [PATCH 056/201] Use name filter for IOCounters in diskio (#2649) Use IOCountersForNames for disk counters. --- CHANGELOG.md | 1 + Godeps | 2 +- plugins/inputs/system/disk.go | 15 +-------------- plugins/inputs/system/mock_PS.go | 2 +- plugins/inputs/system/ps.go | 6 +++--- 5 files changed, 7 insertions(+), 19 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3a16982148ebd..12381152c732f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -97,6 +97,7 @@ be deprecated eventually. - [#2610](https://github.com/influxdata/telegraf/pull/2610): Fix deadlock when output cannot write - [#2410](https://github.com/influxdata/telegraf/issues/2410): Fix connection leak in postgresql. - [#2628](https://github.com/influxdata/telegraf/issues/2628): Set default measurement name for snmp input. 
+- [#2649](https://github.com/influxdata/telegraf/pull/2649): Improve performance of diskio with many disks ## v1.2.1 [2017-02-01] diff --git a/Godeps b/Godeps index ab72be8f2d314..2e04c0cddf34e 100644 --- a/Godeps +++ b/Godeps @@ -44,7 +44,7 @@ github.com/prometheus/common dd2f054febf4a6c00f2343686efb775948a8bff4 github.com/prometheus/procfs 1878d9fbb537119d24b21ca07effd591627cd160 github.com/rcrowley/go-metrics 1f30fe9094a513ce4c700b9a54458bbb0c96996c github.com/samuel/go-zookeeper 1d7be4effb13d2d908342d349d71a284a7542693 -github.com/shirou/gopsutil d371ba1293cb48fedc6850526ea48b3846c54f2c +github.com/shirou/gopsutil dfbb3e40da8d6fcd1aa0d87003e965fe0ca745ea github.com/soniah/gosnmp 5ad50dc75ab389f8a1c9f8a67d3a1cd85f67ed15 github.com/streadway/amqp 63795daa9a446c920826655f26ba31c81c860fd6 github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987 diff --git a/plugins/inputs/system/disk.go b/plugins/inputs/system/disk.go index 3f6d83c1cc592..004466f836013 100644 --- a/plugins/inputs/system/disk.go +++ b/plugins/inputs/system/disk.go @@ -125,25 +125,12 @@ func (_ *DiskIOStats) SampleConfig() string { } func (s *DiskIOStats) Gather(acc telegraf.Accumulator) error { - diskio, err := s.ps.DiskIO() + diskio, err := s.ps.DiskIO(s.Devices) if err != nil { return fmt.Errorf("error getting disk io info: %s", err) } - var restrictDevices bool - devices := make(map[string]bool) - if len(s.Devices) != 0 { - restrictDevices = true - for _, dev := range s.Devices { - devices[dev] = true - } - } - for _, io := range diskio { - _, member := devices[io.Name] - if restrictDevices && !member { - continue - } tags := map[string]string{} tags["name"] = s.diskName(io.Name) for t, v := range s.diskTags(io.Name) { diff --git a/plugins/inputs/system/mock_PS.go b/plugins/inputs/system/mock_PS.go index e9f96a6c7bad9..a83a8b80332d8 100644 --- a/plugins/inputs/system/mock_PS.go +++ b/plugins/inputs/system/mock_PS.go @@ -61,7 +61,7 @@ func (m *MockPS) NetProto() ([]net.ProtoCountersStat, error) { return r0, r1 } -func (m *MockPS) DiskIO() (map[string]disk.IOCountersStat, error) { +func (m *MockPS) DiskIO(names []string) (map[string]disk.IOCountersStat, error) { ret := m.Called() r0 := ret.Get(0).(map[string]disk.IOCountersStat) diff --git a/plugins/inputs/system/ps.go b/plugins/inputs/system/ps.go index b0e021e407b35..d253278122d35 100644 --- a/plugins/inputs/system/ps.go +++ b/plugins/inputs/system/ps.go @@ -17,7 +17,7 @@ type PS interface { DiskUsage(mountPointFilter []string, fstypeExclude []string) ([]*disk.UsageStat, []*disk.PartitionStat, error) NetIO() ([]net.IOCountersStat, error) NetProto() ([]net.ProtoCountersStat, error) - DiskIO() (map[string]disk.IOCountersStat, error) + DiskIO(names []string) (map[string]disk.IOCountersStat, error) VMStat() (*mem.VirtualMemoryStat, error) SwapStat() (*mem.SwapMemoryStat, error) NetConnections() ([]net.ConnectionStat, error) @@ -120,8 +120,8 @@ func (s *systemPS) NetConnections() ([]net.ConnectionStat, error) { return net.Connections("all") } -func (s *systemPS) DiskIO() (map[string]disk.IOCountersStat, error) { - m, err := disk.IOCounters() +func (s *systemPS) DiskIO(names []string) (map[string]disk.IOCountersStat, error) { + m, err := disk.IOCountersForNames(names) if err == internal.NotImplementedError { return nil, nil } From 0193cbee513bbd66b61f36a533241b9b212758fb Mon Sep 17 00:00:00 2001 From: Nick Irvine Date: Tue, 11 Apr 2017 12:05:39 -0700 Subject: [PATCH 057/201] Add 
max_message_len in kafka_consumer input (#2636) --- CHANGELOG.md | 1 + plugins/inputs/kafka_consumer/README.md | 4 +++ .../inputs/kafka_consumer/kafka_consumer.go | 31 ++++++++++++------- .../kafka_consumer/kafka_consumer_test.go | 18 +++++++++++ 4 files changed, 43 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 12381152c732f..10934f7fd4ff0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -66,6 +66,7 @@ be deprecated eventually. - [#2425](https://github.com/influxdata/telegraf/pull/2425): Support to include/exclude docker container labels as tags - [#1667](https://github.com/influxdata/telegraf/pull/1667): dmcache input plugin - [#2637](https://github.com/influxdata/telegraf/issues/2637): Add support for precision in http_listener +- [#2636](https://github.com/influxdata/telegraf/pull/2636): Add `message_len_max` option to `kafka_consumer` input ### Bugfixes diff --git a/plugins/inputs/kafka_consumer/README.md b/plugins/inputs/kafka_consumer/README.md index afdb51e32a274..6a95a7c54c833 100644 --- a/plugins/inputs/kafka_consumer/README.md +++ b/plugins/inputs/kafka_consumer/README.md @@ -28,6 +28,10 @@ from the same topic in parallel. ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" + + ## Maximum length of a message to consume, in bytes (default 0/unlimited); + ## larger messages are dropped + max_message_len = 65536 ``` ## Testing diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index 6f1f4020b4633..2f6933db0c004 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -17,6 +17,7 @@ import ( type Kafka struct { ConsumerGroup string Topics []string + MaxMessageLen int ZookeeperPeers []string ZookeeperChroot string Consumer *consumergroup.ConsumerGroup @@ -58,10 +59,14 @@ var sampleConfig = ` offset = "oldest" ## Data format to consume. - ## Each data format has it's own unique set of configuration options, read + ## Each data format has its own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" + + ## Maximum length of a message to consume, in bytes (default 0/unlimited); + ## larger messages are dropped + max_message_len = 65536 ` func (k *Kafka) SampleConfig() string { @@ -130,17 +135,21 @@ func (k *Kafka) receiver() { return case err := <-k.errs: if err != nil { - k.acc.AddError(fmt.Errorf("Kafka Consumer Error: %s\n", err)) + k.acc.AddError(fmt.Errorf("Consumer Error: %s\n", err)) } case msg := <-k.in: - metrics, err := k.parser.Parse(msg.Value) - if err != nil { - k.acc.AddError(fmt.Errorf("E! 
Kafka Message Parse Error\nmessage: %s\nerror: %s", - string(msg.Value), err.Error())) - } - - for _, metric := range metrics { - k.acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), metric.Time()) + if k.MaxMessageLen != 0 && len(msg.Value) > k.MaxMessageLen { + k.acc.AddError(fmt.Errorf("Message longer than max_message_len (%d > %d)", + len(msg.Value), k.MaxMessageLen)) + } else { + metrics, err := k.parser.Parse(msg.Value) + if err != nil { + k.acc.AddError(fmt.Errorf("Message Parse Error\nmessage: %s\nerror: %s", + string(msg.Value), err.Error())) + } + for _, metric := range metrics { + k.acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), metric.Time()) + } } if !k.doNotCommitMsgs { @@ -159,7 +168,7 @@ func (k *Kafka) Stop() { defer k.Unlock() close(k.done) if err := k.Consumer.Close(); err != nil { - k.acc.AddError(fmt.Errorf("E! Error closing kafka consumer: %s\n", err.Error())) + k.acc.AddError(fmt.Errorf("Error closing consumer: %s\n", err.Error())) } } diff --git a/plugins/inputs/kafka_consumer/kafka_consumer_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_test.go index e1c24adbed1b6..04498261ca69f 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer_test.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer_test.go @@ -1,6 +1,7 @@ package kafka_consumer import ( + "strings" "testing" "github.com/influxdata/telegraf/plugins/parsers" @@ -62,6 +63,23 @@ func TestRunParserInvalidMsg(t *testing.T) { assert.Equal(t, acc.NFields(), 0) } +// Test that overlong messages are dropped +func TestDropOverlongMsg(t *testing.T) { + const maxMessageLen = 64 * 1024 + k, in := newTestKafka() + k.MaxMessageLen = maxMessageLen + acc := testutil.Accumulator{} + k.acc = &acc + defer close(k.done) + overlongMsg := strings.Repeat("v", maxMessageLen+1) + + go k.receiver() + in <- saramaMsg(overlongMsg) + acc.WaitError(1) + + assert.Equal(t, acc.NFields(), 0) +} + // Test that the parser parses kafka messages into points func TestRunParserAndGather(t *testing.T) { k, in := newTestKafka() From 2c98e5ae66127c87d44f5a2ac5b819879a4b76e5 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 12 Apr 2017 10:41:26 -0700 Subject: [PATCH 058/201] Add collectd parser (#2654) --- CHANGELOG.md | 1 + Godeps | 1 + README.md | 10 + docs/DATA_FORMATS_INPUT.md | 41 +++ docs/LICENSE_OF_DEPENDENCIES.md | 2 +- internal/config/config.go | 31 ++ logger/logger.go | 11 +- logger/logger_test.go | 13 + .../inputs/socket_listener/socket_listener.go | 4 +- plugins/parsers/collectd/parser.go | 165 ++++++++++ plugins/parsers/collectd/parser_test.go | 298 ++++++++++++++++++ plugins/parsers/registry.go | 19 ++ 12 files changed, 592 insertions(+), 4 deletions(-) create mode 100644 plugins/parsers/collectd/parser.go create mode 100644 plugins/parsers/collectd/parser_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 10934f7fd4ff0..a2d9fc68e0f51 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -67,6 +67,7 @@ be deprecated eventually. 
- [#1667](https://github.com/influxdata/telegraf/pull/1667): dmcache input plugin - [#2637](https://github.com/influxdata/telegraf/issues/2637): Add support for precision in http_listener - [#2636](https://github.com/influxdata/telegraf/pull/2636): Add `message_len_max` option to `kafka_consumer` input +- [#1100](https://github.com/influxdata/telegraf/issues/1100): Add collectd parser ### Bugfixes diff --git a/Godeps b/Godeps index 2e04c0cddf34e..a41d028c8745b 100644 --- a/Godeps +++ b/Godeps @@ -1,3 +1,4 @@ +collectd.org 2ce144541b8903101fb8f1483cc0497a68798122 github.com/Shopify/sarama 574d3147eee384229bf96a5d12c207fe7b5234f3 github.com/Sirupsen/logrus 61e43dc76f7ee59a82bdf3d71033dc12bea4c77d github.com/aerospike/aerospike-client-go 95e1ad7791bdbca44707fedbb29be42024900d9c diff --git a/README.md b/README.md index 55154e36a8994..f46c2e2984d0f 100644 --- a/README.md +++ b/README.md @@ -195,6 +195,16 @@ Telegraf can also collect metrics via the following service plugins: * [mandrill](./plugins/inputs/webhooks/mandrill) * [rollbar](./plugins/inputs/webhooks/rollbar) +Telegraf is able to parse the following input data formats into metrics; these +formats may be used with input plugins supporting the `data_format` option: + +* [InfluxDB Line Protocol](./docs/DATA_FORMATS_INPUT.md#influx) +* [JSON](./docs/DATA_FORMATS_INPUT.md#json) +* [Graphite](./docs/DATA_FORMATS_INPUT.md#graphite) +* [Value](./docs/DATA_FORMATS_INPUT.md#value) +* [Nagios](./docs/DATA_FORMATS_INPUT.md#nagios) +* [Collectd](./docs/DATA_FORMATS_INPUT.md#collectd) + ## Processor Plugins * [printer](./plugins/processors/printer) diff --git a/docs/DATA_FORMATS_INPUT.md b/docs/DATA_FORMATS_INPUT.md index f2a635d89d1af..59287e4a4b05e 100644 --- a/docs/DATA_FORMATS_INPUT.md +++ b/docs/DATA_FORMATS_INPUT.md @@ -7,6 +7,7 @@ Telegraf is able to parse the following input data formats into metrics: 1. [Graphite](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite) 1. [Value](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#value), ie: 45 or "booyah" 1. [Nagios](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#nagios) (exec input only) +1. [Collectd](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#collectd) Telegraf metrics, like InfluxDB [points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/), @@ -438,3 +439,43 @@ Note: Nagios Input Data Formats is only supported in `exec` input plugin. ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "nagios" ``` + +# Collectd: + +The collectd format parses the collectd binary network protocol. Tags are +created for host, instance, type, and type instance. All collectd values are +added as float64 fields. + +For more information about the binary network protocol see +[here](https://collectd.org/wiki/index.php/Binary_protocol). + +You can control the cryptographic settings with parser options. Create an +authentication file and set `collectd_auth_file` to the path of the file, then +set the desired security level in `collectd_security_level`. + +Additional information including client setup can be found +[here](https://collectd.org/wiki/index.php/Networking_introduction#Cryptographic_setup). + +You can also change the path to the typesdb or add additional typesdb files using +`collectd_typesdb`.
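+
+For reference, the file named by `collectd_auth_file` uses collectd's
+plain-text auth file format: one `username: password` pair per line. A
+minimal sketch (the user names and passwords here are placeholders, not
+defaults shipped with either project):
+
+```
+user0: s3cr3t
+telegraf: an0ther-s3cr3t
+```
+
+A matching client-side collectd `network` plugin block might look like the
+following sketch (server address, port, and credentials are assumptions to
+adapt to your own setup):
+
+```
+<Plugin network>
+  <Server "127.0.0.1" "25826">
+    SecurityLevel Encrypt
+    Username "user0"
+    Password "s3cr3t"
+  </Server>
+</Plugin>
+```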
+ +#### Collectd Configuration: + +```toml +[[inputs.socket_listener]] + service_address = "udp://127.0.0.1:25826" + name_prefix = "collectd_" + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "collectd" + + ## Authentication file for cryptographic security levels + collectd_auth_file = "/etc/collectd/auth_file" + ## One of none (default), sign, or encrypt + collectd_security_level = "encrypt" + ## Path to TypesDB specifications + collectd_typesdb = ["/usr/share/collectd/types.db"] +``` diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 5bb1bd036214b..a367aa7fbcba6 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -1,4 +1,5 @@ # List +- collectd.org [MIT LICENSE](https://github.com/collectd/go-collectd/blob/master/LICENSE) - github.com/Shopify/sarama [MIT LICENSE](https://github.com/Shopify/sarama/blob/master/MIT-LICENSE) - github.com/Sirupsen/logrus [MIT LICENSE](https://github.com/Sirupsen/logrus/blob/master/LICENSE) @@ -30,4 +31,3 @@ - gopkg.in/dancannon/gorethink.v1 [APACHE LICENSE](https://github.com/dancannon/gorethink/blob/v1.1.2/LICENSE) - gopkg.in/mgo.v2 [BSD LICENSE](https://github.com/go-mgo/mgo/blob/v2/LICENSE) - golang.org/x/crypto/ [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE) - diff --git a/internal/config/config.go b/internal/config/config.go index 013e81c1259a8..f8c3041793336 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -1230,6 +1230,34 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) { } } + if node, ok := tbl.Fields["collectd_auth_file"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + c.CollectdAuthFile = str.Value + } + } + } + + if node, ok := tbl.Fields["collectd_security_level"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + c.CollectdSecurityLevel = str.Value + } + } + } + + if node, ok := tbl.Fields["collectd_typesdb"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if ary, ok := kv.Value.(*ast.Array); ok { + for _, elem := range ary.Value { + if str, ok := elem.(*ast.String); ok { + c.CollectdTypesDB = append(c.CollectdTypesDB, str.Value) + } + } + } + } + } + c.MetricName = name delete(tbl.Fields, "data_format") @@ -1237,6 +1265,9 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) { delete(tbl.Fields, "templates") delete(tbl.Fields, "tag_keys") delete(tbl.Fields, "data_type") + delete(tbl.Fields, "collectd_auth_file") + delete(tbl.Fields, "collectd_security_level") + delete(tbl.Fields, "collectd_typesdb") return parsers.NewParser(c) } diff --git a/logger/logger.go b/logger/logger.go index 49613c27db5ea..7ad1c80692cef 100644 --- a/logger/logger.go +++ b/logger/logger.go @@ -4,11 +4,14 @@ import ( "io" "log" "os" + "regexp" "time" "github.com/influxdata/wlog" ) +var prefixRegex = regexp.MustCompile("^[DIWE]!") + // newTelegrafWriter returns a logging-wrapped writer.
func newTelegrafWriter(w io.Writer) io.Writer { return &telegrafLog{ @@ -21,7 +24,13 @@ type telegrafLog struct { } func (t *telegrafLog) Write(b []byte) (n int, err error) { - return t.writer.Write(append([]byte(time.Now().UTC().Format(time.RFC3339)+" "), b...)) + var line []byte + if !prefixRegex.Match(b) { + line = append([]byte(time.Now().UTC().Format(time.RFC3339)+" I! "), b...) + } else { + line = append([]byte(time.Now().UTC().Format(time.RFC3339)+" "), b...) + } + return t.writer.Write(line) } // SetupLogging configures the logging output. diff --git a/logger/logger_test.go b/logger/logger_test.go index 8c0826e65cbc3..09c7c82ebad6b 100644 --- a/logger/logger_test.go +++ b/logger/logger_test.go @@ -51,6 +51,19 @@ func TestErrorWriteLogToFile(t *testing.T) { assert.Equal(t, f[19:], []byte("Z E! TEST\n")) } +func TestAddDefaultLogLevel(t *testing.T) { + tmpfile, err := ioutil.TempFile("", "") + assert.NoError(t, err) + defer func() { os.Remove(tmpfile.Name()) }() + + SetupLogging(true, false, tmpfile.Name()) + log.Printf("TEST") + + f, err := ioutil.ReadFile(tmpfile.Name()) + assert.NoError(t, err) + assert.Equal(t, f[19:], []byte("Z I! TEST\n")) +} + func BenchmarkTelegrafLogWrite(b *testing.B) { var msg = []byte("test") var buf bytes.Buffer diff --git a/plugins/inputs/socket_listener/socket_listener.go b/plugins/inputs/socket_listener/socket_listener.go index b5c0202cc1219..4a9a470a75e13 100644 --- a/plugins/inputs/socket_listener/socket_listener.go +++ b/plugins/inputs/socket_listener/socket_listener.go @@ -71,7 +71,7 @@ func (ssl *streamSocketListener) read(c net.Conn) { for scnr.Scan() { metrics, err := ssl.Parse(scnr.Bytes()) if err != nil { - ssl.AddError(fmt.Errorf("unable to parse incoming line")) + ssl.AddError(fmt.Errorf("unable to parse incoming line: %s", err)) //TODO rate limit continue } @@ -105,7 +105,7 @@ func (psl *packetSocketListener) listen() { metrics, err := psl.Parse(buf[:n]) if err != nil { - psl.AddError(fmt.Errorf("unable to parse incoming packet")) + psl.AddError(fmt.Errorf("unable to parse incoming packet: %s", err)) //TODO rate limit continue } diff --git a/plugins/parsers/collectd/parser.go b/plugins/parsers/collectd/parser.go new file mode 100644 index 0000000000000..20525610c7101 --- /dev/null +++ b/plugins/parsers/collectd/parser.go @@ -0,0 +1,165 @@ +package collectd + +import ( + "errors" + "fmt" + "log" + "os" + + "collectd.org/api" + "collectd.org/network" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" +) + +const ( + DefaultAuthFile = "/etc/collectd/auth_file" +) + +type CollectdParser struct { + // DefaultTags will be added to every parsed metric + DefaultTags map[string]string + + popts network.ParseOpts +} + +func (p *CollectdParser) SetParseOpts(popts *network.ParseOpts) { + p.popts = *popts +} + +func NewCollectdParser( + authFile string, + securityLevel string, + typesDB []string, +) (*CollectdParser, error) { + popts := network.ParseOpts{} + + switch securityLevel { + case "none": + popts.SecurityLevel = network.None + case "sign": + popts.SecurityLevel = network.Sign + case "encrypt": + popts.SecurityLevel = network.Encrypt + default: + popts.SecurityLevel = network.None + } + + if authFile == "" { + authFile = DefaultAuthFile + } + popts.PasswordLookup = network.NewAuthFile(authFile) + + for _, path := range typesDB { + db, err := LoadTypesDB(path) + if err != nil { + return nil, err + } + + if popts.TypesDB != nil { + popts.TypesDB.Merge(db) + } else { + popts.TypesDB = db + } + } + + 
parser := CollectdParser{popts: popts} + return &parser, nil +} + +func (p *CollectdParser) Parse(buf []byte) ([]telegraf.Metric, error) { + valueLists, err := network.Parse(buf, p.popts) + if err != nil { + return nil, fmt.Errorf("Collectd parser error: %s", err) + } + + metrics := []telegraf.Metric{} + for _, valueList := range valueLists { + metrics = append(metrics, UnmarshalValueList(valueList)...) + } + + if len(p.DefaultTags) > 0 { + for _, m := range metrics { + for k, v := range p.DefaultTags { + // only set the default tag if it doesn't already exist: + if !m.HasTag(k) { + m.AddTag(k, v) + } + } + } + } + + return metrics, nil +} + +func (p *CollectdParser) ParseLine(line string) (telegraf.Metric, error) { + metrics, err := p.Parse([]byte(line)) + if err != nil { + return nil, err + } + + if len(metrics) != 1 { + return nil, errors.New("Line contains multiple metrics") + } + + return metrics[0], nil +} + +func (p *CollectdParser) SetDefaultTags(tags map[string]string) { + p.DefaultTags = tags +} + +// UnmarshalValueList translates a ValueList into a Telegraf metric. +func UnmarshalValueList(vl *api.ValueList) []telegraf.Metric { + timestamp := vl.Time.UTC() + + var metrics []telegraf.Metric + for i := range vl.Values { + var name string + name = fmt.Sprintf("%s_%s", vl.Identifier.Plugin, vl.DSName(i)) + tags := make(map[string]string) + fields := make(map[string]interface{}) + + // Convert interface back to actual type, then to float64 + switch value := vl.Values[i].(type) { + case api.Gauge: + fields["value"] = float64(value) + case api.Derive: + fields["value"] = float64(value) + case api.Counter: + fields["value"] = float64(value) + } + + if vl.Identifier.Host != "" { + tags["host"] = vl.Identifier.Host + } + if vl.Identifier.PluginInstance != "" { + tags["instance"] = vl.Identifier.PluginInstance + } + if vl.Identifier.Type != "" { + tags["type"] = vl.Identifier.Type + } + if vl.Identifier.TypeInstance != "" { + tags["type_instance"] = vl.Identifier.TypeInstance + } + + // Drop invalid points + m, err := metric.New(name, tags, fields, timestamp) + if err != nil { + log.Printf("E! 
Dropping metric %v: %v", name, err) + continue + } + + metrics = append(metrics, m) + } + return metrics +} + +func LoadTypesDB(path string) (*api.TypesDB, error) { + reader, err := os.Open(path) + if err != nil { + return nil, err + } + return api.NewTypesDB(reader) +} diff --git a/plugins/parsers/collectd/parser_test.go b/plugins/parsers/collectd/parser_test.go new file mode 100644 index 0000000000000..3aad04013a404 --- /dev/null +++ b/plugins/parsers/collectd/parser_test.go @@ -0,0 +1,298 @@ +package collectd + +import ( + "context" + "testing" + + "collectd.org/api" + "collectd.org/network" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf" +) + +type AuthMap struct { + Passwd map[string]string +} + +func (p *AuthMap) Password(user string) (string, error) { + return p.Passwd[user], nil +} + +type metricData struct { + name string + tags map[string]string + fields map[string]interface{} +} + +type testCase struct { + vl []api.ValueList + expected []metricData +} + +var singleMetric = testCase{ + []api.ValueList{ + api.ValueList{ + Identifier: api.Identifier{ + Host: "xyzzy", + Plugin: "cpu", + PluginInstance: "1", + Type: "cpu", + TypeInstance: "user", + }, + Values: []api.Value{ + api.Counter(42), + }, + DSNames: []string(nil), + }, + }, + []metricData{ + metricData{ + "cpu_value", + map[string]string{ + "type_instance": "user", + "host": "xyzzy", + "instance": "1", + "type": "cpu", + }, + map[string]interface{}{ + "value": float64(42), + }, + }, + }, +} + +var multiMetric = testCase{ + []api.ValueList{ + api.ValueList{ + Identifier: api.Identifier{ + Host: "xyzzy", + Plugin: "cpu", + PluginInstance: "0", + Type: "cpu", + TypeInstance: "user", + }, + Values: []api.Value{ + api.Derive(42), + api.Gauge(42), + }, + DSNames: []string(nil), + }, + }, + []metricData{ + metricData{ + "cpu_0", + map[string]string{ + "type_instance": "user", + "host": "xyzzy", + "instance": "0", + "type": "cpu", + }, + map[string]interface{}{ + "value": float64(42), + }, + }, + metricData{ + "cpu_1", + map[string]string{ + "type_instance": "user", + "host": "xyzzy", + "instance": "0", + "type": "cpu", + }, + map[string]interface{}{ + "value": float64(42), + }, + }, + }, +} + +func TestNewCollectdParser(t *testing.T) { + parser, err := NewCollectdParser("", "", []string{}) + require.Nil(t, err) + require.Equal(t, parser.popts.SecurityLevel, network.None) + require.NotNil(t, parser.popts.PasswordLookup) + require.Nil(t, parser.popts.TypesDB) +} + +func TestParse(t *testing.T) { + cases := []testCase{singleMetric, multiMetric} + + for _, tc := range cases { + buf, err := writeValueList(tc.vl) + require.Nil(t, err) + bytes, err := buf.Bytes() + require.Nil(t, err) + + parser := &CollectdParser{} + require.Nil(t, err) + metrics, err := parser.Parse(bytes) + require.Nil(t, err) + + assertEqualMetrics(t, tc.expected, metrics) + } +} + +func TestParse_DefaultTags(t *testing.T) { + buf, err := writeValueList(singleMetric.vl) + require.Nil(t, err) + bytes, err := buf.Bytes() + require.Nil(t, err) + + parser := &CollectdParser{} + parser.SetDefaultTags(map[string]string{ + "foo": "bar", + }) + require.Nil(t, err) + metrics, err := parser.Parse(bytes) + require.Nil(t, err) + + require.Equal(t, "bar", metrics[0].Tags()["foo"]) +} + +func TestParse_SignSecurityLevel(t *testing.T) { + parser := &CollectdParser{} + popts := &network.ParseOpts{ + SecurityLevel: network.Sign, + PasswordLookup: &AuthMap{ + map[string]string{ + "user0": "bar", + }, + }, + } + parser.SetParseOpts(popts) + + 
// Signed data + buf, err := writeValueList(singleMetric.vl) + require.Nil(t, err) + buf.Sign("user0", "bar") + bytes, err := buf.Bytes() + require.Nil(t, err) + + metrics, err := parser.Parse(bytes) + require.Nil(t, err) + assertEqualMetrics(t, singleMetric.expected, metrics) + + // Encrypted data + buf, err = writeValueList(singleMetric.vl) + require.Nil(t, err) + buf.Encrypt("user0", "bar") + bytes, err = buf.Bytes() + require.Nil(t, err) + + metrics, err = parser.Parse(bytes) + require.Nil(t, err) + assertEqualMetrics(t, singleMetric.expected, metrics) + + // Plain text data skipped + buf, err = writeValueList(singleMetric.vl) + require.Nil(t, err) + bytes, err = buf.Bytes() + require.Nil(t, err) + + metrics, err = parser.Parse(bytes) + require.Nil(t, err) + require.Equal(t, []telegraf.Metric{}, metrics) + + // Wrong password error + buf, err = writeValueList(singleMetric.vl) + require.Nil(t, err) + buf.Sign("x", "y") + bytes, err = buf.Bytes() + require.Nil(t, err) + + metrics, err = parser.Parse(bytes) + require.NotNil(t, err) +} + +func TestParse_EncryptSecurityLevel(t *testing.T) { + parser := &CollectdParser{} + popts := &network.ParseOpts{ + SecurityLevel: network.Encrypt, + PasswordLookup: &AuthMap{ + map[string]string{ + "user0": "bar", + }, + }, + } + parser.SetParseOpts(popts) + + // Signed data skipped + buf, err := writeValueList(singleMetric.vl) + require.Nil(t, err) + buf.Sign("user0", "bar") + bytes, err := buf.Bytes() + require.Nil(t, err) + + metrics, err := parser.Parse(bytes) + require.Nil(t, err) + require.Equal(t, []telegraf.Metric{}, metrics) + + // Encrypted data + buf, err = writeValueList(singleMetric.vl) + require.Nil(t, err) + buf.Encrypt("user0", "bar") + bytes, err = buf.Bytes() + require.Nil(t, err) + + metrics, err = parser.Parse(bytes) + require.Nil(t, err) + assertEqualMetrics(t, singleMetric.expected, metrics) + + // Plain text data skipped + buf, err = writeValueList(singleMetric.vl) + require.Nil(t, err) + bytes, err = buf.Bytes() + require.Nil(t, err) + + metrics, err = parser.Parse(bytes) + require.Nil(t, err) + require.Equal(t, []telegraf.Metric{}, metrics) + + // Wrong password error + buf, err = writeValueList(singleMetric.vl) + require.Nil(t, err) + buf.Sign("x", "y") + bytes, err = buf.Bytes() + require.Nil(t, err) + + metrics, err = parser.Parse(bytes) + require.NotNil(t, err) +} + +func TestParseLine(t *testing.T) { + buf, err := writeValueList(singleMetric.vl) + require.Nil(t, err) + bytes, err := buf.Bytes() + require.Nil(t, err) + + parser, err := NewCollectdParser("", "", []string{}) + require.Nil(t, err) + metric, err := parser.ParseLine(string(bytes)) + require.Nil(t, err) + + assertEqualMetrics(t, singleMetric.expected, []telegraf.Metric{metric}) +} + +func writeValueList(valueLists []api.ValueList) (*network.Buffer, error) { + buffer := network.NewBuffer(0) + + ctx := context.Background() + for _, vl := range valueLists { + err := buffer.Write(ctx, &vl) + if err != nil { + return nil, err + } + } + + return buffer, nil +} + +func assertEqualMetrics(t *testing.T, expected []metricData, received []telegraf.Metric) { + require.Equal(t, len(expected), len(received)) + for i, m := range received { + require.Equal(t, expected[i].name, m.Name()) + require.Equal(t, expected[i].tags, m.Tags()) + require.Equal(t, expected[i].fields, m.Fields()) + } +} diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go index 360d795bc58f1..bda6aeba3377b 100644 --- a/plugins/parsers/registry.go +++ b/plugins/parsers/registry.go @@ -5,6 +5,7 
@@ import ( "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/parsers/collectd" "github.com/influxdata/telegraf/plugins/parsers/graphite" "github.com/influxdata/telegraf/plugins/parsers/influx" "github.com/influxdata/telegraf/plugins/parsers/json" @@ -53,6 +54,13 @@ type Config struct { // MetricName applies to JSON & value. This will be the name of the measurement. MetricName string + // Authentication file for collectd + CollectdAuthFile string + // One of none (default), sign, or encrypt + CollectdSecurityLevel string + // Dataset specification for collectd + CollectdTypesDB []string + // DataType only applies to value, this will be the type to parse value to DataType string @@ -78,6 +86,9 @@ func NewParser(config *Config) (Parser, error) { case "graphite": parser, err = NewGraphiteParser(config.Separator, config.Templates, config.DefaultTags) + case "collectd": + parser, err = NewCollectdParser(config.CollectdAuthFile, + config.CollectdSecurityLevel, config.CollectdTypesDB) default: err = fmt.Errorf("Invalid data format: %s", config.DataFormat) } @@ -124,3 +135,11 @@ func NewValueParser( DefaultTags: defaultTags, }, nil } + +func NewCollectdParser( + authFile string, + securityLevel string, + typesDB []string, +) (Parser, error) { + return collectd.NewCollectdParser(authFile, securityLevel, typesDB) +} From 360b10c4deecb5a3b5f9322cb5702c42ed205671 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 12 Apr 2017 10:42:11 -0700 Subject: [PATCH 059/201] Clarify precision documentation (#2655) --- internal/config/config.go | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/internal/config/config.go b/internal/config/config.go index f8c3041793336..61263f49ad675 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -85,8 +85,8 @@ type AgentConfig struct { // ie, if Interval=10s then always collect on :00, :10, :20, etc. RoundInterval bool - // By default, precision will be set to the same timestamp order as the - // collection interval, with the maximum being 1s. + // By default or when set to "0s", precision will be set to the same + // timestamp order as the collection interval, with the maximum being 1s. // ie, when interval = "10s", precision will be "1s" // when interval = "250ms", precision will be "1ms" // Precision will NOT be used for service inputs. It is up to each individual @@ -230,10 +230,13 @@ var header = `# Telegraf Configuration ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s flush_jitter = "0s" - ## By default, precision will be set to the same timestamp order as the - ## collection interval, with the maximum being 1s. - ## Precision will NOT be used for service inputs, such as logparser and statsd. - ## Valid values are "ns", "us" (or "µs"), "ms", "s". + ## By default or when set to "0s", precision will be set to the same + ## timestamp order as the collection interval, with the maximum being 1s. + ## ie, when interval = "10s", precision will be "1s" + ## when interval = "250ms", precision will be "1ms" + ## Precision will NOT be used for service inputs. It is up to each individual + ## service input to set the timestamp at the appropriate precision. + ## Valid time units are "ns", "us" (or "µs"), "ms", "s". 
precision = "" ## Logging configuration: From 49ab4e26f8ea6d22d52958fc29c8e8908fa8fca0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jes=C3=BAs=20Roncero?= Date: Wed, 12 Apr 2017 20:04:44 +0100 Subject: [PATCH 060/201] Nagios plugin documentation fix (#2659) --- docs/DATA_FORMATS_INPUT.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/DATA_FORMATS_INPUT.md b/docs/DATA_FORMATS_INPUT.md index 59287e4a4b05e..8f80b560ee382 100644 --- a/docs/DATA_FORMATS_INPUT.md +++ b/docs/DATA_FORMATS_INPUT.md @@ -428,13 +428,13 @@ Note: Nagios Input Data Formats is only supported in `exec` input plugin. ```toml [[inputs.exec]] ## Commands array - commands = ["/usr/lib/nagios/plugins/check_load", "-w 5,6,7 -c 7,8,9"] + commands = ["/usr/lib/nagios/plugins/check_load -w 5,6,7 -c 7,8,9"] ## measurement name suffix (for separating different commands) name_suffix = "_mycollector" ## Data format to consume. - ## Each data format has it's own unique set of configuration options, read + ## Each data format has its own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "nagios" From 3e0c55bff9cdbc9dec34c49d8d65972812ad9d9a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 12 Apr 2017 17:10:17 -0700 Subject: [PATCH 061/201] Update grok version (#2662) --- Godeps | 2 +- plugins/inputs/logparser/grok/grok_test.go | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/Godeps b/Godeps index a41d028c8745b..0a7cc30a5df1d 100644 --- a/Godeps +++ b/Godeps @@ -49,7 +49,7 @@ github.com/shirou/gopsutil dfbb3e40da8d6fcd1aa0d87003e965fe0ca745ea github.com/soniah/gosnmp 5ad50dc75ab389f8a1c9f8a67d3a1cd85f67ed15 github.com/streadway/amqp 63795daa9a446c920826655f26ba31c81c860fd6 github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987 -github.com/vjeantet/grok 83bfdfdfd1a8146795b28e547a8e3c8b28a466c2 +github.com/vjeantet/grok d73e972b60935c7fec0b4ffbc904ed39ecaf7efe github.com/wvanbergen/kafka bc265fedb9ff5b5c5d3c0fdcef4a819b3523d3ee github.com/wvanbergen/kazoo-go 968957352185472eacb69215fa3dbfcfdbac1096 github.com/yuin/gopher-lua 66c871e454fcf10251c61bf8eff02d0978cae75a diff --git a/plugins/inputs/logparser/grok/grok_test.go b/plugins/inputs/logparser/grok/grok_test.go index 4e0ead6e9a67d..64fb20c4365cd 100644 --- a/plugins/inputs/logparser/grok/grok_test.go +++ b/plugins/inputs/logparser/grok/grok_test.go @@ -687,3 +687,23 @@ func TestTsModder_Rollover(t *testing.T) { } assert.Equal(t, reftime.Add(time.Nanosecond*1000), modt) } + +func TestShortPatternRegression(t *testing.T) { + p := &Parser{ + Patterns: []string{"%{TS_UNIX:timestamp:ts-unix} %{NUMBER:value:int}"}, + CustomPatterns: ` + TS_UNIX %{DAY} %{MONTH} %{MONTHDAY} %{HOUR}:%{MINUTE}:%{SECOND} %{TZ} %{YEAR} + `, + } + require.NoError(t, p.Compile()) + + metric, err := p.ParseLine(`Wed Apr 12 13:10:34 PST 2017 42`) + require.NoError(t, err) + require.NotNil(t, metric) + + require.Equal(t, + map[string]interface{}{ + "value": int64(42), + }, + metric.Fields()) +} From 9388fff1f7366da864c1331ec3f7810a7a308282 Mon Sep 17 00:00:00 2001 From: Chris Goffinet Date: Wed, 12 Apr 2017 20:40:10 -0400 Subject: [PATCH 062/201] Fixed content-type header in output plugin OpenTSDB (#2663) --- plugins/outputs/opentsdb/opentsdb_http.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/plugins/outputs/opentsdb/opentsdb_http.go b/plugins/outputs/opentsdb/opentsdb_http.go index 912ca670a3111..e74e74f039448 100644 --- a/plugins/outputs/opentsdb/opentsdb_http.go +++ b/plugins/outputs/opentsdb/opentsdb_http.go @@ -134,7 +134,7 @@ func (o *openTSDBHttp) flush() error { if err != nil { return fmt.Errorf("Error when building request: %s", err.Error()) } - req.Header.Set("Content-Type", "applicaton/json") + req.Header.Set("Content-Type", "application/json") req.Header.Set("Content-Encoding", "gzip") if o.Debug { From 45c9b867f63b665c0edbf15dc71bf650100a7709 Mon Sep 17 00:00:00 2001 From: Gregory Kman Date: Wed, 12 Apr 2017 19:46:48 -0500 Subject: [PATCH 063/201] Update ping-input-plugin Readme (#2651) --- plugins/inputs/ping/README.md | 35 +++++++++++++++++++++++------------ plugins/inputs/ping/ping.go | 2 +- 2 files changed, 24 insertions(+), 13 deletions(-) diff --git a/plugins/inputs/ping/README.md b/plugins/inputs/ping/README.md index 38558a33cde86..b02345e8ec04d 100644 --- a/plugins/inputs/ping/README.md +++ b/plugins/inputs/ping/README.md @@ -1,20 +1,27 @@ -# Ping input plugin +# Ping Input Plugin This input plugin measures the round-trip time. -## Windows: ### Configuration: + ``` - ## urls to ping - urls = ["www.google.com"] # required - - ## number of pings to send per collection (ping -n ) - count = 4 # required - - ## Ping timeout, in seconds. 0 means default timeout (ping -w ) - Timeout = 0 +# NOTE: this plugin forks the ping command. You may need to set capabilities +# via setcap cap_net_raw+p /bin/ping +[[inputs.ping]] +## List of urls to ping +urls = ["www.google.com"] # required +## number of pings to send per collection (ping -c ) +# count = 1 +## interval, in s, at which to ping. 0 == default (ping -i ) +# ping_interval = 1.0 +## per-ping timeout, in s. 0 == no timeout (ping -W ) +# timeout = 1.0 +## interface to send ping from (ping -I ) +# interface = "" ``` + ### Measurements & Fields: + - packets_transmitted ( from ping output ) - reply_received ( increasing only on valid metric from echo reply, e.g. 'Destination net unreachable' reply will increment packets_received but not reply_received ) - packets_received ( from ping output ) @@ -25,12 +32,16 @@ This input plugin measures the round-trip time. - average_response_ms ( compute from minimum_response_ms and maximum_response_ms ) - minimum_response_ms ( from ping output ) - maximum_response_ms ( from ping output ) - + ### Tags: - -- server + +- host +- url ### Example Output: + ``` +$ ./telegraf -config telegraf.conf -input-filter ping -test * Plugin: ping, Collection 1 ping,host=WIN-PBAPLP511R7,url=www.google.com average_response_ms=7i,maximum_response_ms=9i,minimum_response_ms=7i,packets_received=4i,packets_transmitted=4i,percent_packet_loss=0,percent_reply_loss=0,reply_received=4i 1469879119000000000 ``` diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go index 32264eec7efdb..f5256750d5e40 100644 --- a/plugins/inputs/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -49,7 +49,7 @@ const sampleConfig = ` ## NOTE: this plugin forks the ping command. 
You may need to set capabilities ## via setcap cap_net_raw+p /bin/ping # - ## urls to ping + ## List of urls to ping urls = ["www.google.com"] # required ## number of pings to send per collection (ping -c ) # count = 1 From dff216c44d5fae38db0ae78a5dc67a320b5c5397 Mon Sep 17 00:00:00 2001 From: ingosus Date: Thu, 13 Apr 2017 22:59:28 +0300 Subject: [PATCH 064/201] Feature #1820: add testing without outputs (#2446) --- CHANGELOG.md | 1 + cmd/telegraf/telegraf.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a2d9fc68e0f51..dd3ee39dfd63a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -68,6 +68,7 @@ be deprecated eventually. - [#2637](https://github.com/influxdata/telegraf/issues/2637): Add support for precision in http_listener - [#2636](https://github.com/influxdata/telegraf/pull/2636): Add `message_len_max` option to `kafka_consumer` input - [#1100](https://github.com/influxdata/telegraf/issues/1100): Add collectd parser +- [#1820](https://github.com/influxdata/telegraf/issues/1820): easier plugin testing without outputs ### Bugfixes diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 40e90a1ec6b6b..af11e6682eb31 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -144,7 +144,7 @@ func reloadLoop( log.Fatal("E! " + err.Error()) } } - if len(c.Outputs) == 0 { + if !*fTest && len(c.Outputs) == 0 { log.Fatalf("E! Error: no outputs found, did you provide a valid config file?") } if len(c.Inputs) == 0 { From cadd845b36e7ed745e136e4bc42ff4caab6b1c82 Mon Sep 17 00:00:00 2001 From: calerogers Date: Thu, 13 Apr 2017 15:53:02 -0700 Subject: [PATCH 065/201] Irqstat input plugin (#2494) closes #2469 --- CHANGELOG.md | 1 + README.md | 1 + plugins/inputs/all/all.go | 1 + plugins/inputs/interrupts/README.md | 35 +++++ plugins/inputs/interrupts/interrupts.go | 140 +++++++++++++++++++ plugins/inputs/interrupts/interrupts_test.go | 59 ++++++++ 6 files changed, 237 insertions(+) create mode 100644 plugins/inputs/interrupts/README.md create mode 100644 plugins/inputs/interrupts/interrupts.go create mode 100644 plugins/inputs/interrupts/interrupts_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index dd3ee39dfd63a..d4792790f70ea 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -41,6 +41,7 @@ be deprecated eventually. ### Features +- [#2494](https://github.com/influxdata/telegraf/pull/2494): Add interrupts input plugin. - [#2094](https://github.com/influxdata/telegraf/pull/2094): Add generic socket listener & writer. - [#2204](https://github.com/influxdata/telegraf/pull/2204): Extend http_response to support searching for a substring in response. Return 1 if found, else 0. - [#2137](https://github.com/influxdata/telegraf/pull/2137): Added userstats to mysql input plugin. diff --git a/README.md b/README.md index f46c2e2984d0f..2dc6997d6944d 100644 --- a/README.md +++ b/README.md @@ -123,6 +123,7 @@ configuration options. 
* [httpjson](./plugins/inputs/httpjson) (generic JSON-emitting http service plugin) * [internal](./plugins/inputs/internal) * [influxdb](./plugins/inputs/influxdb) +* [interrupts](./plugins/inputs/interrupts) * [ipmi_sensor](./plugins/inputs/ipmi_sensor) * [iptables](./plugins/inputs/iptables) * [jolokia](./plugins/inputs/jolokia) diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 983179e903bdc..f7207da842e35 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -30,6 +30,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/httpjson" _ "github.com/influxdata/telegraf/plugins/inputs/influxdb" _ "github.com/influxdata/telegraf/plugins/inputs/internal" + _ "github.com/influxdata/telegraf/plugins/inputs/interrupts" _ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor" _ "github.com/influxdata/telegraf/plugins/inputs/iptables" _ "github.com/influxdata/telegraf/plugins/inputs/jolokia" diff --git a/plugins/inputs/interrupts/README.md b/plugins/inputs/interrupts/README.md new file mode 100644 index 0000000000000..aec30094eb552 --- /dev/null +++ b/plugins/inputs/interrupts/README.md @@ -0,0 +1,35 @@ +# Interrupts Input Plugin + +The interrupts plugin gathers metrics about IRQs from `/proc/interrupts` and `/proc/softirqs`. + +### Configuration +``` +[[inputs.interrupts]] + ## A list of IRQs to include for metric ingestion, if not specified + ## will default to collecting all IRQs. + include = ["0", "1", "30", "NET_RX"] +``` + +### Measurements +There are two measurements reported by this plugin. +- `interrupts` gathers metrics from the `/proc/interrupts` file +- `soft_interrupts` gathers metrics from the `/proc/softirqs` file + +### Fields +- CPUx: the amount of interrupts for the IRQ handled by that CPU +- total: total amount of interrupts for all CPUs + +### Tags +- irq: the IRQ +- type: the type of interrupt +- device: the name of the device that is located at that IRQ + +### Example Output +``` +./telegraf -config ~/interrupts_config.conf -test +* Plugin: inputs.interrupts, Collection 1 +> interrupts,irq=0,type=IO-APIC,device=2-edge\ timer,host=hostname CPU0=23i,total=23i 1489346531000000000 +> interrupts,irq=1,host=hostname,type=IO-APIC,device=1-edge\ i8042 CPU0=9i,total=9i 1489346531000000000 +> interrupts,irq=30,type=PCI-MSI,device=65537-edge\ virtio1-input.0,host=hostname CPU0=1i,total=1i 1489346531000000000 +> soft_interrupts,irq=NET_RX,host=hostname CPU0=280879i,total=280879i 1489346531000000000 +``` diff --git a/plugins/inputs/interrupts/interrupts.go b/plugins/inputs/interrupts/interrupts.go new file mode 100644 index 0000000000000..1feb6441cb1e6 --- /dev/null +++ b/plugins/inputs/interrupts/interrupts.go @@ -0,0 +1,140 @@ +package interrupts + +import ( + "bufio" + "fmt" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" + "io/ioutil" + "strconv" + "strings" +) + +type Interrupts struct{} + +type IRQ struct { + ID string + Type string + Device string + Total int64 + Cpus []int64 +} + +func NewIRQ(id string) *IRQ { + return &IRQ{ID: id, Cpus: []int64{}} +} + +const sampleConfig = ` + ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e. + # [inputs.interrupts.tagdrop] + # irq = [ "NET_RX", "TASKLET" ] +` + +func (s *Interrupts) Description() string { + return "This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs." 
+} + +func (s *Interrupts) SampleConfig() string { + return sampleConfig +} + +func parseInterrupts(irqdata string) ([]IRQ, error) { + var irqs []IRQ + var cpucount int + scanner := bufio.NewScanner(strings.NewReader(irqdata)) + ok := scanner.Scan() + if ok { + cpus := strings.Fields(scanner.Text()) + if cpus[0] == "CPU0" { + cpucount = len(cpus) + } + } else if scanner.Err() != nil { + return irqs, fmt.Errorf("Reading %s: %s", scanner.Text(), scanner.Err()) + } + for scanner.Scan() { + fields := strings.Fields(scanner.Text()) + if !strings.HasSuffix(fields[0], ":") { + continue + } + irqid := strings.TrimRight(fields[0], ":") + irq := NewIRQ(irqid) + irqvals := fields[1:len(fields)] + for i := 0; i < cpucount; i++ { + if i < len(irqvals) { + irqval, err := strconv.ParseInt(irqvals[i], 10, 64) + if err != nil { + return irqs, fmt.Errorf("Unable to parse %q from %q: %s", irqvals[i], scanner.Text(), err) + } + irq.Cpus = append(irq.Cpus, irqval) + } + } + for _, irqval := range irq.Cpus { + irq.Total += irqval + } + _, err := strconv.ParseInt(irqid, 10, 64) + if err == nil && len(fields) >= cpucount+2 { + irq.Type = fields[cpucount+1] + irq.Device = strings.Join(fields[cpucount+2:], " ") + } else if len(fields) > cpucount { + irq.Type = strings.Join(fields[cpucount+1:], " ") + } + irqs = append(irqs, *irq) + } + return irqs, nil +} + +func fileToString(path string) (string, error) { + data, err := ioutil.ReadFile(path) + if err != nil { + return "", err + } + content := string(data) + return content, nil +} + +func gatherTagsFields(irq IRQ) (map[string]string, map[string]interface{}) { + tags := map[string]string{"irq": irq.ID, "type": irq.Type, "device": irq.Device} + fields := map[string]interface{}{"total": irq.Total} + for i := 0; i < len(irq.Cpus); i++ { + cpu := fmt.Sprintf("CPU%d", i) + fields[cpu] = irq.Cpus[i] + } + return tags, fields +} + +func (s *Interrupts) Gather(acc telegraf.Accumulator) error { + irqdata, err := fileToString("/proc/interrupts") + if err != nil { + acc.AddError(fmt.Errorf("Reading %s: %s", "/proc/interrupts", err)) + } + irqs, err := parseInterrupts(irqdata) + if err != nil { + acc.AddError(fmt.Errorf("Parsing %s: %s", "/proc/interrupts", err)) + } else { + for _, irq := range irqs { + tags, fields := gatherTagsFields(irq) + acc.AddFields("interrupts", fields, tags) + } + } + + irqdata, err = fileToString("/proc/softirqs") + if err != nil { + acc.AddError(fmt.Errorf("Reading %s: %s", "/proc/softirqs", err)) + } + irqs, err = parseInterrupts(irqdata) + if err != nil { + acc.AddError(fmt.Errorf("Parsing %s: %s", "/proc/softirqs", err)) + } else { + for _, irq := range irqs { + tags, fields := gatherTagsFields(irq) + acc.AddFields("softirqs", fields, tags) + } + } + return nil +} + +func init() { + inputs.Add("interrupts", func() telegraf.Input { + return &Interrupts{} + }) +} diff --git a/plugins/inputs/interrupts/interrupts_test.go b/plugins/inputs/interrupts/interrupts_test.go new file mode 100644 index 0000000000000..d968eb0949189 --- /dev/null +++ b/plugins/inputs/interrupts/interrupts_test.go @@ -0,0 +1,59 @@ +package interrupts + +import ( + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "testing" +) + +func TestParseInterrupts(t *testing.T) { + interruptStr := ` CPU0 CPU1 + 0: 134 0 IO-APIC-edge timer + 1: 7 3 IO-APIC-edge i8042 +NMI: 0 0 Non-maskable interrupts +LOC: 2338608687 2334309625 Local timer interrupts +MIS: 0 +NET_RX: 867028 225 +TASKLET: 205 0` + + parsed := []IRQ{ + IRQ{ + ID: "0", Type: 
"IO-APIC-edge", Device: "timer", + Cpus: []int64{int64(134), int64(0)}, Total: int64(134), + }, + IRQ{ + ID: "1", Type: "IO-APIC-edge", Device: "i8042", + Cpus: []int64{int64(7), int64(3)}, Total: int64(10), + }, + IRQ{ + ID: "NMI", Type: "Non-maskable interrupts", + Cpus: []int64{int64(0), int64(0)}, Total: int64(0), + }, + IRQ{ + ID: "LOC", Type: "Local timer interrupts", + Cpus: []int64{int64(2338608687), int64(2334309625)}, + Total: int64(4672918312), + }, + IRQ{ + ID: "MIS", Cpus: []int64{int64(0)}, Total: int64(0), + }, + IRQ{ + ID: "NET_RX", Cpus: []int64{int64(867028), int64(225)}, + Total: int64(867253), + }, + IRQ{ + ID: "TASKLET", Cpus: []int64{int64(205), int64(0)}, + Total: int64(205), + }, + } + got, err := parseInterrupts(interruptStr) + require.Equal(t, nil, err) + require.NotEqual(t, 0, len(got)) + require.Equal(t, len(got), len(parsed)) + for i := 0; i < len(parsed); i++ { + assert.Equal(t, parsed[i], got[i]) + for k := 0; k < len(parsed[i].Cpus); k++ { + assert.Equal(t, parsed[i].Cpus[k], got[i].Cpus[k]) + } + } +} From a12e082dbe875fec6bac5e027248694bbccb72b3 Mon Sep 17 00:00:00 2001 From: calerogers Date: Fri, 14 Apr 2017 13:40:36 -0700 Subject: [PATCH 066/201] Refactor interrupts plugin code (#2670) --- plugins/inputs/interrupts/README.md | 6 +- plugins/inputs/interrupts/interrupts.go | 63 +++++++------------- plugins/inputs/interrupts/interrupts_test.go | 5 +- 3 files changed, 29 insertions(+), 45 deletions(-) diff --git a/plugins/inputs/interrupts/README.md b/plugins/inputs/interrupts/README.md index aec30094eb552..f823aae07c526 100644 --- a/plugins/inputs/interrupts/README.md +++ b/plugins/inputs/interrupts/README.md @@ -5,9 +5,9 @@ The interrupts plugin gathers metrics about IRQs from `/proc/interrupts` and `/p ### Configuration ``` [[inputs.interrupts]] - ## A list of IRQs to include for metric ingestion, if not specified - ## will default to collecting all IRQs. - include = ["0", "1", "30", "NET_RX"] + ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e. 
+ # [inputs.interrupts.tagdrop] + # irq = [ "NET_RX", "TASKLET" ] ``` ### Measurements diff --git a/plugins/inputs/interrupts/interrupts.go b/plugins/inputs/interrupts/interrupts.go index 1feb6441cb1e6..75cbf3be13917 100644 --- a/plugins/inputs/interrupts/interrupts.go +++ b/plugins/inputs/interrupts/interrupts.go @@ -5,7 +5,8 @@ import ( "fmt" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" - "io/ioutil" + "io" + "os" "strconv" "strings" ) @@ -38,18 +39,16 @@ func (s *Interrupts) SampleConfig() string { return sampleConfig } -func parseInterrupts(irqdata string) ([]IRQ, error) { +func parseInterrupts(r io.Reader) ([]IRQ, error) { var irqs []IRQ var cpucount int - scanner := bufio.NewScanner(strings.NewReader(irqdata)) - ok := scanner.Scan() - if ok { + scanner := bufio.NewScanner(r) + if scanner.Scan() { cpus := strings.Fields(scanner.Text()) - if cpus[0] == "CPU0" { - cpucount = len(cpus) + if cpus[0] != "CPU0" { + return nil, fmt.Errorf("Expected first line to start with CPU0, but was %s", scanner.Text()) } - } else if scanner.Err() != nil { - return irqs, fmt.Errorf("Reading %s: %s", scanner.Text(), scanner.Err()) + cpucount = len(cpus) } for scanner.Scan() { fields := strings.Fields(scanner.Text()) @@ -80,16 +79,10 @@ func parseInterrupts(irqdata string) ([]IRQ, error) { } irqs = append(irqs, *irq) } - return irqs, nil -} - -func fileToString(path string) (string, error) { - data, err := ioutil.ReadFile(path) - if err != nil { - return "", err + if scanner.Err() != nil { + return nil, fmt.Errorf("Error scanning file: %s", scanner.Err()) } - content := string(data) - return content, nil + return irqs, nil } func gatherTagsFields(irq IRQ) (map[string]string, map[string]interface{}) { @@ -103,31 +96,21 @@ func gatherTagsFields(irq IRQ) (map[string]string, map[string]interface{}) { } func (s *Interrupts) Gather(acc telegraf.Accumulator) error { - irqdata, err := fileToString("/proc/interrupts") - if err != nil { - acc.AddError(fmt.Errorf("Reading %s: %s", "/proc/interrupts", err)) - } - irqs, err := parseInterrupts(irqdata) - if err != nil { - acc.AddError(fmt.Errorf("Parsing %s: %s", "/proc/interrupts", err)) - } else { - for _, irq := range irqs { - tags, fields := gatherTagsFields(irq) - acc.AddFields("interrupts", fields, tags) + for measurement, file := range map[string]string{"interrupts": "/proc/interrupts", "soft_interrupts": "/proc/softirqs"} { + f, err := os.Open(file) + if err != nil { + acc.AddError(fmt.Errorf("Could not open file: %s", file)) + continue + } + defer f.Close() + irqs, err := parseInterrupts(f) + if err != nil { + acc.AddError(fmt.Errorf("Parsing %s: %s", file, err)) + continue } - } - - irqdata, err = fileToString("/proc/softirqs") - if err != nil { - acc.AddError(fmt.Errorf("Reading %s: %s", "/proc/softirqs", err)) - } - irqs, err = parseInterrupts(irqdata) - if err != nil { - acc.AddError(fmt.Errorf("Parsing %s: %s", "/proc/softirqs", err)) - } else { for _, irq := range irqs { tags, fields := gatherTagsFields(irq) - acc.AddFields("softirqs", fields, tags) + acc.AddFields(measurement, fields, tags) } } return nil diff --git a/plugins/inputs/interrupts/interrupts_test.go b/plugins/inputs/interrupts/interrupts_test.go index d968eb0949189..6c76c8504c1c4 100644 --- a/plugins/inputs/interrupts/interrupts_test.go +++ b/plugins/inputs/interrupts/interrupts_test.go @@ -1,6 +1,7 @@ package interrupts import ( + "bytes" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "testing" 
@@ -15,7 +16,7 @@ LOC: 2338608687 2334309625 Local timer interrupts
 MIS: 0
 NET_RX: 867028 225
 TASKLET: 205 0`
-
+	f := bytes.NewBufferString(interruptStr)
 	parsed := []IRQ{
 		IRQ{
 			ID: "0", Type: "IO-APIC-edge", Device: "timer",
@@ -46,7 +47,7 @@ TASKLET: 205 0`
 			Total: int64(205),
 		},
 	}
-	got, err := parseInterrupts(interruptStr)
+	got, err := parseInterrupts(f)
 	require.Equal(t, nil, err)
 	require.NotEqual(t, 0, len(got))
 	require.Equal(t, len(got), len(parsed))

From b90a5b48a1e322a8484ae496311ff7e95e4eb5a2 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Fri, 14 Apr 2017 13:47:43 -0700
Subject: [PATCH 067/201] Improve logparser README (#2664)

---
 plugins/inputs/logparser/README.md    | 142 ++++++++++++++++++++++----
 plugins/inputs/logparser/grok/grok.go |   1 +
 2 files changed, 121 insertions(+), 22 deletions(-)

diff --git a/plugins/inputs/logparser/README.md b/plugins/inputs/logparser/README.md
index 5973d9f422c8b..177d77a98563f 100644
--- a/plugins/inputs/logparser/README.md
+++ b/plugins/inputs/logparser/README.md
@@ -1,6 +1,6 @@
-# logparser Input Plugin
+# Logparser Input Plugin
 
-The logparser plugin streams and parses the given logfiles. Currently it only
+The `logparser` plugin streams and parses the given logfiles. Currently it
 has the capability of parsing "grok" patterns from logfiles, which also
 supports regex patterns.
 
@@ -37,35 +37,28 @@ regex patterns.
   '''
 ```
 
-## Grok Parser
-
-The grok parser uses a slightly modified version of logstash "grok" patterns,
-with the format
-
-```
-%{<capture_syntax>[:<semantic_name>][:<modifier>]}
-```
-
-Telegraf has many of it's own
-[built-in patterns](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/logparser/grok/patterns/influx-patterns),
-as well as supporting
-[logstash's builtin patterns](https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns).
-
+### Grok Parser
 The best way to get acquainted with grok patterns is to read the logstash
 docs, which are available here:
   https://www.elastic.co/guide/en/logstash/current/plugins-filters-grok.html
 
+The Telegraf grok parser uses a slightly modified version of logstash "grok"
+patterns, with the format
 
 ```
 %{<capture_syntax>[:<semantic_name>][:<modifier>]}
 ```
 
+The `capture_syntax` defines the grok pattern that's used to parse the input
+line and the `semantic_name` is used to name the field or tag. The extension
+`modifier` controls the data type that the parsed item is converted to or
+other special handling.
 By default all named captures are converted into string fields.
-Modifiers can be used to convert captures to other types or tags.
 Timestamp modifiers can be used to convert captures to the timestamp of the
- parsed metric.
-
+parsed metric. If no timestamp is parsed the metric will be created using the
+current time.
-
 - Available modifiers:
   - string (default if nothing is specified)
@@ -91,7 +84,112 @@ Timestamp modifiers can be used to convert captures to the timestamp of the
   - ts-epochnano (nanoseconds since unix epoch)
   - ts-"CUSTOM"
 
 CUSTOM time layouts must be within quotes and be the representation of the
 "reference time", which is `Mon Jan 2 15:04:05 -0700 MST 2006`
 See https://golang.org/pkg/time/#Parse for more details.
+
+Telegraf has many of its own
+[built-in patterns](./grok/patterns/influx-patterns),
+as well as supporting
+[logstash's builtin patterns](https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns).
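+
+For example, a standard Apache access log can usually be parsed with a single
+built-in pattern. A minimal sketch, assuming the built-in `COMBINED_LOG_FORMAT`
+pattern and an illustrative log path:
+
+```toml
+[[inputs.logparser]]
+  files = ["/var/log/apache/access.log"]
+  [inputs.logparser.grok]
+    patterns = ["%{COMBINED_LOG_FORMAT}"]
+```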
+
+If you need help building patterns to match your logs,
+you will find the https://grokdebug.herokuapp.com application quite useful!
+
+#### Timestamp Examples
+
+This example input and config parses a file using a custom timestamp conversion:
+
+```
+2017-02-21 13:10:34 value=42
+```
+
+```toml
+[[inputs.logparser]]
+  [inputs.logparser.grok]
+    patterns = ['%{TIMESTAMP_ISO8601:timestamp:ts-"2006-01-02 15:04:05"} value=%{NUMBER:value:int}']
+```
+
+This example parses a file using a built-in conversion and a custom pattern:
+
+```
+Wed Apr 12 13:10:34 PST 2017 value=42
+```
+
+```toml
+[[inputs.logparser]]
+  [inputs.logparser.grok]
+    patterns = ["%{TS_UNIX:timestamp:ts-unix} value=%{NUMBER:value:int}"]
+    custom_patterns = '''
+      TS_UNIX %{DAY} %{MONTH} %{MONTHDAY} %{HOUR}:%{MINUTE}:%{SECOND} %{TZ} %{YEAR}
+    '''
+```
+
+#### TOML Escaping
+
+When saving patterns to the configuration file, keep in mind the different TOML
+[string](https://github.com/toml-lang/toml#string) types and the escaping
+rules for each. These escaping rules must be applied in addition to the
+escaping required by the grok syntax. Using the multi-line literal string
+syntax with `'''` may be useful.
+
+The following config examples will parse this input file:
+
+```
+|42|\uD83D\uDC2F|'telegraf'|
+```
+
+Since `|` is a special character in the grok language, we must escape it to
+get a literal `|`. With a basic TOML string, special characters such as
+backslash must be escaped, requiring us to escape the backslash a second time.
+
+```toml
+[[inputs.logparser]]
+  [inputs.logparser.grok]
+    patterns = ["\\|%{NUMBER:value:int}\\|%{UNICODE_ESCAPE:escape}\\|'%{WORD:name}'\\|"]
+    custom_patterns = "UNICODE_ESCAPE (?:\\\\u[0-9A-F]{4})+"
+```
+
+We cannot use a literal TOML string for the pattern, because we cannot match a
+`'` within it. However, it works well for the custom pattern.
+```toml
+[[inputs.logparser]]
+  [inputs.logparser.grok]
+    patterns = ["\\|%{NUMBER:value:int}\\|%{UNICODE_ESCAPE:escape}\\|'%{WORD:name}'\\|"]
+    custom_patterns = 'UNICODE_ESCAPE (?:\\u[0-9A-F]{4})+'
+```
+
+A multi-line literal string allows us to encode the pattern:
+```toml
+[[inputs.logparser]]
+  [inputs.logparser.grok]
+    patterns = ['''
+      \|%{NUMBER:value:int}\|%{UNICODE_ESCAPE:escape}\|'%{WORD:name}'\|
+    ''']
+    custom_patterns = 'UNICODE_ESCAPE (?:\\u[0-9A-F]{4})+'
+```
+
+### Tips for creating patterns
+
+Writing complex patterns can be difficult; here is some advice for writing a
+new pattern or testing a pattern developed [online](https://grokdebug.herokuapp.com).
+
+Create a file output that writes to stdout, and disable other outputs while
+testing. This will allow you to see the captured metrics. Keep in mind that
+the file output will only print once per `flush_interval`.
+
+```toml
+[[outputs.file]]
+  files = ["stdout"]
+```
+
+- Start with a file containing only a single line of your input.
+- Remove all but the first token or piece of the line.
+- Add the section of your pattern to match this piece to your configuration file.
+- Verify that the metric is parsed successfully by running Telegraf.
+- If successful, add the next token, update the pattern and retest.
+- Continue one token at a time until the entire line is successfully parsed (see the short sketch below).
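+
+For instance, for the second timestamp example above,
+`Wed Apr 12 13:10:34 PST 2017 value=42`, the progression might start with only
+the first token. A sketch of the workflow, not a finished config:
+
+```toml
+patterns = ["%{DAY:day}"]
+```
+
+and end, once every token has been verified, with the full line:
+
+```toml
+patterns = ["%{TS_UNIX:timestamp:ts-unix} value=%{NUMBER:value:int}"]
+```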
+ +### Additional Resources + +- https://www.influxdata.com/telegraf-correlate-log-metrics-data-performance-bottlenecks/ diff --git a/plugins/inputs/logparser/grok/grok.go b/plugins/inputs/logparser/grok/grok.go index 7131b824923e1..f684e933907d5 100644 --- a/plugins/inputs/logparser/grok/grok.go +++ b/plugins/inputs/logparser/grok/grok.go @@ -168,6 +168,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { } if len(values) == 0 { + log.Printf("D! Grok no match found for: %q", line) return nil, nil } From b968759d1015f47bf60de5ec05e762a994692110 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 13 Apr 2017 18:56:04 -0700 Subject: [PATCH 068/201] Use variadic disk.IOCounters() function --- Godeps | 2 +- plugins/inputs/system/ps.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Godeps b/Godeps index 0a7cc30a5df1d..510f5b1ed00f5 100644 --- a/Godeps +++ b/Godeps @@ -45,7 +45,7 @@ github.com/prometheus/common dd2f054febf4a6c00f2343686efb775948a8bff4 github.com/prometheus/procfs 1878d9fbb537119d24b21ca07effd591627cd160 github.com/rcrowley/go-metrics 1f30fe9094a513ce4c700b9a54458bbb0c96996c github.com/samuel/go-zookeeper 1d7be4effb13d2d908342d349d71a284a7542693 -github.com/shirou/gopsutil dfbb3e40da8d6fcd1aa0d87003e965fe0ca745ea +github.com/shirou/gopsutil 70693b6a3da51a8a686d31f1b346077bbc066062 github.com/soniah/gosnmp 5ad50dc75ab389f8a1c9f8a67d3a1cd85f67ed15 github.com/streadway/amqp 63795daa9a446c920826655f26ba31c81c860fd6 github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987 diff --git a/plugins/inputs/system/ps.go b/plugins/inputs/system/ps.go index d253278122d35..20e01742a49e8 100644 --- a/plugins/inputs/system/ps.go +++ b/plugins/inputs/system/ps.go @@ -121,7 +121,7 @@ func (s *systemPS) NetConnections() ([]net.ConnectionStat, error) { } func (s *systemPS) DiskIO(names []string) (map[string]disk.IOCountersStat, error) { - m, err := disk.IOCountersForNames(names) + m, err := disk.IOCounters(names...) if err == internal.NotImplementedError { return nil, nil } From dc5779e2a7f60bd28db4161115d0e2b871b27e76 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 14 Apr 2017 17:32:14 -0700 Subject: [PATCH 069/201] Rename heap_objects_bytes to heap_objects in internal plugin. (#2674) * Rename heap_objects_bytes to heap_objects in internal plugin. This field does not contain bytes fixes #2671 --- CHANGELOG.md | 1 + plugins/inputs/internal/internal.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d4792790f70ea..989e3f7a9298c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -102,6 +102,7 @@ be deprecated eventually. - [#2410](https://github.com/influxdata/telegraf/issues/2410): Fix connection leak in postgresql. - [#2628](https://github.com/influxdata/telegraf/issues/2628): Set default measurement name for snmp input. 
- [#2649](https://github.com/influxdata/telegraf/pull/2649): Improve performance of diskio with many disks +- [#2671](https://github.com/influxdata/telegraf/issues/2671): The internal input plugin uses the wrong units for `heap_objects` ## v1.2.1 [2017-02-01] diff --git a/plugins/inputs/internal/internal.go b/plugins/inputs/internal/internal.go index f6123edd59eca..8b5286f5637f5 100644 --- a/plugins/inputs/internal/internal.go +++ b/plugins/inputs/internal/internal.go @@ -48,7 +48,7 @@ func (s *Self) Gather(acc telegraf.Accumulator) error { "heap_idle_bytes": m.HeapIdle, // bytes in idle spans "heap_in_use_bytes": m.HeapInuse, // bytes in non-idle span "heap_released_bytes": m.HeapReleased, // bytes released to the OS - "heap_objects_bytes": m.HeapObjects, // total number of allocated objects + "heap_objects": m.HeapObjects, // total number of allocated objects "num_gc": m.NumGC, } acc.AddFields("internal_memstats", fields, map[string]string{}) From 58ee96267900a5451efd9eb488b7f4e72fe4694d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20de=20Metz?= Date: Mon, 17 Apr 2017 18:42:03 +0000 Subject: [PATCH 070/201] GitHub webhooks: check signature (#2493) --- CHANGELOG.md | 1 + etc/telegraf.conf | 1 + plugins/inputs/webhooks/github/README.md | 2 ++ .../inputs/webhooks/github/github_webhooks.go | 28 ++++++++++++++-- .../webhooks/github/github_webhooks_test.go | 33 +++++++++++++++++++ plugins/inputs/webhooks/webhooks.go | 1 + 6 files changed, 63 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 989e3f7a9298c..6715ef3bb8fd9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -70,6 +70,7 @@ be deprecated eventually. - [#2636](https://github.com/influxdata/telegraf/pull/2636): Add `message_len_max` option to `kafka_consumer` input - [#1100](https://github.com/influxdata/telegraf/issues/1100): Add collectd parser - [#1820](https://github.com/influxdata/telegraf/issues/1820): easier plugin testing without outputs +- [#2493](https://github.com/influxdata/telegraf/pull/2493): Check signature in the GitHub webhook plugin ### Bugfixes diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 63e41d7bbcd45..07ae5ac8fd036 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -2382,6 +2382,7 @@ # # [inputs.webhooks.github] # path = "/github" +# # secret = "" # # [inputs.webhooks.mandrill] # path = "/mandrill" diff --git a/plugins/inputs/webhooks/github/README.md b/plugins/inputs/webhooks/github/README.md index 68594cd78f86b..908d92a639d57 100644 --- a/plugins/inputs/webhooks/github/README.md +++ b/plugins/inputs/webhooks/github/README.md @@ -2,6 +2,8 @@ You should configure your Organization's Webhooks to point at the `webhooks` service. To do this go to `github.com/{my_organization}` and click `Settings > Webhooks > Add webhook`. In the resulting menu set `Payload URL` to `http://:1619/github`, `Content type` to `application/json` and under the section `Which events would you like to trigger this webhook?` select 'Send me everything'. By default all of the events will write to the `github_webhooks` measurement, this is configurable by setting the `measurement_name` in the config file. +You can also add a secret that will be used by telegraf to verify the authenticity of the requests. + ## Events The titles of the following sections are links to the full payloads and details for each event. The body contains what information from the event is persisted. 
The format is as follows: diff --git a/plugins/inputs/webhooks/github/github_webhooks.go b/plugins/inputs/webhooks/github/github_webhooks.go index a31c6fdf2280c..0bb792bf5df08 100644 --- a/plugins/inputs/webhooks/github/github_webhooks.go +++ b/plugins/inputs/webhooks/github/github_webhooks.go @@ -1,6 +1,9 @@ package github import ( + "crypto/hmac" + "crypto/sha1" + "encoding/hex" "encoding/json" "io/ioutil" "log" @@ -11,8 +14,9 @@ import ( ) type GithubWebhook struct { - Path string - acc telegraf.Accumulator + Path string + Secret string + acc telegraf.Accumulator } func (gh *GithubWebhook) Register(router *mux.Router, acc telegraf.Accumulator) { @@ -23,12 +27,19 @@ func (gh *GithubWebhook) Register(router *mux.Router, acc telegraf.Accumulator) func (gh *GithubWebhook) eventHandler(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() - eventType := r.Header["X-Github-Event"][0] + eventType := r.Header.Get("X-Github-Event") data, err := ioutil.ReadAll(r.Body) if err != nil { w.WriteHeader(http.StatusBadRequest) return } + + if gh.Secret != "" && !checkSignature(gh.Secret, data, r.Header.Get("X-Hub-Signature")) { + log.Printf("E! Fail to check the github webhook signature\n") + w.WriteHeader(http.StatusBadRequest) + return + } + e, err := NewEvent(data, eventType) if err != nil { w.WriteHeader(http.StatusBadRequest) @@ -108,3 +119,14 @@ func NewEvent(data []byte, name string) (Event, error) { } return nil, &newEventError{"Not a recognized event type"} } + +func checkSignature(secret string, data []byte, signature string) bool { + return hmac.Equal([]byte(signature), []byte(generateSignature(secret, data))) +} + +func generateSignature(secret string, data []byte) string { + mac := hmac.New(sha1.New, []byte(secret)) + mac.Write(data) + result := mac.Sum(nil) + return "sha1=" + hex.EncodeToString(result) +} diff --git a/plugins/inputs/webhooks/github/github_webhooks_test.go b/plugins/inputs/webhooks/github/github_webhooks_test.go index 0ec9917264374..65041e4a06125 100644 --- a/plugins/inputs/webhooks/github/github_webhooks_test.go +++ b/plugins/inputs/webhooks/github/github_webhooks_test.go @@ -21,6 +21,19 @@ func GithubWebhookRequest(event string, jsonString string, t *testing.T) { } } +func GithubWebhookRequestWithSignature(event string, jsonString string, t *testing.T, signature string, expectedStatus int) { + var acc testutil.Accumulator + gh := &GithubWebhook{Path: "/github", Secret: "signature", acc: &acc} + req, _ := http.NewRequest("POST", "/github", strings.NewReader(jsonString)) + req.Header.Add("X-Github-Event", event) + req.Header.Add("X-Hub-Signature", signature) + w := httptest.NewRecorder() + gh.eventHandler(w, req) + if w.Code != expectedStatus { + t.Errorf("POST "+event+" returned HTTP status code %v.\nExpected %v", w.Code, expectedStatus) + } +} + func TestCommitCommentEvent(t *testing.T) { GithubWebhookRequest("commit_comment", CommitCommentEventJSON(), t) } @@ -100,3 +113,23 @@ func TestTeamAddEvent(t *testing.T) { func TestWatchEvent(t *testing.T) { GithubWebhookRequest("watch", WatchEventJSON(), t) } + +func TestEventWithSignatureFail(t *testing.T) { + GithubWebhookRequestWithSignature("watch", WatchEventJSON(), t, "signature", http.StatusBadRequest) +} + +func TestEventWithSignatureSuccess(t *testing.T) { + GithubWebhookRequestWithSignature("watch", WatchEventJSON(), t, generateSignature("signature", []byte(WatchEventJSON())), http.StatusOK) +} + +func TestCheckSignatureSuccess(t *testing.T) { + if !checkSignature("my_little_secret", 
[]byte("random-signature-body"), "sha1=3dca279e731c97c38e3019a075dee9ebbd0a99f0") { + t.Errorf("check signature failed") + } +} + +func TestCheckSignatureFailed(t *testing.T) { + if checkSignature("m_little_secret", []byte("random-signature-body"), "sha1=3dca279e731c97c38e3019a075dee9ebbd0a99f0") { + t.Errorf("check signature failed") + } +} diff --git a/plugins/inputs/webhooks/webhooks.go b/plugins/inputs/webhooks/webhooks.go index fcddbebd7f6e3..bc8519d7a4ede 100644 --- a/plugins/inputs/webhooks/webhooks.go +++ b/plugins/inputs/webhooks/webhooks.go @@ -47,6 +47,7 @@ func (wb *Webhooks) SampleConfig() string { [inputs.webhooks.github] path = "/github" + # secret = "" [inputs.webhooks.mandrill] path = "/mandrill" From 70b3e763e79d2c9dfaed228f9eaf9591655a1505 Mon Sep 17 00:00:00 2001 From: Ross McDonald Date: Mon, 17 Apr 2017 15:49:36 -0500 Subject: [PATCH 071/201] Add input for receiving papertrail webhooks (#2038) --- CHANGELOG.md | 1 + plugins/inputs/webhooks/README.md | 1 + plugins/inputs/webhooks/papertrail/README.md | 32 ++++ .../webhooks/papertrail/papertrail_test.go | 181 ++++++++++++++++++ .../papertrail/papertrail_webhooks.go | 79 ++++++++ .../papertrail/papertrail_webhooks_models.go | 41 ++++ plugins/inputs/webhooks/webhooks.go | 13 +- plugins/inputs/webhooks/webhooks_test.go | 7 + 8 files changed, 351 insertions(+), 4 deletions(-) create mode 100644 plugins/inputs/webhooks/papertrail/README.md create mode 100644 plugins/inputs/webhooks/papertrail/papertrail_test.go create mode 100644 plugins/inputs/webhooks/papertrail/papertrail_webhooks.go create mode 100644 plugins/inputs/webhooks/papertrail/papertrail_webhooks_models.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 6715ef3bb8fd9..0c7b7c2fd73a5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -71,6 +71,7 @@ be deprecated eventually. - [#1100](https://github.com/influxdata/telegraf/issues/1100): Add collectd parser - [#1820](https://github.com/influxdata/telegraf/issues/1820): easier plugin testing without outputs - [#2493](https://github.com/influxdata/telegraf/pull/2493): Check signature in the GitHub webhook plugin +- [#2038](https://github.com/influxdata/telegraf/issues/2038): Add papertrail support to webhooks ### Bugfixes diff --git a/plugins/inputs/webhooks/README.md b/plugins/inputs/webhooks/README.md index bc7714e9e5b38..8b789e338aeba 100644 --- a/plugins/inputs/webhooks/README.md +++ b/plugins/inputs/webhooks/README.md @@ -19,6 +19,7 @@ $ sudo service telegraf start - [Github](github/) - [Mandrill](mandrill/) - [Rollbar](rollbar/) +- [Papertrail](papertrail/) ## Adding new webhooks plugin diff --git a/plugins/inputs/webhooks/papertrail/README.md b/plugins/inputs/webhooks/papertrail/README.md new file mode 100644 index 0000000000000..a3463dcaa6f8b --- /dev/null +++ b/plugins/inputs/webhooks/papertrail/README.md @@ -0,0 +1,32 @@ +# papertrail webhooks + +Enables Telegraf to act as a [Papertrail Webhook](http://help.papertrailapp.com/kb/how-it-works/web-hooks/). + +## Events + +[Full documentation](http://help.papertrailapp.com/kb/how-it-works/web-hooks/#callback). 
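+
+To receive these callbacks, enable a papertrail section in the `webhooks`
+service plugin. A minimal sketch (the listen address is illustrative and must
+match the callback URL registered with Papertrail):
+
+```toml
+[[inputs.webhooks]]
+  service_address = ":1619"
+
+  [inputs.webhooks.papertrail]
+    path = "/papertrail"
+```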
+ +Events from Papertrail come in two forms: + +* The [event-based callback](http://help.papertrailapp.com/kb/how-it-works/web-hooks/#callback): + + * A point is created per event, with the timestamp as `received_at` + * Each point has a field counter (`count`), which is set to `1` (signifying the event occurred) + * Each event "hostname" object is converted to a `host` tag + * The "saved_search" name in the payload is added as an `event` tag + +* The [count-based callback](http://help.papertrailapp.com/kb/how-it-works/web-hooks/#count-only-webhooks) + + * A point is created per timeseries object per count, with the timestamp as the "timeseries" key (the unix epoch of the event) + * Each point has a field counter (`count`), which is set to the value of each "timeseries" object + * Each count "source_name" object is converted to a `host` tag + * The "saved_search" name in the payload is added as an `event` tag + +The current functionality is very basic, however this allows you to +track the number of events by host and saved search. + +When an event is received, any point will look similar to: + +``` +papertrail,host=myserver.example.com,event=saved_search_name count=3i 1453248892000000000 +``` diff --git a/plugins/inputs/webhooks/papertrail/papertrail_test.go b/plugins/inputs/webhooks/papertrail/papertrail_test.go new file mode 100644 index 0000000000000..14b8aec895c98 --- /dev/null +++ b/plugins/inputs/webhooks/papertrail/papertrail_test.go @@ -0,0 +1,181 @@ +package papertrail + +import ( + "net/http" + "net/http/httptest" + "net/url" + "strings" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +const ( + contentType = "application/x-www-form-urlencoded" +) + +func post(pt *PapertrailWebhook, contentType string, body string) *httptest.ResponseRecorder { + req, _ := http.NewRequest("POST", "/", strings.NewReader(body)) + req.Header.Set("Content-Type", contentType) + w := httptest.NewRecorder() + pt.eventHandler(w, req) + return w +} + +func TestWrongContentType(t *testing.T) { + var acc testutil.Accumulator + pt := &PapertrailWebhook{Path: "/papertrail", acc: &acc} + form := url.Values{} + form.Set("payload", sampleEventPayload) + data := form.Encode() + + resp := post(pt, "", data) + require.Equal(t, http.StatusUnsupportedMediaType, resp.Code) +} + +func TestMissingPayload(t *testing.T) { + var acc testutil.Accumulator + pt := &PapertrailWebhook{Path: "/papertrail", acc: &acc} + + resp := post(pt, contentType, "") + require.Equal(t, http.StatusBadRequest, resp.Code) +} + +func TestPayloadNotJSON(t *testing.T) { + var acc testutil.Accumulator + pt := &PapertrailWebhook{Path: "/papertrail", acc: &acc} + + resp := post(pt, contentType, "payload={asdf]") + require.Equal(t, http.StatusBadRequest, resp.Code) +} + +func TestPayloadInvalidJSON(t *testing.T) { + var acc testutil.Accumulator + pt := &PapertrailWebhook{Path: "/papertrail", acc: &acc} + + resp := post(pt, contentType, `payload={"value": 42}`) + require.Equal(t, http.StatusBadRequest, resp.Code) +} + +func TestEventPayload(t *testing.T) { + var acc testutil.Accumulator + pt := &PapertrailWebhook{Path: "/papertrail", acc: &acc} + + form := url.Values{} + form.Set("payload", sampleEventPayload) + resp := post(pt, contentType, form.Encode()) + require.Equal(t, http.StatusOK, resp.Code) + + fields := map[string]interface{}{ + "count": uint64(1), + } + + tags1 := map[string]string{ + "event": "Important stuff", + "host": "abc", + } + tags2 := map[string]string{ + 
"event": "Important stuff", + "host": "def", + } + + acc.AssertContainsTaggedFields(t, "papertrail", fields, tags1) + acc.AssertContainsTaggedFields(t, "papertrail", fields, tags2) +} + +func TestCountPayload(t *testing.T) { + var acc testutil.Accumulator + pt := &PapertrailWebhook{Path: "/papertrail", acc: &acc} + form := url.Values{} + form.Set("payload", sampleCountPayload) + resp := post(pt, contentType, form.Encode()) + require.Equal(t, http.StatusOK, resp.Code) + + fields1 := map[string]interface{}{ + "count": uint64(5), + } + fields2 := map[string]interface{}{ + "count": uint64(3), + } + + tags1 := map[string]string{ + "event": "Important stuff", + "host": "arthur", + } + tags2 := map[string]string{ + "event": "Important stuff", + "host": "ford", + } + + acc.AssertContainsTaggedFields(t, "papertrail", fields1, tags1) + acc.AssertContainsTaggedFields(t, "papertrail", fields2, tags2) +} + +const sampleEventPayload = `{ + "events": [ + { + "id": 7711561783320576, + "received_at": "2011-05-18T20:30:02-07:00", + "display_received_at": "May 18 20:30:02", + "source_ip": "208.75.57.121", + "source_name": "abc", + "source_id": 2, + "hostname": "abc", + "program": "CROND", + "severity": "Info", + "facility": "Cron", + "message": "message body" + }, + { + "id": 7711562567655424, + "received_at": "2011-05-18T20:30:02-07:00", + "display_received_at": "May 18 20:30:02", + "source_ip": "208.75.57.120", + "source_name": "server1", + "source_id": 19, + "hostname": "def", + "program": "CROND", + "severity": "Info", + "facility": "Cron", + "message": "A short event" + } + ], + "saved_search": { + "id": 42, + "name": "Important stuff", + "query": "cron OR server1", + "html_edit_url": "https://papertrailapp.com/searches/42/edit", + "html_search_url": "https://papertrailapp.com/searches/42" + }, + "max_id": "7711582041804800", + "min_id": "7711561783320576" +}` + +const sampleCountPayload = `{ + "counts": [ + { + "source_name": "arthur", + "source_id": 4, + "timeseries": { + "1453248895": 5 + } + }, + { + "source_name": "ford", + "source_id": 3, + "timeseries": { + "1453248927": 3 + } + } + ], + "saved_search": { + "id": 42, + "name": "Important stuff", + "query": "cron OR server1", + "html_edit_url": "https://papertrailapp.com/searches/42/edit", + "html_search_url": "https://papertrailapp.com/searches/42" + }, + "max_id": "7711582041804800", + "min_id": "7711561783320576" +}` diff --git a/plugins/inputs/webhooks/papertrail/papertrail_webhooks.go b/plugins/inputs/webhooks/papertrail/papertrail_webhooks.go new file mode 100644 index 0000000000000..42453c1309d93 --- /dev/null +++ b/plugins/inputs/webhooks/papertrail/papertrail_webhooks.go @@ -0,0 +1,79 @@ +package papertrail + +import ( + "encoding/json" + "log" + "net/http" + "time" + + "github.com/gorilla/mux" + "github.com/influxdata/telegraf" +) + +type PapertrailWebhook struct { + Path string + acc telegraf.Accumulator +} + +func (pt *PapertrailWebhook) Register(router *mux.Router, acc telegraf.Accumulator) { + router.HandleFunc(pt.Path, pt.eventHandler).Methods("POST") + log.Printf("I! 
Started the papertrail_webhook on %s", pt.Path) + pt.acc = acc +} + +func (pt *PapertrailWebhook) eventHandler(w http.ResponseWriter, r *http.Request) { + if r.Header.Get("Content-Type") != "application/x-www-form-urlencoded" { + http.Error(w, "Unsupported Media Type", http.StatusUnsupportedMediaType) + return + } + + data := r.PostFormValue("payload") + if data == "" { + http.Error(w, "Bad Request", http.StatusBadRequest) + return + } + + var payload Payload + err := json.Unmarshal([]byte(data), &payload) + if err != nil { + http.Error(w, "Bad Request", http.StatusBadRequest) + return + } + + if payload.Events != nil { + + // Handle event-based payload + for _, e := range payload.Events { + // Warning: Duplicate event timestamps will overwrite each other + tags := map[string]string{ + "host": e.Hostname, + "event": payload.SavedSearch.Name, + } + fields := map[string]interface{}{ + "count": uint64(1), + } + pt.acc.AddFields("papertrail", fields, tags, e.ReceivedAt) + } + + } else if payload.Counts != nil { + + // Handle count-based payload + for _, c := range payload.Counts { + for ts, count := range *c.TimeSeries { + tags := map[string]string{ + "host": c.SourceName, + "event": payload.SavedSearch.Name, + } + fields := map[string]interface{}{ + "count": count, + } + pt.acc.AddFields("papertrail", fields, tags, time.Unix(ts, 0)) + } + } + } else { + http.Error(w, "Bad Request", http.StatusBadRequest) + return + } + + w.WriteHeader(http.StatusOK) +} diff --git a/plugins/inputs/webhooks/papertrail/papertrail_webhooks_models.go b/plugins/inputs/webhooks/papertrail/papertrail_webhooks_models.go new file mode 100644 index 0000000000000..dd4e8d8bd5054 --- /dev/null +++ b/plugins/inputs/webhooks/papertrail/papertrail_webhooks_models.go @@ -0,0 +1,41 @@ +package papertrail + +import ( + "time" +) + +type Event struct { + ID int64 `json:"id"` + ReceivedAt time.Time `json:"received_at"` + DisplayReceivedAt string `json:"display_received_at"` + SourceIP string `json:"source_ip"` + SourceName string `json:"source_name"` + SourceID int `json:"source_id"` + Hostname string `json:"hostname"` + Program string `json:"program"` + Severity string `json:"severity"` + Facility string `json:"facility"` + Message string `json:"message"` +} + +type Count struct { + SourceName string `json:"source_name"` + SourceID int64 `json:"source_id"` + TimeSeries *map[int64]uint64 `json:"timeseries"` +} + +type SavedSearch struct { + ID int64 `json:"id"` + Name string `json:"name"` + Query string `json:"query"` + EditURL string `json:"html_edit_url"` + SearchURL string `json:"html_search_url"` +} + +type Payload struct { + Events []*Event `json:"events"` + Counts []*Count `json:"counts"` + SavedSearch *SavedSearch `json:"saved_search"` + MaxID string `json:"max_id"` + MinID string `json:"min_id"` +} diff --git a/plugins/inputs/webhooks/webhooks.go b/plugins/inputs/webhooks/webhooks.go index bc8519d7a4ede..7ed1ccd5191c2 100644 --- a/plugins/inputs/webhooks/webhooks.go +++ b/plugins/inputs/webhooks/webhooks.go @@ -13,6 +13,7 @@ import ( "github.com/influxdata/telegraf/plugins/inputs/webhooks/filestack" "github.com/influxdata/telegraf/plugins/inputs/webhooks/github" "github.com/influxdata/telegraf/plugins/inputs/webhooks/mandrill" + "github.com/influxdata/telegraf/plugins/inputs/webhooks/papertrail" "github.com/influxdata/telegraf/plugins/inputs/webhooks/rollbar" ) @@ -27,10 +28,11 @@ func init() { type Webhooks struct { ServiceAddress string - Github *github.GithubWebhook - Filestack 
*filestack.FilestackWebhook - Mandrill *mandrill.MandrillWebhook - Rollbar *rollbar.RollbarWebhook + Github *github.GithubWebhook + Filestack *filestack.FilestackWebhook + Mandrill *mandrill.MandrillWebhook + Rollbar *rollbar.RollbarWebhook + Papertrail *papertrail.PapertrailWebhook } func NewWebhooks() *Webhooks { @@ -54,6 +56,9 @@ func (wb *Webhooks) SampleConfig() string { [inputs.webhooks.rollbar] path = "/rollbar" + + [inputs.webhooks.papertrail] + path = "/papertrail" ` } diff --git a/plugins/inputs/webhooks/webhooks_test.go b/plugins/inputs/webhooks/webhooks_test.go index 85d359e1c8a5d..6d3448870efdd 100644 --- a/plugins/inputs/webhooks/webhooks_test.go +++ b/plugins/inputs/webhooks/webhooks_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/influxdata/telegraf/plugins/inputs/webhooks/github" + "github.com/influxdata/telegraf/plugins/inputs/webhooks/papertrail" "github.com/influxdata/telegraf/plugins/inputs/webhooks/rollbar" ) @@ -26,4 +27,10 @@ func TestAvailableWebhooks(t *testing.T) { if !reflect.DeepEqual(wb.AvailableWebhooks(), expected) { t.Errorf("expected to be %v.\nGot %v", expected, wb.AvailableWebhooks()) } + + wb.Papertrail = &papertrail.PapertrailWebhook{Path: "/papertrail"} + expected = append(expected, wb.Papertrail) + if !reflect.DeepEqual(wb.AvailableWebhooks(), expected) { + t.Errorf("expected to be %v.\nGot %v", expected, wb.AvailableWebhooks()) + } } From eb7ef5392e14b964877a2239976267d86dbd83c6 Mon Sep 17 00:00:00 2001 From: Nikolay Denev Date: Tue, 18 Apr 2017 19:42:58 +0100 Subject: [PATCH 072/201] Simplify system.DiskUsage() (#2630) --- plugins/inputs/system/cpu.go | 2 +- plugins/inputs/system/disk.go | 5 +- plugins/inputs/system/disk_test.go | 108 +++++++++++++++++++++++++++++ plugins/inputs/system/memory.go | 5 +- plugins/inputs/system/mock_PS.go | 44 ++++++++++++ plugins/inputs/system/net.go | 2 +- plugins/inputs/system/netstat.go | 2 +- plugins/inputs/system/ps.go | 72 +++++++++++++------ 8 files changed, 212 insertions(+), 28 deletions(-) diff --git a/plugins/inputs/system/cpu.go b/plugins/inputs/system/cpu.go index 3ed2606fac811..e6aa9f22d9901 100644 --- a/plugins/inputs/system/cpu.go +++ b/plugins/inputs/system/cpu.go @@ -121,7 +121,7 @@ func init() { return &CPUStats{ PerCPU: true, TotalCPU: true, - ps: &systemPS{}, + ps: newSystemPS(), } }) } diff --git a/plugins/inputs/system/disk.go b/plugins/inputs/system/disk.go index 004466f836013..46f2219a783da 100644 --- a/plugins/inputs/system/disk.go +++ b/plugins/inputs/system/disk.go @@ -219,11 +219,12 @@ func (s *DiskIOStats) diskTags(devName string) map[string]string { } func init() { + ps := newSystemPS() inputs.Add("disk", func() telegraf.Input { - return &DiskStats{ps: &systemPS{}} + return &DiskStats{ps: ps} }) inputs.Add("diskio", func() telegraf.Input { - return &DiskIOStats{ps: &systemPS{}, SkipSerialNumber: true} + return &DiskIOStats{ps: ps, SkipSerialNumber: true} }) } diff --git a/plugins/inputs/system/disk_test.go b/plugins/inputs/system/disk_test.go index fc0ff4d0d8cd8..5ba4d041f66c5 100644 --- a/plugins/inputs/system/disk_test.go +++ b/plugins/inputs/system/disk_test.go @@ -1,14 +1,122 @@ package system import ( + "os" "testing" "github.com/influxdata/telegraf/testutil" "github.com/shirou/gopsutil/disk" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) +type MockFileInfo struct { + os.FileInfo +} + +func TestDiskUsage(t *testing.T) { + mck := &mock.Mock{} + mps 
:= MockPSDisk{&systemPS{&mockDiskUsage{mck}}, mck} + defer mps.AssertExpectations(t) + + var acc testutil.Accumulator + var err error + + psAll := []disk.PartitionStat{ + { + Device: "/dev/sda", + Mountpoint: "/", + Fstype: "ext4", + Opts: "", + }, + { + Device: "/dev/sdb", + Mountpoint: "/home", + Fstype: "ext4", + Opts: "", + }, + } + duAll := []disk.UsageStat{ + { + Path: "/", + Fstype: "ext4", + Total: 128, + Free: 23, + Used: 100, + InodesTotal: 1234, + InodesFree: 234, + InodesUsed: 1000, + }, + { + Path: "/home", + Fstype: "ext4", + Total: 256, + Free: 46, + Used: 200, + InodesTotal: 2468, + InodesFree: 468, + InodesUsed: 2000, + }, + } + + mps.On("Partitions", true).Return(psAll, nil) + mps.On("OSGetenv", "HOST_MOUNT_PREFIX").Return("") + mps.On("OSStat", "/").Return(MockFileInfo{}, nil) + mps.On("OSStat", "/home").Return(MockFileInfo{}, nil) + mps.On("PSDiskUsage", "/").Return(&duAll[0], nil) + mps.On("PSDiskUsage", "/home").Return(&duAll[1], nil) + + err = (&DiskStats{ps: mps}).Gather(&acc) + require.NoError(t, err) + + numDiskMetrics := acc.NFields() + expectedAllDiskMetrics := 14 + assert.Equal(t, expectedAllDiskMetrics, numDiskMetrics) + + tags1 := map[string]string{ + "path": "/", + "fstype": "ext4", + "device": "sda", + } + tags2 := map[string]string{ + "path": "/home", + "fstype": "ext4", + "device": "sdb", + } + + fields1 := map[string]interface{}{ + "total": uint64(128), + "used": uint64(100), + "free": uint64(23), + "inodes_total": uint64(1234), + "inodes_free": uint64(234), + "inodes_used": uint64(1000), + "used_percent": float64(81.30081300813008), + } + fields2 := map[string]interface{}{ + "total": uint64(256), + "used": uint64(200), + "free": uint64(46), + "inodes_total": uint64(2468), + "inodes_free": uint64(468), + "inodes_used": uint64(2000), + "used_percent": float64(81.30081300813008), + } + acc.AssertContainsTaggedFields(t, "disk", fields1, tags1) + acc.AssertContainsTaggedFields(t, "disk", fields2, tags2) + + // We expect 6 more DiskMetrics to show up with an explicit match on "/" + // and /home not matching the /dev in MountPoints + err = (&DiskStats{ps: &mps, MountPoints: []string{"/", "/dev"}}).Gather(&acc) + assert.Equal(t, expectedAllDiskMetrics+7, acc.NFields()) + + // We should see all the diskpoints as MountPoints includes both + // / and /home + err = (&DiskStats{ps: &mps, MountPoints: []string{"/", "/home"}}).Gather(&acc) + assert.Equal(t, 2*expectedAllDiskMetrics+7, acc.NFields()) +} + func TestDiskStats(t *testing.T) { var mps MockPS defer mps.AssertExpectations(t) diff --git a/plugins/inputs/system/memory.go b/plugins/inputs/system/memory.go index 26dc550f8ac80..3f679b36c53df 100644 --- a/plugins/inputs/system/memory.go +++ b/plugins/inputs/system/memory.go @@ -73,11 +73,12 @@ func (s *SwapStats) Gather(acc telegraf.Accumulator) error { } func init() { + ps := newSystemPS() inputs.Add("mem", func() telegraf.Input { - return &MemStats{ps: &systemPS{}} + return &MemStats{ps: ps} }) inputs.Add("swap", func() telegraf.Input { - return &SwapStats{ps: &systemPS{}} + return &SwapStats{ps: ps} }) } diff --git a/plugins/inputs/system/mock_PS.go b/plugins/inputs/system/mock_PS.go index a83a8b80332d8..d5093f0315844 100644 --- a/plugins/inputs/system/mock_PS.go +++ b/plugins/inputs/system/mock_PS.go @@ -1,6 +1,8 @@ package system import ( + "os" + "github.com/stretchr/testify/mock" "github.com/shirou/gopsutil/cpu" @@ -13,6 +15,16 @@ import ( type MockPS struct { mock.Mock + PSDiskDeps +} + +type MockPSDisk struct { + *systemPS + *mock.Mock +} 
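+
+// MockPSDisk pairs the real systemPS disk logic with a shared testify mock;
+// the mockDiskUsage type below supplies the PSDiskDeps methods (Partitions,
+// OSGetenv, OSStat, PSDiskUsage) from canned mock return values, so the disk
+// tests never touch the real operating system.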
+ +type mockDiskUsage struct { + *mock.Mock } func (m *MockPS) LoadAvg() (*load.AvgStat, error) { @@ -96,3 +108,35 @@ func (m *MockPS) NetConnections() ([]net.ConnectionStat, error) { return r0, r1 } + +func (m *mockDiskUsage) Partitions(all bool) ([]disk.PartitionStat, error) { + ret := m.Called(all) + + r0 := ret.Get(0).([]disk.PartitionStat) + r1 := ret.Error(1) + + return r0, r1 +} + +func (m *mockDiskUsage) OSGetenv(key string) string { + ret := m.Called(key) + return ret.Get(0).(string) +} + +func (m *mockDiskUsage) OSStat(name string) (os.FileInfo, error) { + ret := m.Called(name) + + r0 := ret.Get(0).(os.FileInfo) + r1 := ret.Error(1) + + return r0, r1 +} + +func (m *mockDiskUsage) PSDiskUsage(path string) (*disk.UsageStat, error) { + ret := m.Called(path) + + r0 := ret.Get(0).(*disk.UsageStat) + r1 := ret.Error(1) + + return r0, r1 +} diff --git a/plugins/inputs/system/net.go b/plugins/inputs/system/net.go index 3f89176fb874e..f47a2cc6c891c 100644 --- a/plugins/inputs/system/net.go +++ b/plugins/inputs/system/net.go @@ -105,6 +105,6 @@ func (s *NetIOStats) Gather(acc telegraf.Accumulator) error { func init() { inputs.Add("net", func() telegraf.Input { - return &NetIOStats{ps: &systemPS{}} + return &NetIOStats{ps: newSystemPS()} }) } diff --git a/plugins/inputs/system/netstat.go b/plugins/inputs/system/netstat.go index 98b729bbe715b..1699e08085d31 100644 --- a/plugins/inputs/system/netstat.go +++ b/plugins/inputs/system/netstat.go @@ -66,6 +66,6 @@ func (s *NetStats) Gather(acc telegraf.Accumulator) error { func init() { inputs.Add("netstat", func() telegraf.Input { - return &NetStats{ps: &systemPS{}} + return &NetStats{ps: newSystemPS()} }) } diff --git a/plugins/inputs/system/ps.go b/plugins/inputs/system/ps.go index 20e01742a49e8..979a3b1645729 100644 --- a/plugins/inputs/system/ps.go +++ b/plugins/inputs/system/ps.go @@ -23,6 +23,13 @@ type PS interface { NetConnections() ([]net.ConnectionStat, error) } +type PSDiskDeps interface { + Partitions(all bool) ([]disk.PartitionStat, error) + OSGetenv(key string) string + OSStat(name string) (os.FileInfo, error) + PSDiskUsage(path string) (*disk.UsageStat, error) +} + func add(acc telegraf.Accumulator, name string, val float64, tags map[string]string) { if val >= 0 { @@ -30,7 +37,15 @@ func add(acc telegraf.Accumulator, } } -type systemPS struct{} +func newSystemPS() *systemPS { + return &systemPS{&systemPSDisk{}} +} + +type systemPS struct { + PSDiskDeps +} + +type systemPSDisk struct{} func (s *systemPS) CPUTimes(perCPU, totalCPU bool) ([]cpu.TimesStat, error) { var cpuTimes []cpu.TimesStat @@ -55,7 +70,7 @@ func (s *systemPS) DiskUsage( mountPointFilter []string, fstypeExclude []string, ) ([]*disk.UsageStat, []*disk.PartitionStat, error) { - parts, err := disk.Partitions(true) + parts, err := s.Partitions(true) if err != nil { return nil, nil, err } @@ -74,35 +89,34 @@ func (s *systemPS) DiskUsage( var partitions []*disk.PartitionStat for i := range parts { - p := parts[i] if len(mountPointFilter) > 0 { // If the mount point is not a member of the filter set, // don't gather info on it. - _, ok := mountPointFilterSet[p.Mountpoint] - if !ok { + if _, ok := mountPointFilterSet[p.Mountpoint]; !ok { continue } } - mountpoint := os.Getenv("HOST_MOUNT_PREFIX") + p.Mountpoint - if _, err := os.Stat(mountpoint); err == nil { - du, err := disk.Usage(mountpoint) - if err != nil { - return nil, nil, err - } - du.Path = p.Mountpoint - // If the mount point is a member of the exclude set, - // don't gather info on it. 
- _, ok := fstypeExcludeSet[p.Fstype] - if ok { - continue - } - du.Fstype = p.Fstype - usage = append(usage, du) - partitions = append(partitions, &p) + // If the mount point is a member of the exclude set, + // don't gather info on it. + if _, ok := fstypeExcludeSet[p.Fstype]; ok { + continue } + + mountpoint := s.OSGetenv("HOST_MOUNT_PREFIX") + p.Mountpoint + if _, err := s.OSStat(mountpoint); err != nil { + continue + } + du, err := s.PSDiskUsage(mountpoint) + if err != nil { + continue + } + du.Path = p.Mountpoint + du.Fstype = p.Fstype + usage = append(usage, du) + partitions = append(partitions, &p) } return usage, partitions, nil @@ -136,3 +150,19 @@ func (s *systemPS) VMStat() (*mem.VirtualMemoryStat, error) { func (s *systemPS) SwapStat() (*mem.SwapMemoryStat, error) { return mem.SwapMemory() } + +func (s *systemPSDisk) Partitions(all bool) ([]disk.PartitionStat, error) { + return disk.Partitions(all) +} + +func (s *systemPSDisk) OSGetenv(key string) string { + return os.Getenv(key) +} + +func (s *systemPSDisk) OSStat(name string) (os.FileInfo, error) { + return os.Stat(name) +} + +func (s *systemPSDisk) PSDiskUsage(path string) (*disk.UsageStat, error) { + return disk.Usage(path) +} From 2542ef6d6207941a0e2e8610d0add75db7d90ede Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Tue, 18 Apr 2017 16:00:41 -0400 Subject: [PATCH 073/201] change jolokia input to use bulk requests (#2253) --- CHANGELOG.md | 1 + plugins/inputs/jolokia/jolokia.go | 149 +++++++++++---------- plugins/inputs/jolokia/jolokia_test.go | 178 +++++++++++++++++-------- 3 files changed, 202 insertions(+), 126 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0c7b7c2fd73a5..7437e4ad5896c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -72,6 +72,7 @@ be deprecated eventually. - [#1820](https://github.com/influxdata/telegraf/issues/1820): easier plugin testing without outputs - [#2493](https://github.com/influxdata/telegraf/pull/2493): Check signature in the GitHub webhook plugin - [#2038](https://github.com/influxdata/telegraf/issues/2038): Add papertrail support to webhooks +- [#2253](https://github.com/influxdata/telegraf/pull/2253): Change jolokia plugin to use bulk requests. 
 ### Bugfixes

diff --git a/plugins/inputs/jolokia/jolokia.go b/plugins/inputs/jolokia/jolokia.go
index 7f371c935549d..0a9122b8749e9 100644
--- a/plugins/inputs/jolokia/jolokia.go
+++ b/plugins/inputs/jolokia/jolokia.go
@@ -3,7 +3,6 @@ package jolokia
 import (
 	"bytes"
 	"encoding/json"
-	"errors"
 	"fmt"
 	"io/ioutil"
 	"net/http"
@@ -130,7 +129,7 @@ func (j *Jolokia) Description() string {
 	return "Read JMX metrics through Jolokia"
 }

-func (j *Jolokia) doRequest(req *http.Request) (map[string]interface{}, error) {
+func (j *Jolokia) doRequest(req *http.Request) ([]map[string]interface{}, error) {
 	resp, err := j.jClient.MakeRequest(req)
 	if err != nil {
 		return nil, err
@@ -155,85 +154,81 @@ func (j *Jolokia) doRequest(req *http.Request) (map[string]interface{}, error) {
 	}

 	// Unmarshal json
-	var jsonOut map[string]interface{}
+	var jsonOut []map[string]interface{}
 	if err = json.Unmarshal([]byte(body), &jsonOut); err != nil {
-		return nil, errors.New("Error decoding JSON response")
-	}
-
-	if status, ok := jsonOut["status"]; ok {
-		if status != float64(200) {
-			return nil, fmt.Errorf("Not expected status value in response body: %3.f",
-				status)
-		}
-	} else {
-		return nil, fmt.Errorf("Missing status in response body")
+		return nil, fmt.Errorf("Error decoding JSON response: %s: %s", err, body)
 	}

 	return jsonOut, nil
 }

-func (j *Jolokia) prepareRequest(server Server, metric Metric) (*http.Request, error) {
+func (j *Jolokia) prepareRequest(server Server, metrics []Metric) (*http.Request, error) {
 	var jolokiaUrl *url.URL
 	context := j.Context // Usually "/jolokia/"

-	// Create bodyContent
-	bodyContent := map[string]interface{}{
-		"type":  "read",
-		"mbean": metric.Mbean,
-	}
+	var bulkBodyContent []map[string]interface{}
+	for _, metric := range metrics {
+		// Create bodyContent
+		bodyContent := map[string]interface{}{
+			"type":  "read",
+			"mbean": metric.Mbean,
+		}

-	if metric.Attribute != "" {
-		bodyContent["attribute"] = metric.Attribute
-		if metric.Path != "" {
-			bodyContent["path"] = metric.Path
+		if metric.Attribute != "" {
+			bodyContent["attribute"] = metric.Attribute
+			if metric.Path != "" {
+				bodyContent["path"] = metric.Path
+			}
 		}
-	}

-	// Add target, only in proxy mode
-	if j.Mode == "proxy" {
-		serviceUrl := fmt.Sprintf("service:jmx:rmi:///jndi/rmi://%s:%s/jmxrmi",
-			server.Host, server.Port)
+		// Add target, only in proxy mode
+		if j.Mode == "proxy" {
+			serviceUrl := fmt.Sprintf("service:jmx:rmi:///jndi/rmi://%s:%s/jmxrmi",
+				server.Host, server.Port)

-		target := map[string]string{
-			"url": serviceUrl,
-		}
+			target := map[string]string{
+				"url": serviceUrl,
+			}

-		if server.Username != "" {
-			target["user"] = server.Username
-		}
+			if server.Username != "" {
+				target["user"] = server.Username
+			}

-		if server.Password != "" {
-			target["password"] = server.Password
-		}
+			if server.Password != "" {
+				target["password"] = server.Password
+			}

-		bodyContent["target"] = target
+			bodyContent["target"] = target

-		proxy := j.Proxy
+			proxy := j.Proxy

-		// Prepare ProxyURL
-		proxyUrl, err := url.Parse("http://" + proxy.Host + ":" + proxy.Port + context)
-		if err != nil {
-			return nil, err
-		}
-		if proxy.Username != "" || proxy.Password != "" {
-			proxyUrl.User = url.UserPassword(proxy.Username, proxy.Password)
-		}
+			// Prepare ProxyURL
+			proxyUrl, err := url.Parse("http://" + proxy.Host + ":" + proxy.Port + context)
+			if err != nil {
+				return nil, err
+			}
+			if proxy.Username != "" || proxy.Password != "" {
+				proxyUrl.User = url.UserPassword(proxy.Username, proxy.Password)
+			}

-		jolokiaUrl = proxyUrl
+			jolokiaUrl = proxyUrl

-	} else {
-		serverUrl, err := url.Parse("http://" + server.Host + ":" + server.Port + context)
-		if err != nil {
-			return nil, err
-		}
-		if server.Username != "" || server.Password != "" {
-			serverUrl.User = url.UserPassword(server.Username, server.Password)
+		} else {
+			serverUrl, err := url.Parse("http://" + server.Host + ":" + server.Port + context)
+			if err != nil {
+				return nil, err
+			}
+			if server.Username != "" || server.Password != "" {
+				serverUrl.User = url.UserPassword(server.Username, server.Password)
+			}
+
+			jolokiaUrl = serverUrl
 		}

-		jolokiaUrl = serverUrl
+		bulkBodyContent = append(bulkBodyContent, bodyContent)
 	}

-	requestBody, err := json.Marshal(bodyContent)
+	requestBody, err := json.Marshal(bulkBodyContent)

 	req, err := http.NewRequest("POST", jolokiaUrl.String(), bytes.NewBuffer(requestBody))

@@ -276,25 +271,35 @@ func (j *Jolokia) Gather(acc telegraf.Accumulator) error {
 		tags["jolokia_host"] = server.Host
 		fields := make(map[string]interface{})
-		for _, metric := range metrics {
-			measurement := metric.Name
+		req, err := j.prepareRequest(server, metrics)
+		if err != nil {
+			acc.AddError(fmt.Errorf("unable to create request: %s", err))
+			continue
+		}
+		out, err := j.doRequest(req)
+		if err != nil {
+			acc.AddError(fmt.Errorf("error performing request: %s", err))
+			continue
+		}

-			req, err := j.prepareRequest(server, metric)
-			if err != nil {
-				return err
+		if len(out) != len(metrics) {
+			acc.AddError(fmt.Errorf("did not receive the correct number of metrics in response. expected %d, received %d", len(metrics), len(out)))
+			continue
+		}
+		for i, resp := range out {
+			if status, ok := resp["status"]; ok && status != float64(200) {
+				acc.AddError(fmt.Errorf("Not expected status value in response body (%s:%s mbean=\"%s\" attribute=\"%s\"): %3.f",
+					server.Host, server.Port, metrics[i].Mbean, metrics[i].Attribute, status))
+				continue
+			} else if !ok {
+				acc.AddError(fmt.Errorf("Missing status in response body"))
+				continue
 			}

-			out, err := j.doRequest(req)
-
-			if err != nil {
-				fmt.Printf("Error handling response: %s\n", err)
+			if values, ok := resp["value"]; ok {
+				j.extractValues(metrics[i].Name, values, fields)
 			} else {
-				if values, ok := out["value"]; ok {
-					j.extractValues(measurement, values, fields)
-				} else {
-					fmt.Printf("Missing key 'value' in output response\n")
-				}
-
+				acc.AddError(fmt.Errorf("Missing key 'value' in output response\n"))
 			}
 		}

diff --git a/plugins/inputs/jolokia/jolokia_test.go b/plugins/inputs/jolokia/jolokia_test.go
index 3c4fc25618216..cf415f36f6efb 100644
--- a/plugins/inputs/jolokia/jolokia_test.go
+++ b/plugins/inputs/jolokia/jolokia_test.go
@@ -13,65 +13,105 @@ import (
 )

 const validThreeLevelMultiValueJSON = `
-{
-  "request":{
-    "mbean":"java.lang:type=*",
-    "type":"read"
-  },
-  "value":{
-    "java.lang:type=Memory":{
-      "ObjectPendingFinalizationCount":0,
-      "Verbose":false,
-      "HeapMemoryUsage":{
-        "init":134217728,
-        "committed":173015040,
-        "max":1908932608,
-        "used":16840016
-      },
-      "NonHeapMemoryUsage":{
-        "init":2555904,
-        "committed":51380224,
-        "max":-1,
-        "used":49944048
-      },
-      "ObjectName":{
-        "objectName":"java.lang:type=Memory"
-      }
-    }
+[
+  {
+    "request":{
+      "mbean":"java.lang:type=*",
+      "type":"read"
+    },
+    "value":{
+      "java.lang:type=Memory":{
+        "ObjectPendingFinalizationCount":0,
+        "Verbose":false,
+        "HeapMemoryUsage":{
+          "init":134217728,
+          "committed":173015040,
+          "max":1908932608,
+          "used":16840016
+        },
+        "NonHeapMemoryUsage":{
+          "init":2555904,
+          "committed":51380224,
+          "max":-1,
+          "used":49944048
+        },
+        "ObjectName":{
+          "objectName":"java.lang:type=Memory"
+        }
+      }
+    },
+    "timestamp":1446129191,
+    "status":200
+  }
+]`
+
+const validBulkResponseJSON = `
+[
+  {
+    "request":{
+      "mbean":"java.lang:type=Memory",
+      "attribute":"HeapMemoryUsage",
+      "type":"read"
+    },
+    "value":{
+      "init":67108864,
+      "committed":456130560,
+      "max":477626368,
+      "used":203288528
+    },
+    "timestamp":1446129191,
+    "status":200
   },
-  "timestamp":1446129191,
-  "status":200
-}`
+  {
+    "request":{
+      "mbean":"java.lang:type=Memory",
+      "attribute":"NonHeapMemoryUsage",
+      "type":"read"
+    },
+    "value":{
+      "init":2555904,
+      "committed":51380224,
+      "max":-1,
+      "used":49944048
+    },
+    "timestamp":1446129191,
+    "status":200
+  }
+]`

 const validMultiValueJSON = `
-{
-  "request":{
-    "mbean":"java.lang:type=Memory",
-    "attribute":"HeapMemoryUsage",
-    "type":"read"
-  },
-  "value":{
-    "init":67108864,
-    "committed":456130560,
-    "max":477626368,
-    "used":203288528
-  },
-  "timestamp":1446129191,
-  "status":200
-}`
+[
+  {
+    "request":{
+      "mbean":"java.lang:type=Memory",
+      "attribute":"HeapMemoryUsage",
+      "type":"read"
+    },
+    "value":{
+      "init":67108864,
+      "committed":456130560,
+      "max":477626368,
+      "used":203288528
+    },
+    "timestamp":1446129191,
+    "status":200
+  }
+]`

 const validSingleValueJSON = `
-{
-  "request":{
-    "path":"used",
-    "mbean":"java.lang:type=Memory",
-    "attribute":"HeapMemoryUsage",
-    "type":"read"
-  },
-  "value":209274376,
-  "timestamp":1446129256,
-  "status":200
-}`
+[
+  {
+    "request":{
+      "path":"used",
+      "mbean":"java.lang:type=Memory",
+      "attribute":"HeapMemoryUsage",
+      "type":"read"
+    },
+    "value":209274376,
+    "timestamp":1446129256,
+    "status":200
+  }
+]`

 const invalidJSON = "I don't think this is JSON"

@@ -82,6 +122,8 @@ var HeapMetric = Metric{Name: "heap_memory_usage",
 	Mbean: "java.lang:type=Memory", Attribute: "HeapMemoryUsage"}
 var UsedHeapMetric = Metric{Name: "heap_memory_usage",
 	Mbean: "java.lang:type=Memory", Attribute: "HeapMemoryUsage"}
+var NonHeapMetric = Metric{Name: "non_heap_memory_usage",
+	Mbean: "java.lang:type=Memory", Attribute: "NonHeapMemoryUsage"}

 type jolokiaClientStub struct {
 	responseBody string
@@ -135,6 +177,34 @@ func TestHttpJsonMultiValue(t *testing.T) {
 	acc.AssertContainsTaggedFields(t, "jolokia", fields, tags)
 }

+// Test that bulk responses are handled
+func TestHttpJsonBulkResponse(t *testing.T) {
+	jolokia := genJolokiaClientStub(validBulkResponseJSON, 200, Servers, []Metric{HeapMetric, NonHeapMetric})
+
+	var acc testutil.Accumulator
+	err := jolokia.Gather(&acc)
+
+	assert.Nil(t, err)
+	assert.Equal(t, 1, len(acc.Metrics))
+
+	fields := map[string]interface{}{
+		"heap_memory_usage_init":          67108864.0,
+		"heap_memory_usage_committed":     456130560.0,
+		"heap_memory_usage_max":           477626368.0,
+		"heap_memory_usage_used":          203288528.0,
+		"non_heap_memory_usage_init":      2555904.0,
+		"non_heap_memory_usage_committed": 51380224.0,
+		"non_heap_memory_usage_max":       -1.0,
+		"non_heap_memory_usage_used":      49944048.0,
+	}
+	tags := map[string]string{
+		"jolokia_host": "127.0.0.1",
+		"jolokia_port": "8080",
+		"jolokia_name": "as1",
+	}
+	acc.AssertContainsTaggedFields(t, "jolokia", fields, tags)
+}
+
 // Test that the proper values are ignored or collected
 func TestHttpJsonThreeLevelMultiValue(t *testing.T) {
 	jolokia := genJolokiaClientStub(validThreeLevelMultiValueJSON, 200, Servers, []Metric{HeapMetric})

From 3690e1b9bf0e222f544f1b5b2e08dea7b65ccf84 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Wed, 19 Apr 2017 13:42:24 -0700
Subject: [PATCH 074/201] Add diskio for darwin to changelog

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7437e4ad5896c..c32367cb4663b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -73,6 +73,7 @@ be deprecated eventually.
 - [#2493](https://github.com/influxdata/telegraf/pull/2493): Check signature in the GitHub webhook plugin
 - [#2038](https://github.com/influxdata/telegraf/issues/2038): Add papertrail support to webhooks
 - [#2253](https://github.com/influxdata/telegraf/pull/2253): Change jolokia plugin to use bulk requests.
+- [#2575](https://github.com/influxdata/telegraf/issues/2575) Add diskio input for Darwin

 ### Bugfixes

From bf30ef89ee013b59d0fd2638055180b58043b374 Mon Sep 17 00:00:00 2001
From: Daniel Nelson
Date: Wed, 19 Apr 2017 17:02:44 -0700
Subject: [PATCH 075/201] Fix ipmi_sensor config is shared between all plugin
 instances (#2684)

---
 CHANGELOG.md                       | 1 +
 plugins/inputs/ipmi_sensor/ipmi.go | 1 +
 2 files changed, 2 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index c32367cb4663b..6160d7558376b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -107,6 +107,7 @@ be deprecated eventually.
 - [#2628](https://github.com/influxdata/telegraf/issues/2628): Set default measurement name for snmp input.
 - [#2649](https://github.com/influxdata/telegraf/pull/2649): Improve performance of diskio with many disks
 - [#2671](https://github.com/influxdata/telegraf/issues/2671): The internal input plugin uses the wrong units for `heap_objects`
+- [#2684](https://github.com/influxdata/telegraf/pull/2684): Fix ipmi_sensor config is shared between all plugin instances

 ## v1.2.1 [2017-02-01]

diff --git a/plugins/inputs/ipmi_sensor/ipmi.go b/plugins/inputs/ipmi_sensor/ipmi.go
index 0114812d3800a..a3beeb29702f8 100644
--- a/plugins/inputs/ipmi_sensor/ipmi.go
+++ b/plugins/inputs/ipmi_sensor/ipmi.go
@@ -152,6 +152,7 @@ func init() {
 		m.Path = path
 	}
 	inputs.Add("ipmi_sensor", func() telegraf.Input {
+		m := m
 		return &m
 	})
 }

From 748ca7d50377217378920bf36bbffced964a369f Mon Sep 17 00:00:00 2001
From: Martin
Date: Thu, 20 Apr 2017 20:19:33 +0200
Subject: [PATCH 076/201] Fixed install/remove of telegraf on non-systemd
 Debian/Ubuntu systems (#2360)

---
 CHANGELOG.md            |  1 +
 scripts/post-install.sh | 51 +++++++++++++++++++++++---------------
 scripts/post-remove.sh  | 55 +++++++++++++++++++++++++++--------------
 scripts/pre-install.sh  | 16 ++++++------
 scripts/pre-remove.sh   |  9 +++----
 5 files changed, 81 insertions(+), 51 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6160d7558376b..b19c3785a51d0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -85,6 +85,7 @@ be deprecated eventually.
 - [#2356](https://github.com/influxdata/telegraf/issues/2356): cpu input panic when /proc/stat is empty.
 - [#2341](https://github.com/influxdata/telegraf/issues/2341): telegraf swallowing panics in --test mode.
 - [#2358](https://github.com/influxdata/telegraf/pull/2358): Create pidfile with 644 permissions & defer file deletion.
+- [#2360](https://github.com/influxdata/telegraf/pull/2360): Fixed install/remove of telegraf on non-systemd Debian/Ubuntu systems
 - [#2282](https://github.com/influxdata/telegraf/issues/2282): Reloading telegraf freezes prometheus output.
 - [#2390](https://github.com/influxdata/telegraf/issues/2390): Empty tag value causes error on InfluxDB output.
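[Editor's note] The `m := m` one-liner in PATCH 075 above is easy to misread. A hedged, self-contained illustration of the bug it fixes; the `config` struct, `path` field, and values here are hypothetical stand-ins for the plugin's package-level `Ipmi` value:

```go
package main

import "fmt"

type config struct{ path string }

var m config // shared package-level default, like the plugin's `m`

// Without a copy, every factory call hands back a pointer to the same
// package-level `m`, so configuring one plugin instance mutates all of them.
func sharedFactory() *config { return &m }

// With `m := m`, each call shadows the package variable with a fresh copy
// and returns a pointer to that copy instead.
func copyFactory() *config { m := m; return &m }

func main() {
	a, b := sharedFactory(), sharedFactory()
	a.path = "/usr/bin/ipmitool" // illustrative value only
	fmt.Println(b.path)          // prints "/usr/bin/ipmitool": a's config leaked into b

	c, d := copyFactory(), copyFactory()
	c.path = "/usr/local/bin/ipmitool"
	fmt.Println(d.path) // prints "": each instance stays independent
}
```

The fix costs one value copy per registration, which is why it can be done inside the factory closure rather than restructuring the plugin.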
diff --git a/scripts/post-install.sh b/scripts/post-install.sh
index 45a19d26c4bac..2baabe69af212 100644
--- a/scripts/post-install.sh
+++ b/scripts/post-install.sh
@@ -24,10 +24,8 @@ function install_chkconfig {
     chkconfig --add telegraf
 }

-id telegraf &>/dev/null
-if [[ $? -ne 0 ]]; then
-    grep "^telegraf:" /etc/group &>/dev/null
-    if [[ $? -ne 0 ]]; then
+if ! id telegraf &>/dev/null; then
+    if ! grep "^telegraf:" /etc/group &>/dev/null; then
         useradd -r -K USERGROUPS_ENAB=yes -M telegraf -s /bin/false -d /etc/telegraf
     else
         useradd -r -K USERGROUPS_ENAB=yes -M telegraf -s /bin/false -d /etc/telegraf -g telegraf
@@ -60,31 +58,44 @@ fi
 # Distribution-specific logic
 if [[ -f /etc/redhat-release ]]; then
     # RHEL-variant logic
-    which systemctl &>/dev/null
-    if [[ $? -eq 0 ]]; then
-        install_systemd
+    if [[ "$(readlink /proc/1/exe)" == */systemd ]]; then
+        install_systemd
     else
-        # Assuming sysv
-        install_init
-        install_chkconfig
+        # Assuming SysVinit
+        install_init
+        # Run update-rc.d or fallback to chkconfig if not available
+        if which update-rc.d &>/dev/null; then
+            install_update_rcd
+        else
+            install_chkconfig
+        fi
     fi
 elif [[ -f /etc/debian_version ]]; then
     # Debian/Ubuntu logic
-    which systemctl &>/dev/null
-    if [[ $? -eq 0 ]]; then
-        install_systemd
-        systemctl restart telegraf || echo "WARNING: systemd not running."
+    if [[ "$(readlink /proc/1/exe)" == */systemd ]]; then
+        install_systemd
+        systemctl restart telegraf || echo "WARNING: systemd not running."
     else
-        # Assuming sysv
-        install_init
-        install_update_rcd
-        invoke-rc.d telegraf restart
+        # Assuming SysVinit
+        install_init
+        # Run update-rc.d or fallback to chkconfig if not available
+        if which update-rc.d &>/dev/null; then
+            install_update_rcd
+        else
+            install_chkconfig
+        fi
+        invoke-rc.d telegraf restart
     fi
 elif [[ -f /etc/os-release ]]; then
     source /etc/os-release
     if [[ $ID = "amzn" ]]; then
         # Amazon Linux logic
-        install_init
-        install_chkconfig
+        install_init
+        # Run update-rc.d or fallback to chkconfig if not available
+        if which update-rc.d &>/dev/null; then
+            install_update_rcd
+        else
+            install_chkconfig
+        fi
     fi
 fi

diff --git a/scripts/post-remove.sh b/scripts/post-remove.sh
index 0f262d2252313..b66a3aa9a2e3c 100644
--- a/scripts/post-remove.sh
+++ b/scripts/post-remove.sh
@@ -15,28 +15,45 @@ function disable_chkconfig {
     rm -f /etc/init.d/telegraf
 }

-if [[ "$1" == "0" ]]; then
-    # RHEL and any distribution that follow RHEL, Amazon Linux covered
-    # telegraf is no longer installed, remove from init system
-    rm -f /etc/default/telegraf
+if [[ -f /etc/redhat-release ]]; then
+    # RHEL-variant logic
+    if [[ "$1" = "0" ]]; then
+        # Telegraf is no longer installed, remove from init system
+        rm -f /etc/default/telegraf

-    which systemctl &>/dev/null
-    if [[ $? -eq 0 ]]; then
-        disable_systemd
-    else
-        # Assuming sysv
-        disable_chkconfig
+        if [[ "$(readlink /proc/1/exe)" == */systemd ]]; then
+            disable_systemd
+        else
+            # Assuming sysv
+            disable_chkconfig
+        fi
     fi
-elif [ "$1" == "remove" -o "$1" == "purge" ]; then
+elif [[ -f /etc/debian_version ]]; then
     # Debian/Ubuntu logic
-    # Remove/purge
-    rm -f /etc/default/telegraf
+    if [ "$1" == "remove" -o "$1" == "purge" ]; then
+        # Remove/purge
+        rm -f /etc/default/telegraf

-    which systemctl &>/dev/null
-    if [[ $? -eq 0 ]]; then
-        disable_systemd
-    else
-        # Assuming sysv
-        disable_update_rcd
+        if [[ "$(readlink /proc/1/exe)" == */systemd ]]; then
+            disable_systemd
+        else
+            # Assuming sysv
+            # Run update-rc.d or fallback to chkconfig if not available
+            if which update-rc.d &>/dev/null; then
+                disable_update_rcd
+            else
+                disable_chkconfig
+            fi
+        fi
+    fi
+elif [[ -f /etc/os-release ]]; then
+    source /etc/os-release
+    if [[ $ID = "amzn" ]]; then
+        # Amazon Linux logic
+        if [[ "$1" = "0" ]]; then
+            # Telegraf is no longer installed, remove from init system
+            rm -f /etc/default/telegraf
+            disable_chkconfig
+        fi
     fi
 fi

diff --git a/scripts/pre-install.sh b/scripts/pre-install.sh
index 443d6bc878868..b371f462d36f4 100644
--- a/scripts/pre-install.sh
+++ b/scripts/pre-install.sh
@@ -1,14 +1,16 @@
 #!/bin/bash

-if [[ -f /etc/opt/telegraf/telegraf.conf ]]; then
+if [[ -d /etc/opt/telegraf ]]; then
     # Legacy configuration found
     if [[ ! -d /etc/telegraf ]]; then
-        # New configuration does not exist, move legacy configuration to new location
-        echo -e "Please note, Telegraf's configuration is now located at '/etc/telegraf' (previously '/etc/opt/telegraf')."
-        mv /etc/opt/telegraf /etc/telegraf
+        # New configuration does not exist, move legacy configuration to new location
+        echo -e "Please note, Telegraf's configuration is now located at '/etc/telegraf' (previously '/etc/opt/telegraf')."
+        mv -vn /etc/opt/telegraf /etc/telegraf

-        backup_name="telegraf.conf.$(date +%s).backup"
-        echo "A backup of your current configuration can be found at: /etc/telegraf/$backup_name"
-        cp -a /etc/telegraf/telegraf.conf /etc/telegraf/$backup_name
+        if [[ -f /etc/telegraf/telegraf.conf ]]; then
+            backup_name="telegraf.conf.$(date +%s).backup"
+            echo "A backup of your current configuration can be found at: /etc/telegraf/${backup_name}"
+            cp -a "/etc/telegraf/telegraf.conf" "/etc/telegraf/${backup_name}"
+        fi
     fi
 fi

diff --git a/scripts/pre-remove.sh b/scripts/pre-remove.sh
index a5718463025c6..2887fc9b624c5 100644
--- a/scripts/pre-remove.sh
+++ b/scripts/pre-remove.sh
@@ -5,11 +5,10 @@ BIN_DIR=/usr/bin
 # Distribution-specific logic
 if [[ -f /etc/debian_version ]]; then
     # Debian/Ubuntu logic
-    which systemctl &>/dev/null
-    if [[ $? -eq 0 ]]; then
-        deb-systemd-invoke stop telegraf.service
+    if [[ "$(readlink /proc/1/exe)" == */systemd ]]; then
+        deb-systemd-invoke stop telegraf.service
     else
-        # Assuming sysv
-        invoke-rc.d telegraf stop
+        # Assuming sysv
+        invoke-rc.d telegraf stop
     fi
 fi

From b03d78d00f4401950f2863506a2c12afcade4473 Mon Sep 17 00:00:00 2001
From: Oleg Grytsynevych
Date: Thu, 20 Apr 2017 20:22:44 +0200
Subject: [PATCH 077/201] win_perf_counters: Format errors reported by pdh.dll
 in human-readable format (#2338)

---
 plugins/inputs/win_perf_counters/pdh.go           | 13 +++++++++++++
 .../inputs/win_perf_counters/win_perf_counters.go |  9 ++++-----
 2 files changed, 17 insertions(+), 5 deletions(-)

diff --git a/plugins/inputs/win_perf_counters/pdh.go b/plugins/inputs/win_perf_counters/pdh.go
index fa00e0603f79b..2caa214451721 100644
--- a/plugins/inputs/win_perf_counters/pdh.go
+++ b/plugins/inputs/win_perf_counters/pdh.go
@@ -33,8 +33,11 @@
 package win_perf_counters

 import (
+	"fmt"
 	"syscall"
 	"unsafe"
+
+	"golang.org/x/sys/windows"
 )

 // Error codes
@@ -417,3 +420,13 @@ func UTF16PtrToString(s *uint16) string {
 	}
 	return syscall.UTF16ToString((*[1 << 29]uint16)(unsafe.Pointer(s))[0:])
 }
+
+func PdhFormatError(msgId uint32) string {
+	var flags uint32 = windows.FORMAT_MESSAGE_FROM_HMODULE | windows.FORMAT_MESSAGE_ARGUMENT_ARRAY | windows.FORMAT_MESSAGE_IGNORE_INSERTS
+	buf := make([]uint16, 300)
+	_, err := windows.FormatMessage(flags, uintptr(libpdhDll.Handle), msgId, 0, buf, nil)
+	if err == nil {
+		return fmt.Sprintf("%s", UTF16PtrToString(&buf[0]))
+	}
+	return fmt.Sprintf("(pdhErr=%d) %s", msgId, err.Error())
+}
diff --git a/plugins/inputs/win_perf_counters/win_perf_counters.go b/plugins/inputs/win_perf_counters/win_perf_counters.go
index 5365dc68bb043..3cc9466691ab1 100644
--- a/plugins/inputs/win_perf_counters/win_perf_counters.go
+++ b/plugins/inputs/win_perf_counters/win_perf_counters.go
@@ -12,7 +12,7 @@ import (
 	"github.com/influxdata/telegraf/plugins/inputs"
 )

-var sampleConfig string = `
+var sampleConfig = `
 	## By default this plugin returns basic CPU and Disk statistics.
 	## See the README file for more examples.
 	## Uncomment examples below or write your own as you see fit. If the system
@@ -124,8 +124,8 @@ func (m *Win_PerfCounters) AddItem(metrics *itemList, query string, objectName s
 	// Call PdhCollectQueryData one time to check existence of the counter
 	ret = PdhCollectQueryData(handle)
 	if ret != ERROR_SUCCESS {
-		ret = PdhCloseQuery(handle)
-		return errors.New("Invalid query for Performance Counters")
+		PdhCloseQuery(handle)
+		return errors.New(PdhFormatError(ret))
 	}

 	temp := &item{query, objectName, counter, instance, measurement,
@@ -174,7 +174,7 @@ func (m *Win_PerfCounters) ParseConfig(metrics *itemList) error {
 				}
 			} else {
 				if PerfObject.FailOnMissing || PerfObject.WarnOnMissing {
-					fmt.Printf("Invalid query: %s\n", query)
+					fmt.Printf("Invalid query: '%s'. Error: %s", query, err.Error())
 				}
 				if PerfObject.FailOnMissing {
 					return err
@@ -298,7 +298,6 @@ func (m *Win_PerfCounters) Gather(acc telegraf.Accumulator) error {
 					bufCount = 0
 					bufSize = 0
 				}
-
 			}
 		}

From a2373019324322f768ec382435fe425ed4d8bdcb Mon Sep 17 00:00:00 2001
From: Alexander Blagoev
Date: Thu, 20 Apr 2017 21:25:22 +0300
Subject: [PATCH 078/201] Memcached input documentation (#2685)

Closes #2615
---
 plugins/inputs/memcached/README.md | 69 ++++++++++++++++++++++++++++++
 1 file changed, 69 insertions(+)
 create mode 100644 plugins/inputs/memcached/README.md

diff --git a/plugins/inputs/memcached/README.md b/plugins/inputs/memcached/README.md
new file mode 100644
index 0000000000000..ed4ebe7ff2e6b
--- /dev/null
+++ b/plugins/inputs/memcached/README.md
@@ -0,0 +1,69 @@
+# Memcached Input Plugin
+
+This plugin gathers statistics data from a Memcached server.
+
+### Configuration:
+
+```toml
+# Read metrics from one or many memcached servers.
+[[inputs.memcached]]
+  # An array of addresses to gather stats about. Specify an ip or hostname
+  # with optional port, e.g. localhost, 10.0.0.1:11211, etc.
+  servers = ["localhost:11211"]
+  # An array of unix memcached sockets to gather stats about.
+  # unix_sockets = ["/var/run/memcached.sock"]
+```
+
+### Measurements & Fields:
+
+The fields from this plugin are gathered in the *memcached* measurement.
+
+Fields:
+
+* get_hits - Number of keys that have been requested and found present
+* get_misses - Number of items that have been requested and not found
+* evictions - Number of valid items removed from cache to free memory for new items
+* limit_maxbytes - Number of bytes this server is allowed to use for storage
+* bytes - Current number of bytes used to store items
+* uptime - Number of secs since the server started
+* curr_items - Current number of items stored
+* total_items - Total number of items stored since the server started
+* curr_connections - Number of open connections
+* total_connections - Total number of connections opened since the server started running
+* connection_structures - Number of connection structures allocated by the server
+* cmd_get - Cumulative number of retrieval reqs
+* cmd_set - Cumulative number of storage reqs
+* delete_hits - Number of deletion reqs resulting in an item being removed
+* delete_misses - Number of deletion reqs for missing keys
+* incr_hits - Number of successful incr reqs
+* incr_misses - Number of incr reqs against missing keys
+* decr_hits - Number of successful decr reqs
+* decr_misses - Number of decr reqs against missing keys
+* cas_hits - Number of successful CAS reqs
+* cas_misses - Number of CAS reqs against missing keys
+* bytes_read - Total number of bytes read by this server from network
+* bytes_written - Total number of bytes sent by this server to network
+* threads - Number of worker threads requested
+* conn_yields - Number of times any connection yielded to another due to hitting the -R limit
+
+Description of gathered fields taken from [here](https://github.com/memcached/memcached/blob/master/doc/protocol.txt).
+
+### Tags:
+
+* Memcached measurements have the following tags:
+    - server (the host name from which metrics are gathered)
+
+### Sample Queries:
+
+You can use the following query to get the average get hit and miss ratio, as well as the total average size of cached items, number of cached items and average connection counts per server.
+ +``` +SELECT mean(get_hits) / mean(cmd_get) as get_ratio, mean(get_misses) / mean(cmd_get) as get_misses_ratio, mean(bytes), mean(curr_items), mean(curr_connections) FROM memcached WHERE time > now() - 1h GROUP BY server +``` + +### Example Output: + +``` +$ ./telegraf -config telegraf.conf -input-filter memcached -test +memcached,server=localhost:11211 get_hits=1,get_misses=2,evictions=0,limit_maxbytes=0,bytes=10,uptime=3600,curr_items=2,total_items=2,curr_connections=1,total_connections=2,connection_structures=1,cmd_get=2,cmd_set=1,delete_hits=0,delete_misses=0,incr_hits=0,incr_misses=0,decr_hits=0,decr_misses=0,cas_hits=0,cas_misses=0,bytes_read=10,bytes_written=10,threads=1,conn_yields=0 1453831884664956455 +``` From 799c8bed299d3e897209c65b7d5e0015db051689 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 20 Apr 2017 15:33:54 -0700 Subject: [PATCH 079/201] Add fix for network aliases to changelog Change was made in gopsutil --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b19c3785a51d0..626a9ef936926 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -109,6 +109,7 @@ be deprecated eventually. - [#2649](https://github.com/influxdata/telegraf/pull/2649): Improve performance of diskio with many disks - [#2671](https://github.com/influxdata/telegraf/issues/2671): The internal input plugin uses the wrong units for `heap_objects` - [#2684](https://github.com/influxdata/telegraf/pull/2684): Fix ipmi_sensor config is shared between all plugin instances +- [#2450](https://github.com/influxdata/telegraf/issues/2450): Network statistics not collected when system has alias interfaces ## v1.2.1 [2017-02-01] From 38e1c1de779dfdd34e6952c1f8e9ca0a234927e6 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 20 Apr 2017 16:29:39 -0700 Subject: [PATCH 080/201] Update commit hash of tail fork --- Godeps | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Godeps b/Godeps index 510f5b1ed00f5..9ffd7e1b82659 100644 --- a/Godeps +++ b/Godeps @@ -22,7 +22,7 @@ github.com/golang/snappy 7db9049039a047d955fe8c19b83c8ff5abd765c7 github.com/gorilla/mux 392c28fe23e1c45ddba891b0320b3b5df220beea github.com/hailocab/go-hostpool e80d13ce29ede4452c43dea11e79b9bc8a15b478 github.com/hashicorp/consul 63d2fc68239b996096a1c55a0d4b400ea4c2583f -github.com/influxdata/tail e9ef7e826dafcb3093b40b989fefa90eeb9a8ca1 +github.com/influxdata/tail a395bf99fe07c233f41fba0735fa2b13b58588ea github.com/influxdata/toml 5d1d907f22ead1cd47adde17ceec5bda9cacaf8f github.com/influxdata/wlog 7c63b0a71ef8300adc255344d275e10e5c3a71ec github.com/jackc/pgx b84338d7d62598f75859b2b146d830b22f1b9ec8 From da0773151b970a342e1b2a84dab52a1ca20b669a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 21 Apr 2017 10:55:54 -0700 Subject: [PATCH 081/201] Use C locale when running sadf (#2690) fixes #1911 --- CHANGELOG.md | 1 + plugins/inputs/sysstat/sysstat.go | 26 +++++++++++++++++++ .../inputs/sysstat/sysstat_interval_test.go | 3 +++ 3 files changed, 30 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 626a9ef936926..c2ddc9d27576c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -110,6 +110,7 @@ be deprecated eventually. 
- [#2671](https://github.com/influxdata/telegraf/issues/2671): The internal input plugin uses the wrong units for `heap_objects` - [#2684](https://github.com/influxdata/telegraf/pull/2684): Fix ipmi_sensor config is shared between all plugin instances - [#2450](https://github.com/influxdata/telegraf/issues/2450): Network statistics not collected when system has alias interfaces +- [#1911](https://github.com/influxdata/telegraf/issues/1911): Sysstat plugin needs LANG=C or similar locale ## v1.2.1 [2017-02-01] diff --git a/plugins/inputs/sysstat/sysstat.go b/plugins/inputs/sysstat/sysstat.go index 9c9ef6b05f347..27e18100263fc 100644 --- a/plugins/inputs/sysstat/sysstat.go +++ b/plugins/inputs/sysstat/sysstat.go @@ -210,11 +210,37 @@ func (s *Sysstat) collect() error { return nil } +func filterEnviron(env []string, prefix string) []string { + newenv := env[:0] + for _, envvar := range env { + if !strings.HasPrefix(envvar, prefix) { + newenv = append(newenv, envvar) + } + } + return newenv +} + +// Return the Cmd with its environment configured to use the C locale +func withCLocale(cmd *exec.Cmd) *exec.Cmd { + var env []string + if cmd.Env != nil { + env = cmd.Env + } else { + env = os.Environ() + } + env = filterEnviron(env, "LANG") + env = filterEnviron(env, "LC_") + env = append(env, "LANG=C") + cmd.Env = env + return cmd +} + // parse runs Sadf on the previously saved tmpFile: // Sadf -p -- -p
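[Editor's note] The section breaks off mid-comment above. For context, a self-contained sketch of the locale-scrubbing idiom PATCH 081 introduces: `cLocale` below restates the patch's `filterEnviron`/`withCLocale` pair, and the sadf argument list is illustrative only, echoing the truncated comment rather than the plugin's exact command line:

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
)

// cLocale drops any LANG/LC_* variables from the command's environment and
// forces LANG=C, so sadf prints locale-independent numbers (issue #1911:
// e.g. a German locale would emit ',' decimal separators the parser chokes on).
func cLocale(cmd *exec.Cmd) *exec.Cmd {
	env := os.Environ()
	kept := env[:0]
	for _, v := range env {
		if !strings.HasPrefix(v, "LANG") && !strings.HasPrefix(v, "LC_") {
			kept = append(kept, v)
		}
	}
	cmd.Env = append(kept, "LANG=C")
	return cmd
}

func main() {
	// Illustrative invocation; requires sysstat's sadf to be installed.
	cmd := cLocale(exec.Command("sadf", "-p", "--", "-p"))
	out, err := cmd.Output()
	if err != nil {
		fmt.Println("sadf failed:", err)
		return
	}
	fmt.Print(string(out))
}
```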