collector.go

/*
*
* k6 - a next-generation load testing tool
* Copyright (C) 2016 Load Impact
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/

package kafka

import (
	"context"
	"encoding/json"
	"sync"
	"time"

	"github.com/Shopify/sarama"
	"github.com/loadimpact/k6/lib"
	"github.com/loadimpact/k6/stats"
	"github.com/loadimpact/k6/stats/influxdb"
	jsonc "github.com/loadimpact/k6/stats/json"
	log "github.com/sirupsen/logrus"
)

// Collector implements the lib.Collector interface and should be used only for testing
type Collector struct {
	Producer sarama.SyncProducer
	Config   Config
	Samples  []stats.Sample
	lock     sync.Mutex
}

// New creates an instance of the collector
func New(conf Config) (*Collector, error) {
	producer, err := sarama.NewSyncProducer(conf.Brokers, nil)
	if err != nil {
		return nil, err
	}

	return &Collector{
		Producer: producer,
		Config:   conf,
	}, nil
}
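
// A minimal usage sketch (illustrative only; `conf`, `ctx` and `containers` are
// placeholders, and the Config is assumed to already have its Brokers, Topic,
// Format and PushInterval fields populated):
//
//	c, err := New(conf)
//	if err != nil {
//		// handle the error
//	}
//	go c.Run(ctx)         // pushes buffered samples every PushInterval and on ctx.Done()
//	c.Collect(containers) // appends samples to the buffer for the next push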

// Init does nothing, it's only included to satisfy the lib.Collector interface
func (c *Collector) Init() error { return nil }

// Run just blocks until the context is done
func (c *Collector) Run(ctx context.Context) {
	log.Debug("Kafka: Running!")

	ticker := time.NewTicker(time.Duration(c.Config.PushInterval.Duration))
	for {
		select {
		case <-ticker.C:
			c.pushMetrics()
		case <-ctx.Done():
			c.pushMetrics()

			err := c.Producer.Close()
			if err != nil {
				log.WithError(err).Error("Kafka: Failed to close producer.")
			}
			return
		}
	}
}

// Collect just appends all of the samples passed to it to the internal sample slice.
// According to the lib.Collector interface, Collect() should never be called
// concurrently, but the lock is still needed because pushMetrics() drains the same
// slice from the Run() goroutine.
// Also, theoretically the collector doesn't have to actually Run() before samples
// start being collected, it only has to be initialized.
func (c *Collector) Collect(scs []stats.SampleContainer) {
	c.lock.Lock()
	for _, sc := range scs {
		c.Samples = append(c.Samples, sc.GetSamples()...)
	}
	c.lock.Unlock()
}

// Link returns a dummy string, it's only included to satisfy the lib.Collector interface
func (c *Collector) Link() string {
	return ""
}

// GetRequiredSystemTags returns which sample tags are needed by this collector
func (c *Collector) GetRequiredSystemTags() lib.TagSet {
	return lib.TagSet{} // There are no required tags for this collector
}

// SetRunStatus does nothing in the Kafka collector
func (c *Collector) SetRunStatus(status lib.RunStatus) {}
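
// formatSamples serializes the samples into strings: InfluxDB line protocol when
// the configured format is "influxdb", one JSON document per sample otherwise.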
func (c *Collector) formatSamples(samples stats.Samples) ([]string, error) {
	var metrics []string

	switch c.Config.Format.String {
	case "influxdb":
		i, err := influxdb.New(c.Config.InfluxDBConfig)
		if err != nil {
			return nil, err
		}

		metrics, err = i.Format(samples)
		if err != nil {
			return nil, err
		}
	default:
		for _, sample := range samples {
			env := jsonc.WrapSample(&sample)
			metric, err := json.Marshal(env)
			if err != nil {
				return nil, err
			}

			metrics = append(metrics, string(metric))
		}
	}

	return metrics, nil
}
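
// pushMetrics swaps out the accumulated sample buffer, formats the samples and
// sends each one as a separate message to the configured Kafka topic.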
func (c *Collector) pushMetrics() {
	startTime := time.Now()

	c.lock.Lock()
	samples := c.Samples
	c.Samples = nil
	c.lock.Unlock()

	// Format the samples
	formattedSamples, err := c.formatSamples(samples)
	if err != nil {
		log.WithError(err).Error("Kafka: Couldn't format the samples")
		return
	}

	// Send the samples
	log.Debug("Kafka: Delivering...")

	for _, sample := range formattedSamples {
		msg := &sarama.ProducerMessage{Topic: c.Config.Topic.String, Value: sarama.StringEncoder(sample)}
		partition, offset, err := c.Producer.SendMessage(msg)
		if err != nil {
			log.WithError(err).Error("Kafka: failed to send message.")
		} else {
			log.WithFields(log.Fields{
				"partition": partition,
				"offset":    offset,
			}).Debug("Kafka: message sent.")
		}
	}

	t := time.Since(startTime)
	log.WithField("t", t).Debug("Kafka: Delivered!")
}