package bind

import (
	"encoding/json"
	"fmt"
	"net"
	"net/http"
	"net/url"
	"strings"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/metric"
)
// jsonStats is the top-level container for the statistics-channel JSON counters
// gathered by this plugin.
type jsonStats struct {
	OpCodes   map[string]int
	QTypes    map[string]int
	RCodes    map[string]int
	ZoneStats map[string]int
	NSStats   map[string]int
	SockStats map[string]int
	Views     map[string]jsonView
	Memory    jsonMemory
}

// jsonMemory holds server-wide memory statistics plus optional per-context detail.
type jsonMemory struct {
	TotalUse    int64
	InUse       int64
	BlockSize   int64
	ContextSize int64
	Lost        int64
	Contexts    []struct {
		Id    string
		Name  string
		Total int64
		InUse int64
	}
}

// jsonView holds per-view resolver statistics, keyed by counter group and counter name.
type jsonView struct {
	Resolver map[string]map[string]int
}
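// For orientation, an abridged and purely illustrative sketch of the JSON shape these
// structs are meant to decode (field names are inferred from the struct definitions
// above; the exact layout and available counters depend on the BIND version and endpoint):
//
//	{
//	  "opcodes":   {"QUERY": 1203},
//	  "qtypes":    {"A": 801, "AAAA": 302},
//	  "rcodes":    {"NOERROR": 950, "NXDOMAIN": 53},
//	  "zonestats": {"NotifyOutv4": 2},
//	  "nsstats":   {"Requestv4": 1203},
//	  "sockstats": {"UDP4Open": 4},
//	  "views":     {"_default": {"resolver": {"qtypes": {"A": 512}}}},
//	  "memory":    {"TotalUse": 123456, "InUse": 65536,
//	                "Contexts": [{"Id": "0x1", "Name": "main", "Total": 2048, "InUse": 1024}]}
//	}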
// addJSONCounter adds a counter array to a Telegraf Accumulator, with the specified tags.
func addJSONCounter(acc telegraf.Accumulator, commonTags map[string]string, stats map[string]int) {
	grouper := metric.NewSeriesGrouper()
	ts := time.Now()
	for name, value := range stats {
		if commonTags["type"] == "opcode" && strings.HasPrefix(name, "RESERVED") {
			continue
		}

		// Create a local copy of the tags, since maps are reference types
		tags := make(map[string]string)
		for k, v := range commonTags {
			tags[k] = v
		}

		grouper.Add("bind_counter", tags, ts, name, value)
	}

	// Add grouped metrics
	for _, metric := range grouper.Metrics() {
		acc.AddMetric(metric)
	}
}
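// As a rough illustration of the grouping above: given commonTags of
// {"type": "qtype", "url": "localhost:8053"} and stats of {"A": 80, "AAAA": 30},
// the grouper merges both counters into a single bind_counter metric carrying the
// fields A=80 and AAAA=30, rather than emitting one metric per counter name.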
// addStatsJSON walks a jsonStats struct and adds the values to the telegraf.Accumulator.
func (b *Bind) addStatsJSON(stats jsonStats, acc telegraf.Accumulator, urlTag string) {
	grouper := metric.NewSeriesGrouper()
	ts := time.Now()
	tags := map[string]string{"url": urlTag}
	host, port, _ := net.SplitHostPort(urlTag)
	tags["source"] = host
	tags["port"] = port

	// Opcodes
	tags["type"] = "opcode"
	addJSONCounter(acc, tags, stats.OpCodes)

	// RCodes stats
	tags["type"] = "rcode"
	addJSONCounter(acc, tags, stats.RCodes)

	// Query RDATA types
	tags["type"] = "qtype"
	addJSONCounter(acc, tags, stats.QTypes)

	// Nameserver stats
	tags["type"] = "nsstat"
	addJSONCounter(acc, tags, stats.NSStats)

	// Socket statistics
	tags["type"] = "sockstat"
	addJSONCounter(acc, tags, stats.SockStats)

	// Zonestats
	tags["type"] = "zonestat"
	addJSONCounter(acc, tags, stats.ZoneStats)

	// Memory stats
	fields := map[string]interface{}{
		"total_use":    stats.Memory.TotalUse,
		"in_use":       stats.Memory.InUse,
		"block_size":   stats.Memory.BlockSize,
		"context_size": stats.Memory.ContextSize,
		"lost":         stats.Memory.Lost,
	}
	acc.AddGauge("bind_memory", fields, map[string]string{"url": urlTag, "source": host, "port": port})

	// Detailed, per-context memory stats
	if b.GatherMemoryContexts {
		for _, c := range stats.Memory.Contexts {
			tags := map[string]string{"url": urlTag, "id": c.Id, "name": c.Name, "source": host, "port": port}
			fields := map[string]interface{}{"total": c.Total, "in_use": c.InUse}

			acc.AddGauge("bind_memory_context", fields, tags)
		}
	}

	// Detailed, per-view stats
	if b.GatherViews {
		for vName, view := range stats.Views {
			for cntrType, counters := range view.Resolver {
				for cntrName, value := range counters {
					tags := map[string]string{
						"url":    urlTag,
						"source": host,
						"port":   port,
						"view":   vName,
						"type":   cntrType,
					}

					grouper.Add("bind_counter", tags, ts, cntrName, value)
				}
			}
		}
	}

	// Add grouped metrics
	for _, metric := range grouper.Metrics() {
		acc.AddMetric(metric)
	}
}
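// In summary, addStatsJSON emits three measurements: bind_counter for the various
// counter groups (tagged with "type", plus "view" for per-view resolver stats),
// bind_memory for the server-wide memory gauges, and bind_memory_context for the
// optional per-context memory detail when GatherMemoryContexts is enabled.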
// readStatsJSON takes a base URL to probe, and requests the individual statistics blobs that we
// are interested in. These individual blobs have a combined size which is significantly smaller
// than if we requested everything at once (e.g. taskmgr and socketmgr can be omitted).
func (b *Bind) readStatsJSON(addr *url.URL, acc telegraf.Accumulator) error {
	var stats jsonStats

	// Progressively build up the full jsonStats struct by parsing the individual HTTP responses
	for _, suffix := range [...]string{"/server", "/net", "/mem"} {
		scrapeUrl := addr.String() + suffix

		resp, err := client.Get(scrapeUrl)
		if err != nil {
			return err
		}
		defer resp.Body.Close()

		if resp.StatusCode != http.StatusOK {
			return fmt.Errorf("%s returned HTTP status: %s", scrapeUrl, resp.Status)
		}

		if err := json.NewDecoder(resp.Body).Decode(&stats); err != nil {
			return fmt.Errorf("unable to decode JSON blob: %s", err)
		}
	}

	b.addStatsJSON(stats, acc, addr.Host)
	return nil
}
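// A minimal, hypothetical sketch of how readStatsJSON might be driven. The real plugin
// derives the base URL from its configured statistics-channel endpoints and uses the
// package-level HTTP client, so the URL below is only an assumption for illustration:
//
//	base, err := url.Parse("http://localhost:8053/json/v1")
//	if err != nil {
//		return err
//	}
//	if err := b.readStatsJSON(base, acc); err != nil {
//		acc.AddError(err)
//	}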