// Copyright (c) 2017-2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0

/*
Program checkmetrics compares a set of metrics results, stored in
JSON files, against a set of baseline metrics 'expectations' defined
in a TOML file.

It returns non-zero if any of the TOML metrics are not met, and
prints a tabulated report summary at the end of the run.
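
Example invocation (the flags are defined in main below; the file and
directory names are illustrative):

	checkmetrics --basefile baseline.toml --metricsdir results/

The baseline TOML is parsed by newBasefile, defined elsewhere in this
package. As a sketch only, assuming TOML keys that map onto the Name
and Type fields used below, a baseline entry might look like:

	[[metric]]
	name = "boot-times"
	type = "json"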
*/
package main

import (
	"errors"
	"fmt"
	"os"
	"path"

	"github.com/olekukonko/tablewriter"
	log "github.com/sirupsen/logrus"
	"github.com/urfave/cli"
)

// name is the name of the program.
const name = "checkmetrics"

// usage is the usage of the program.
const usage = name + ` checks JSON metrics results against a TOML baseline`

var (
	// The TOML baseline file
	ciBasefile *baseFile

	// If set, show results as a percentage relative to the baseline
	showPercentage = false

	// System default path for the baseline file; the value is set by the Makefile
	sysBaseFile string
)

// processMetricsBaseline locates the file matching each entry in the TOML
// baseline, loads and processes it, and checks whether the metrics were in
// range. Finally it generates a summary report.
func processMetricsBaseline(context *cli.Context) (err error) {
	var report [][]string // summary report table
	var passes int
	var fails int
	var summary []string

	log.Debug("processMetricsBaseline")

	// Process each metrics TOML entry one at a time.
	// FIXME - this is not structured to be testable - if you need to add a unit
	// test here then *please* re-structure these funcs etc.
	for _, m := range ciBasefile.Metric {
		log.Debugf("Processing %s", m.Name)
		fullpath := path.Join(context.GlobalString("metricsdir"), m.Name)

		switch m.Type {
		case "":
			log.Debugf("No Type, default to JSON for [%s]", m.Name)
			fallthrough
		case "json":
			{
				var thisJSON jsonRecord
				log.Debug("Process a JSON")
				fullpath = fullpath + ".json"
				log.Debugf("Fullpath %s", fullpath)
				err = thisJSON.load(fullpath, &m)
				if err != nil {
					log.Warnf("[%s][%v]", fullpath, err)
					// Record that this one did not complete successfully
					fails++
					// Note in the summary table that this file failed to load
					summary = (&metricsCheck{}).genErrorLine(false, m.Name, "Failed to load JSON", fmt.Sprintf("%s", err))
					// Not a fatal error - continue to process any remaining files
					break
				}

				summary, err = (&metricsCheck{}).checkstats(m)
				if err != nil {
					log.Warnf("Check for [%s] failed [%v]", m.Name, err)
					log.Warnf(" with [%s]", summary)
					fails++
				} else {
					log.Debugf("Check for [%s] passed", m.Name)
					log.Debugf(" with [%s]", summary)
					passes++
				}
			}

		default:
			{
				log.Warnf("Unknown type [%s] for metric [%s]", m.Type, m.Name)
				summary = (&metricsCheck{}).genErrorLine(false, m.Name, "Unsupported Type", fmt.Sprint(m.Type))
				fails++
			}
		}

		report = append(report, summary)
		log.Debugf("Done %s", m.Name)
	}
	if fails != 0 {
		log.Warn("Overall we failed")
	}

	fmt.Printf("\n")

	// We need to find a better way to report tests that failed to even get
	// into the table - such as JSON file parse failures. Actually, now that
	// we report file failures into the report as well, we should not see
	// this - but it is nice to leave as a sanity check.
	if len(report) < fails+passes {
		fmt.Printf("Warning: some tests (%d) failed to report\n", (fails+passes)-len(report))
	}

	// Note - not logging here - the summary goes to stdout
	fmt.Println("Report Summary:")
	table := tablewriter.NewWriter(os.Stdout)
	table.SetHeader((&metricsCheck{}).reportTitleSlice())
	for _, s := range report {
		table.Append(s)
	}
	table.Render()
	fmt.Printf("Fails: %d, Passes %d\n", fails, passes)

	// Return a non-nil error if we saw any failures during the run, so that
	// the process exits non-zero (see main).
	if fails != 0 {
		err = errors.New("Failed")
	} else {
		err = nil
	}

	return
}

// checkmetrics is the main entry point: process the command line, load the
// TOML baseline file, and check the JSON data files against it.
func main() {
	app := cli.NewApp()
	app.Name = name
	app.Usage = usage

	app.Flags = []cli.Flag{
		cli.StringFlag{
			Name:  "basefile",
			Usage: "path to baseline TOML metrics file",
		},
		cli.BoolFlag{
			Name:  "debug",
			Usage: "enable debug output in the log",
		},
		cli.StringFlag{
			Name:  "log",
			Usage: "set the log file path",
		},
		cli.StringFlag{
			Name:  "metricsdir",
			Usage: "directory containing metrics results files",
		},
		cli.BoolFlag{
			Name:        "percentage",
			Usage:       "present results as percentage differences",
			Destination: &showPercentage,
		},
	}
	app.Before = func(context *cli.Context) error {
		var err error
		var baseFilePath string

		if path := context.GlobalString("log"); path != "" {
			f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND|os.O_SYNC, 0640)
			if err != nil {
				return err
			}
			log.SetOutput(f)
		}

		if context.GlobalBool("debug") {
			log.SetLevel(log.DebugLevel)
		}

		if context.GlobalString("metricsdir") == "" {
			log.Error("Must supply metricsdir argument")
			return errors.New("Must supply metricsdir argument")
		}

		baseFilePath = context.GlobalString("basefile")
		if baseFilePath == "" {
			baseFilePath = sysBaseFile
		}

		ciBasefile, err = newBasefile(baseFilePath)
		return err
	}
	app.Action = func(context *cli.Context) error {
		return processMetricsBaseline(context)
	}

	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}