// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
// or more contributor license agreements. Licensed under the Elastic License;
// you may not use this file except in compliance with the Elastic License.

package main

import (
"context"
"fmt"
"os"
"path"
"time"

"github.com/cucumber/godog"
"github.com/cucumber/messages-go/v10"
"github.com/elastic/e2e-testing/cli/config"
"github.com/elastic/e2e-testing/cli/docker"
"github.com/elastic/e2e-testing/cli/services"
"github.com/elastic/e2e-testing/cli/shell"
"github.com/elastic/e2e-testing/e2e"
log "github.com/sirupsen/logrus"
)
// developerMode keeps the backend services (ES, Kibana, Package Registry) running
// after a test suite. Tearing them down is the desired behavior, but when developing
// we may want to keep them running to speed up the development cycle.
// It can be overridden by the DEVELOPER_MODE env var
var developerMode = false

// ElasticAgentProcessName is the name of the process for the Elastic Agent
const ElasticAgentProcessName = "elastic-agent"

// ElasticAgentServiceName is the name of the service for the Elastic Agent
const ElasticAgentServiceName = "elastic-agent"

// FleetProfileName is the name of the profile used to run the runtime, backend services
const FleetProfileName = "fleet"
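
// agentVersionBase is the base version used for both the agent and the stack;
// it is resolved to a concrete artifact version when the feature context is initialised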
var agentVersionBase = "8.0.0-SNAPSHOT"

// agentVersion is the version of the agent to use
// It can be overridden by the ELASTIC_AGENT_VERSION env var
var agentVersion = agentVersionBase

// stackVersion is the version of the stack to use
// It can be overridden by the STACK_VERSION env var
var stackVersion = agentVersionBase

// profileEnv is the environment to be applied to any execution
// affecting the runtime dependencies (or profile)
var profileEnv map[string]string

// timeoutFactor is a multiplier for the max timeout when doing backoff retries.
// It can be overridden by the TIMEOUT_FACTOR env var
var timeoutFactor = 3

// kibanaBaseURL is the base URL for Kibana; all URLs use localhost, as Kibana is expected to be exposed there
const kibanaBaseURL = "http://localhost:5601"
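
// kibanaClient is the client used to interact with the Kibana instance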
var kibanaClient *services.KibanaClient
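
// init loads the configuration, creates the Kibana client and reads the
// environment variables that tune the test run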
func init() {
config.Init()
kibanaClient = services.NewKibanaClient()
developerMode, _ = shell.GetEnvBool("DEVELOPER_MODE")
if developerMode {
log.Info("Running in Developer mode 💻: runtime dependencies between different test runs will be reused to speed up dev cycle")
}
timeoutFactor = shell.GetEnvInteger("TIMEOUT_FACTOR", timeoutFactor)
agentVersion = shell.GetEnv("ELASTIC_AGENT_VERSION", agentVersionBase)
stackVersion = shell.GetEnv("STACK_VERSION", stackVersion)
}
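
// IngestManagerFeatureContext registers the godog steps and lifecycle hooks for the
// Ingest Manager test suite, wiring the Fleet and Stand-Alone sub-suites together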
func IngestManagerFeatureContext(s *godog.Suite) {
agentVersionBase = e2e.GetElasticArtifactVersion(agentVersionBase)
imts := IngestManagerTestSuite{
Fleet: &FleetTestSuite{
Installers: map[string]ElasticAgentInstaller{
"centos-systemd": GetElasticAgentInstaller("centos", "systemd"),
"centos-tar": GetElasticAgentInstaller("centos", "tar"),
"debian-systemd": GetElasticAgentInstaller("debian", "systemd"),
"debian-tar": GetElasticAgentInstaller("debian", "tar"),
},
},
StandAlone: &StandAloneTestSuite{},
}
serviceManager := services.NewServiceManager()
s.Step(`^the "([^"]*)" process is in the "([^"]*)" state on the host$`, imts.processStateOnTheHost)
imts.Fleet.contributeSteps(s)
imts.StandAlone.contributeSteps(s)
s.BeforeSuite(func() {
log.Trace("Installing Fleet runtime dependencies")
workDir, _ := os.Getwd()
profileEnv = map[string]string{
"stackVersion": stackVersion,
"kibanaConfigPath": path.Join(workDir, "configurations", "kibana.config.yml"),
}
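// bring up the runtime dependencies (ES, Kibana, Package Registry) defined by the Fleet profile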
profile := FleetProfileName
err := serviceManager.RunCompose(true, []string{profile}, profileEnv)
if err != nil {
log.WithFields(log.Fields{
"error": err,
"profile": profile,
}).Fatal("Could not run the runtime dependencies for the profile.")
}
minutesToBeHealthy := time.Duration(timeoutFactor) * time.Minute
healthy, err := e2e.WaitForElasticsearch(minutesToBeHealthy)
if !healthy {
log.WithFields(log.Fields{
"error": err,
"minutes": minutesToBeHealthy,
}).Fatal("The Elasticsearch cluster did not reach a healthy status")
}
healthyKibana, err := kibanaClient.WaitForKibana(minutesToBeHealthy)
if !healthyKibana {
log.WithFields(log.Fields{
"error": err,
"minutes": minutesToBeHealthy,
}).Fatal("The Kibana instance did not reach a healthy status")
}
imts.Fleet.setup()
imts.StandAlone.RuntimeDependenciesStartDate = time.Now().UTC()
})
s.BeforeScenario(func(*messages.Pickle) {
log.Trace("Before Fleet scenario")
imts.StandAlone.Cleanup = false
imts.Fleet.beforeScenario()
})
s.AfterSuite(func() {
if !developerMode {
log.Debug("Destroying Fleet runtime dependencies")
profile := FleetProfileName
err := serviceManager.StopCompose(true, []string{profile})
if err != nil {
log.WithFields(log.Fields{
"error": err,
"profile": profile,
}).Warn("Could not destroy the runtime dependencies for the profile.")
}
}
installers := imts.Fleet.Installers
for k, v := range installers {
agentPath := v.path
if _, err := os.Stat(agentPath); err == nil {
err = os.Remove(agentPath)
if err != nil {
log.WithFields(log.Fields{
"err": err,
"installer": k,
"path": agentPath,
}).Warn("Elastic Agent binary could not be removed.")
} else {
log.WithFields(log.Fields{
"installer": k,
"path": agentPath,
}).Debug("Elastic Agent binary was removed.")
}
}
}
})
s.AfterScenario(func(*messages.Pickle, error) {
log.Trace("After Fleet scenario")
if imts.StandAlone.Cleanup {
imts.StandAlone.afterScenario()
}
if imts.Fleet.Cleanup {
imts.Fleet.afterScenario()
}
})
}
// IngestManagerTestSuite represents a test suite, holding references to the pieces needed to run the tests
type IngestManagerTestSuite struct {
Fleet *FleetTestSuite
StandAlone *StandAloneTestSuite
}
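
// processStateOnTheHost checks that a process is in the expected state in the
// container where the Elastic Agent service is running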
func (imts *IngestManagerTestSuite) processStateOnTheHost(process string, state string) error {
profile := FleetProfileName
serviceName := ElasticAgentServiceName
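// compose container names: Fleet scenarios run the agent in a "<flavour>-systemd" image
// (e.g. centos-systemd), while stand-alone scenarios use the service name directly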
containerName := fmt.Sprintf("%s_%s_%s_%d", profile, imts.Fleet.Image+"-systemd", serviceName, 1)
if imts.StandAlone.Hostname != "" {
containerName = fmt.Sprintf("%s_%s_%d", profile, serviceName, 1)
}
return checkProcessStateOnTheHost(containerName, process, state)
}

// checkProcessStateOnTheHost checks the state of a process in the named container.
// We use the Docker client instead of docker-compose because docker-compose does not
// support returning the output of a command: it simply returns the error level
func checkProcessStateOnTheHost(containerName string, process string, state string) error {
timeout := time.Duration(timeoutFactor) * time.Minute
err := e2e.WaitForProcess(containerName, process, state, timeout)
if err != nil {
if state == "started" {
log.WithFields(log.Fields{
"container": containerName,
"error": err,
"timeout": timeout,
}).Error("The process was not found but should be present")
} else {
log.WithFields(log.Fields{
"container": containerName,
"error": err,
"timeout": timeout,
}).Error("The process was found but shouldn't be present")
}
return err
}
return nil
}
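
// execCommandInService executes a command in the container for a service through
// docker-compose, optionally detaching from it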
func execCommandInService(profile string, image string, serviceName string, cmds []string, detach bool) error {
serviceManager := services.NewServiceManager()
composes := []string{
profile, // profile name
image, // image for the service
}
composeArgs := []string{"exec", "-T"}
if detach {
composeArgs = append(composeArgs, "-d")
}
composeArgs = append(composeArgs, serviceName)
composeArgs = append(composeArgs, cmds...)
err := serviceManager.RunCommand(profile, composes, composeArgs, profileEnv)
if err != nil {
log.WithFields(log.Fields{
"command": cmds,
"error": err,
"service": serviceName,
}).Error("Could not execute command in container")
return err
}
return nil
}

// getContainerHostname returns the hostname of a container. We need the container
// name because we use the Docker client instead of Docker Compose
func getContainerHostname(containerName string) (string, error) {
log.WithFields(log.Fields{
"containerName": containerName,
}).Trace("Retrieving container hostname from the Docker client")
hostname, err := docker.ExecCommandIntoContainer(context.Background(), containerName, "root", []string{"cat", "/etc/hostname"})
if err != nil {
log.WithFields(log.Fields{
"containerName": containerName,
"error": err,
}).Error("Could not retrieve container hostname from the Docker client")
return "", err
}
log.WithFields(log.Fields{
"containerName": containerName,
"hostname": hostname,
}).Info("Hostname retrieved from the Docker client")
return hostname, nil
}