-
Notifications
You must be signed in to change notification settings - Fork 1.5k
/
collector.go
370 lines (315 loc) · 11.4 KB
/
collector.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Package otelcol handles the command-line, configuration, and runs the OpenTelemetry Collector.
// It contains the main [Collector] struct and its constructor [NewCollector].
// [Collector.Run] starts the Collector and then blocks until it shuts down.
package otelcol // import "go.opentelemetry.io/collector/otelcol"
import (
"context"
"errors"
"fmt"
"os"
"os/signal"
"sync/atomic"
"syscall"
"go.uber.org/multierr"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/confmap"
"go.opentelemetry.io/collector/extension"
"go.opentelemetry.io/collector/otelcol/internal/grpclog"
"go.opentelemetry.io/collector/service"
)
// State defines Collector's state.
type State int

const (
	StateStarting State = iota
	StateRunning
	StateClosing
	StateClosed
)

// String returns the human-readable name of the state, or "UNKNOWN" for any
// value outside the declared State constants.
func (s State) String() string {
	names := [...]string{
		StateStarting: "Starting",
		StateRunning:  "Running",
		StateClosing:  "Closing",
		StateClosed:   "Closed",
	}
	if s < 0 || int(s) >= len(names) {
		return "UNKNOWN"
	}
	return names[s]
}
// CollectorSettings holds configuration for creating a new Collector.
type CollectorSettings struct {
	// Factories returns the component factories (receivers, processors,
	// exporters, connectors, extensions) the Collector uses to build the
	// service from the resolved configuration.
	Factories func() (Factories, error)

	// BuildInfo provides collector start information.
	BuildInfo component.BuildInfo

	// DisableGracefulShutdown disables the automatic graceful shutdown
	// of the collector on SIGINT or SIGTERM.
	// Users who want to handle signals themselves can disable this behavior
	// and manually handle the signals to shutdown the collector.
	DisableGracefulShutdown bool

	// ConfigProviderSettings allows configuring the way the Collector retrieves its configuration.
	// The Collector will reload based on configuration changes from the ConfigProvider if any
	// confmap.Providers watch for configuration changes.
	ConfigProviderSettings ConfigProviderSettings

	// LoggingOptions provides a way to change behavior of zap logging.
	LoggingOptions []zap.Option

	// SkipSettingGRPCLogger avoids setting the grpc logger.
	SkipSettingGRPCLogger bool
}
// (Internal note) Collector Lifecycle:
//   - New constructs a new Collector.
//   - Run starts the collector.
//   - Run calls setupConfigurationComponents to handle configuration.
//     If configuration parser fails, collector's config can be reloaded.
//     Collector can be shutdown if parser gets a shutdown error.
//   - Run runs runAndWaitForShutdownEvent and waits for a shutdown event.
//     SIGINT and SIGTERM, errors, and (*Collector).Shutdown can trigger the shutdown events.
//   - Upon shutdown, pipelines are notified, then pipelines and extensions are shut down.
//   - Users can call (*Collector).Shutdown anytime to shut down the collector.

// Collector represents a server providing the OpenTelemetry Collector service.
type Collector struct {
	set CollectorSettings

	configProvider ConfigProvider

	serviceConfig *service.Config
	service       *service.Service
	// state holds the current lifecycle State; stored atomically so GetState
	// can be called concurrently with state transitions.
	state *atomic.Int64

	// shutdownChan is used to terminate the collector.
	shutdownChan chan struct{}
	// signalsChannel is used to receive termination signals from the OS.
	signalsChannel chan os.Signal
	// asyncErrorChannel is used to signal a fatal error from any component.
	asyncErrorChannel chan error
	// bc buffers log entries emitted before the service logger exists; the
	// buffered entries are replayed into the real logger once it is created.
	bc *bufferedCore
	// updateConfigProviderLogger swaps the zap core behind the config
	// provider's logger once the service's logger is available.
	updateConfigProviderLogger func(core zapcore.Core)
}
// NewCollector creates and returns a new instance of Collector.
func NewCollector(set CollectorSettings) (*Collector, error) {
	// Buffer early log entries behind a swappable core; the buffered entries
	// are replayed once the service logger exists.
	buffered := newBufferedCore(zapcore.DebugLevel)
	swappable := &collectorCore{core: buffered}
	zapOpts := append([]zap.Option{zap.WithCaller(true)}, set.LoggingOptions...)
	earlyLogger := zap.New(swappable, zapOpts...)

	set.ConfigProviderSettings.ResolverSettings.ProviderSettings = confmap.ProviderSettings{Logger: earlyLogger}
	set.ConfigProviderSettings.ResolverSettings.ConverterSettings = confmap.ConverterSettings{Logger: earlyLogger}

	provider, err := NewConfigProvider(set.ConfigProviderSettings)
	if err != nil {
		return nil, err
	}

	initialState := new(atomic.Int64)
	initialState.Store(int64(StateStarting))

	col := &Collector{
		set:          set,
		state:        initialState,
		shutdownChan: make(chan struct{}),
		// Per signal.Notify documentation, a size of the channel equaled with
		// the number of signals getting notified on is recommended.
		signalsChannel:             make(chan os.Signal, 3),
		asyncErrorChannel:          make(chan error),
		configProvider:             provider,
		bc:                         buffered,
		updateConfigProviderLogger: swappable.SetCore,
	}
	return col, nil
}
// GetState returns current state of the collector server.
func (col *Collector) GetState() State {
	current := col.state.Load()
	return State(current)
}
// Shutdown shuts down the collector server.
func (col *Collector) Shutdown() {
	// Only shutdown if we're in a Running or Starting State else noop
	state := col.GetState()
	if state == StateRunning || state == StateStarting {
		// The state check above and the close below are not atomic, so a
		// concurrent or repeated Shutdown may close an already-closed
		// channel, which panics. The deferred recover turns that into a
		// no-op instead of a crash.
		defer func() {
			_ = recover()
		}()
		close(col.shutdownChan)
	}
}
// setupConfigurationComponents loads the config, creates the graph, and starts the components. If all the steps succeeds it
// sets the col.service with the service currently running.
func (col *Collector) setupConfigurationComponents(ctx context.Context) error {
	col.setCollectorState(StateStarting)

	factories, err := col.set.Factories()
	if err != nil {
		return fmt.Errorf("failed to initialize factories: %w", err)
	}
	// Resolve the effective configuration using the provided factories.
	cfg, err := col.configProvider.Get(ctx, factories)
	if err != nil {
		return fmt.Errorf("failed to get config: %w", err)
	}
	if err = cfg.Validate(); err != nil {
		return fmt.Errorf("invalid configuration: %w", err)
	}
	col.serviceConfig = &cfg.Service

	// Re-marshal the effective config into a confmap.Conf so it can be handed
	// to the service as CollectorConf.
	conf := confmap.New()
	if err = conf.Marshal(cfg); err != nil {
		return fmt.Errorf("could not marshal configuration: %w", err)
	}

	col.service, err = service.New(ctx, service.Settings{
		BuildInfo:           col.set.BuildInfo,
		CollectorConf:       conf,
		ReceiversConfigs:    cfg.Receivers,
		ReceiversFactories:  factories.Receivers,
		ProcessorsConfigs:   cfg.Processors,
		ProcessorsFactories: factories.Processors,
		ExportersConfigs:    cfg.Exporters,
		ExportersFactories:  factories.Exporters,
		ConnectorsConfigs:   cfg.Connectors,
		ConnectorsFactories: factories.Connectors,
		ExtensionsConfigs:   cfg.Extensions,
		ExtensionsFactories: factories.Extensions,
		ModuleInfo: extension.ModuleInfo{
			Receiver:  factories.ReceiverModules,
			Processor: factories.ProcessorModules,
			Exporter:  factories.ExporterModules,
			Extension: factories.ExtensionModules,
			Connector: factories.ConnectorModules,
		},
		AsyncErrorChannel: col.asyncErrorChannel,
		LoggingOptions:    col.set.LoggingOptions,
	}, cfg.Service)
	if err != nil {
		return err
	}

	// Point the config provider's logger at the real service logger now that
	// it exists.
	if col.updateConfigProviderLogger != nil {
		col.updateConfigProviderLogger(col.service.Logger().Core())
	}
	// Replay any log entries buffered before the service logger was created.
	if col.bc != nil {
		x := col.bc.TakeLogs()
		for _, log := range x {
			ce := col.service.Logger().Core().Check(log.Entry, nil)
			if ce != nil {
				ce.Write(log.Context...)
			}
		}
	}

	if !col.set.SkipSettingGRPCLogger {
		grpclog.SetLogger(col.service.Logger(), cfg.Service.Telemetry.Logs.Level)
	}

	// If the service fails to start, shut down whatever did start and report
	// both errors together.
	if err = col.service.Start(ctx); err != nil {
		return multierr.Combine(err, col.service.Shutdown(ctx))
	}
	col.setCollectorState(StateRunning)
	return nil
}
// reloadConfiguration tears down the service running under the old
// configuration and starts a new one from the freshly resolved configuration.
func (col *Collector) reloadConfiguration(ctx context.Context) error {
	col.service.Logger().Warn("Config updated, restart service")
	col.setCollectorState(StateClosing)

	// Stop the service built from the retiring configuration first.
	if shutdownErr := col.service.Shutdown(ctx); shutdownErr != nil {
		return fmt.Errorf("failed to shutdown the retiring config: %w", shutdownErr)
	}

	// Rebuild and restart from the current configuration.
	if setupErr := col.setupConfigurationComponents(ctx); setupErr != nil {
		return fmt.Errorf("failed to setup configuration components: %w", setupErr)
	}
	return nil
}
// DryRun resolves and validates the configuration without building or
// starting any components.
func (col *Collector) DryRun(ctx context.Context) error {
	factories, factoriesErr := col.set.Factories()
	if factoriesErr != nil {
		return fmt.Errorf("failed to initialize factories: %w", factoriesErr)
	}

	cfg, cfgErr := col.configProvider.Get(ctx, factories)
	if cfgErr != nil {
		return fmt.Errorf("failed to get config: %w", cfgErr)
	}

	return cfg.Validate()
}
// newFallbackLogger builds a debug-level console logger writing to stderr,
// for use when the service's own logger could not be created.
func newFallbackLogger(options []zap.Option) (*zap.Logger, error) {
	encoderCfg := zap.NewProductionEncoderConfig()
	encoderCfg.EncodeTime = zapcore.ISO8601TimeEncoder

	cfg := zap.Config{
		Level:            zap.NewAtomicLevelAt(zapcore.DebugLevel),
		Encoding:         "console",
		EncoderConfig:    encoderCfg,
		OutputPaths:      []string{"stderr"},
		ErrorOutputPaths: []string{"stderr"},
	}
	return cfg.Build(options...)
}
// Run starts the collector according to the given configuration, and waits for it to complete.
// Consecutive calls to Run are not allowed, Run shouldn't be called once a collector is shut down.
// Sets up the control logic for config reloading and shutdown.
func (col *Collector) Run(ctx context.Context) error {
	// setupConfigurationComponents is the "main" function responsible for startup
	if err := col.setupConfigurationComponents(ctx); err != nil {
		col.setCollectorState(StateClosed)
		// The service logger never came up; build a fallback logger so the
		// buffered early log entries are not lost.
		logger, loggerErr := newFallbackLogger(col.set.LoggingOptions)
		if loggerErr != nil {
			return errors.Join(err, fmt.Errorf("unable to create fallback logger: %w", loggerErr))
		}
		// Replay the buffered entries through the fallback logger.
		if col.bc != nil {
			x := col.bc.TakeLogs()
			for _, log := range x {
				ce := logger.Core().Check(log.Entry, nil)
				if ce != nil {
					ce.Write(log.Context...)
				}
			}
		}
		return err
	}

	// Always notify with SIGHUP for configuration reloading.
	signal.Notify(col.signalsChannel, syscall.SIGHUP)
	defer signal.Stop(col.signalsChannel)

	// Only notify with SIGTERM and SIGINT if graceful shutdown is enabled.
	if !col.set.DisableGracefulShutdown {
		signal.Notify(col.signalsChannel, os.Interrupt, syscall.SIGTERM)
	}

	// Control loop: selects between channels for various interrupts - when this loop is broken, the collector exits.
	// If a configuration reload fails, we return without waiting for graceful shutdown.
LOOP:
	for {
		select {
		case err := <-col.configProvider.Watch():
			// A nil error means the provider observed a config change;
			// non-nil means watching itself failed and we shut down.
			if err != nil {
				col.service.Logger().Error("Config watch failed", zap.Error(err))
				break LOOP
			}
			if err = col.reloadConfiguration(ctx); err != nil {
				return err
			}
		case err := <-col.asyncErrorChannel:
			col.service.Logger().Error("Asynchronous error received, terminating process", zap.Error(err))
			break LOOP
		case s := <-col.signalsChannel:
			col.service.Logger().Info("Received signal from OS", zap.String("signal", s.String()))
			// SIGHUP triggers a reload; any other signal exits the loop.
			if s != syscall.SIGHUP {
				break LOOP
			}
			if err := col.reloadConfiguration(ctx); err != nil {
				return err
			}
		case <-col.shutdownChan:
			col.service.Logger().Info("Received shutdown request")
			break LOOP
		case <-ctx.Done():
			col.service.Logger().Info("Context done, terminating process", zap.Error(ctx.Err()))

			// Call shutdown with background context as the passed in context has been canceled
			return col.shutdown(context.Background())
		}
	}
	return col.shutdown(ctx)
}
// shutdown stops the config provider and the service, accumulating any
// errors so that all components get a chance to shut down, and transitions
// the collector to the Closed state.
func (col *Collector) shutdown(ctx context.Context) error {
	col.setCollectorState(StateClosing)

	// Accumulate errors and proceed with shutting down remaining components.
	var combined error

	if providerErr := col.configProvider.Shutdown(ctx); providerErr != nil {
		combined = multierr.Append(combined, fmt.Errorf("failed to shutdown config provider: %w", providerErr))
	}

	// shutdown service
	if serviceErr := col.service.Shutdown(ctx); serviceErr != nil {
		combined = multierr.Append(combined, fmt.Errorf("failed to shutdown service after error: %w", serviceErr))
	}

	col.setCollectorState(StateClosed)
	return combined
}
// setCollectorState records the collector's current lifecycle state.
func (col *Collector) setCollectorState(state State) {
	next := int64(state)
	col.state.Store(next)
}