From 6b66b73e709a58c588f2a746bc2c5830f2de007e Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Wed, 29 Jun 2022 21:14:33 -0400 Subject: [PATCH 01/19] Add runtime for command v2 components. --- .gitignore | 3 + internal/pkg/agent/cmd/container.go | 8 +- internal/pkg/agent/cmd/enroll_cmd.go | 5 +- internal/pkg/agent/configuration/settings.go | 2 +- internal/pkg/agent/operation/common_test.go | 2 +- .../pkg/agent/operation/monitoring_test.go | 2 +- .../core/monitoring/server/processes_test.go | 2 +- internal/pkg/core/plugin/process/app.go | 10 +- internal/pkg/core/plugin/process/start.go | 7 +- internal/pkg/core/plugin/process/status.go | 2 +- internal/pkg/core/plugin/service/app.go | 2 +- internal/pkg/core/process/process.go | 91 -- internal/pkg/core/state/state.go | 3 +- magefile.go | 4 + main.go | 4 +- pkg/component/fake/README.md | 3 + pkg/component/fake/main.go | 279 ++++ pkg/component/runtime/command.go | 362 +++++ pkg/component/runtime/failed.go | 99 ++ pkg/component/runtime/manager.go | 572 ++++++++ pkg/component/runtime/manager_test.go | 1172 +++++++++++++++++ pkg/component/runtime/runtime.go | 353 +++++ pkg/component/runtime/runtime_comm.go | 311 +++++ pkg/component/runtime/subscription.go | 24 + pkg/component/spec.go | 17 +- {internal/pkg => pkg}/core/process/cmd.go | 10 +- .../pkg => pkg}/core/process/cmd_darwin.go | 13 +- .../pkg => pkg}/core/process/cmd_linux.go | 10 +- {internal/pkg => pkg}/core/process/config.go | 0 pkg/core/process/external_unix.go | 30 + pkg/core/process/external_windows.go | 53 + .../cmd/proc => pkg/core/process}/job_unix.go | 2 +- .../proc => pkg/core/process}/job_windows.go | 2 +- pkg/core/process/process.go | 101 ++ 34 files changed, 3425 insertions(+), 135 deletions(-) delete mode 100644 internal/pkg/core/process/process.go create mode 100644 pkg/component/fake/README.md create mode 100644 pkg/component/fake/main.go create mode 100644 pkg/component/runtime/command.go create mode 100644 pkg/component/runtime/failed.go create mode 100644 pkg/component/runtime/manager.go create mode 100644 pkg/component/runtime/manager_test.go create mode 100644 pkg/component/runtime/runtime.go create mode 100644 pkg/component/runtime/runtime_comm.go create mode 100644 pkg/component/runtime/subscription.go rename {internal/pkg => pkg}/core/process/cmd.go (78%) rename {internal/pkg => pkg}/core/process/cmd_darwin.go (77%) rename {internal/pkg => pkg}/core/process/cmd_linux.go (81%) rename {internal/pkg => pkg}/core/process/config.go (100%) create mode 100644 pkg/core/process/external_unix.go create mode 100644 pkg/core/process/external_windows.go rename {internal/pkg/agent/cmd/proc => pkg/core/process}/job_unix.go (97%) rename {internal/pkg/agent/cmd/proc => pkg/core/process}/job_windows.go (99%) create mode 100644 pkg/core/process/process.go diff --git a/.gitignore b/.gitignore index 89eaa67db73..f0b7911dbef 100644 --- a/.gitignore +++ b/.gitignore @@ -57,8 +57,11 @@ internal/pkg/agent/operation/tests/scripts/configurable-1.0-darwin-x86/configura internal/pkg/agent/operation/tests/scripts/servicable-1.0-darwin-x86/configurable internal/pkg/agent/operation/tests/scripts/configurable-1.0-darwin-x86_64/configurable internal/pkg/agent/operation/tests/scripts/serviceable-1.0-darwin-x86_64/serviceable +internal/pkg/agent/operation/tests/scripts/configurable +internal/pkg/agent/operation/tests/scripts/serviceable internal/pkg/agent/application/fleet.yml internal/pkg/agent/transpiler/tests/exec-1.0-darwin-x86_64/exec +pkg/component/fake/fake # VSCode /.vscode diff --git 
a/internal/pkg/agent/cmd/container.go b/internal/pkg/agent/cmd/container.go index 047d51a8fef..ed911996508 100644 --- a/internal/pkg/agent/cmd/container.go +++ b/internal/pkg/agent/cmd/container.go @@ -9,6 +9,7 @@ import ( "context" "encoding/json" "fmt" + "github.com/elastic/elastic-agent/pkg/core/process" "io" "io/ioutil" "net/url" @@ -36,7 +37,6 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/artifact/install/tar" "github.com/elastic/elastic-agent/internal/pkg/cli" "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/internal/pkg/core/process" "github.com/elastic/elastic-agent/internal/pkg/release" "github.com/elastic/elastic-agent/pkg/core/logger" "github.com/elastic/elastic-agent/version" @@ -727,10 +727,6 @@ func runLegacyAPMServer(streams *cli.IOStreams, path string) (*process.Info, err apmDir := filepath.Join(path, files[0].Name()) // Start apm-server process respecting path ENVs apmBinary := filepath.Join(apmDir, spec.Cmd) - log, err := logger.New("apm-server", false) - if err != nil { - return nil, err - } // add APM Server specific configuration var args []string addEnv := func(arg, env string) { @@ -751,7 +747,7 @@ func runLegacyAPMServer(streams *cli.IOStreams, path string) (*process.Info, err addEnv("--httpprof", "HTTPPROF") addSettingEnv("gc_percent", "APMSERVER_GOGC") logInfo(streams, "Starting legacy apm-server daemon as a subprocess.") - return process.Start(log, apmBinary, nil, os.Geteuid(), os.Getegid(), args) + return process.Start(apmBinary, os.Geteuid(), os.Getegid(), args, nil) } func logToStderr(cfg *configuration.Configuration) { diff --git a/internal/pkg/agent/cmd/enroll_cmd.go b/internal/pkg/agent/cmd/enroll_cmd.go index 6d8858a99c4..74f63b1660c 100644 --- a/internal/pkg/agent/cmd/enroll_cmd.go +++ b/internal/pkg/agent/cmd/enroll_cmd.go @@ -8,6 +8,7 @@ import ( "bytes" "context" "fmt" + "github.com/elastic/elastic-agent/pkg/core/process" "io" "io/ioutil" "math/rand" @@ -37,7 +38,6 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/core/authority" "github.com/elastic/elastic-agent/internal/pkg/core/backoff" monitoringConfig "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config" - "github.com/elastic/elastic-agent/internal/pkg/core/process" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" fleetclient "github.com/elastic/elastic-agent/internal/pkg/fleetapi/client" "github.com/elastic/elastic-agent/internal/pkg/release" @@ -600,9 +600,10 @@ func (c *enrollCmd) startAgent(ctx context.Context) (<-chan *os.ProcessState, er args = append(args, "--path.home.unversioned") } proc, err := process.StartContext( - ctx, c.log, cmd, nil, os.Geteuid(), os.Getegid(), args, func(c *exec.Cmd) { + ctx, cmd, os.Geteuid(), os.Getegid(), args, nil, func(c *exec.Cmd) error { c.Stdout = os.Stdout c.Stderr = os.Stderr + return nil }) if err != nil { return nil, err diff --git a/internal/pkg/agent/configuration/settings.go b/internal/pkg/agent/configuration/settings.go index 93ef491670f..ee7f85558e0 100644 --- a/internal/pkg/agent/configuration/settings.go +++ b/internal/pkg/agent/configuration/settings.go @@ -5,11 +5,11 @@ package configuration import ( + "github.com/elastic/elastic-agent/pkg/core/process" "path/filepath" "github.com/elastic/elastic-agent/internal/pkg/artifact" monitoringCfg 
"github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config" - "github.com/elastic/elastic-agent/internal/pkg/core/process" "github.com/elastic/elastic-agent/internal/pkg/core/retry" "github.com/elastic/elastic-agent/pkg/core/logger" "github.com/elastic/elastic-agent/pkg/core/server" diff --git a/internal/pkg/agent/operation/common_test.go b/internal/pkg/agent/operation/common_test.go index 2c897bbb8b5..f351e95f18a 100644 --- a/internal/pkg/agent/operation/common_test.go +++ b/internal/pkg/agent/operation/common_test.go @@ -6,6 +6,7 @@ package operation import ( "context" + "github.com/elastic/elastic-agent/pkg/core/process" "os" "path/filepath" "runtime" @@ -25,7 +26,6 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/artifact/uninstall" "github.com/elastic/elastic-agent/internal/pkg/core/app" "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/noop" - "github.com/elastic/elastic-agent/internal/pkg/core/process" "github.com/elastic/elastic-agent/internal/pkg/core/retry" "github.com/elastic/elastic-agent/internal/pkg/core/status" "github.com/elastic/elastic-agent/pkg/core/logger" diff --git a/internal/pkg/agent/operation/monitoring_test.go b/internal/pkg/agent/operation/monitoring_test.go index b48765612dd..d98c2c5362b 100644 --- a/internal/pkg/agent/operation/monitoring_test.go +++ b/internal/pkg/agent/operation/monitoring_test.go @@ -7,6 +7,7 @@ package operation import ( "context" "fmt" + "github.com/elastic/elastic-agent/pkg/core/process" "testing" "time" @@ -27,7 +28,6 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/core/app" "github.com/elastic/elastic-agent/internal/pkg/core/monitoring" monitoringConfig "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config" - "github.com/elastic/elastic-agent/internal/pkg/core/process" "github.com/elastic/elastic-agent/internal/pkg/core/retry" "github.com/elastic/elastic-agent/internal/pkg/core/state" "github.com/elastic/elastic-agent/internal/pkg/core/status" diff --git a/internal/pkg/core/monitoring/server/processes_test.go b/internal/pkg/core/monitoring/server/processes_test.go index ff0728c8816..e739778ae77 100644 --- a/internal/pkg/core/monitoring/server/processes_test.go +++ b/internal/pkg/core/monitoring/server/processes_test.go @@ -8,6 +8,7 @@ import ( "bytes" "encoding/json" "fmt" + "github.com/elastic/elastic-agent/pkg/core/process" "net/http" "os" "testing" @@ -15,7 +16,6 @@ import ( "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" - "github.com/elastic/elastic-agent/internal/pkg/core/process" "github.com/elastic/elastic-agent/internal/pkg/core/state" "github.com/elastic/elastic-agent/internal/pkg/sorted" ) diff --git a/internal/pkg/core/plugin/process/app.go b/internal/pkg/core/plugin/process/app.go index acb38ee92df..c7dc7169f8b 100644 --- a/internal/pkg/core/plugin/process/app.go +++ b/internal/pkg/core/plugin/process/app.go @@ -7,6 +7,7 @@ package process import ( "context" "fmt" + process2 "github.com/elastic/elastic-agent/pkg/core/process" "os" "reflect" "sync" @@ -19,7 +20,6 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/program" "github.com/elastic/elastic-agent/internal/pkg/core/app" "github.com/elastic/elastic-agent/internal/pkg/core/monitoring" - 
"github.com/elastic/elastic-agent/internal/pkg/core/process" "github.com/elastic/elastic-agent/internal/pkg/core/state" "github.com/elastic/elastic-agent/internal/pkg/core/status" "github.com/elastic/elastic-agent/internal/pkg/tokenbucket" @@ -56,7 +56,7 @@ type Application struct { monitor monitoring.Monitor statusReporter status.Reporter - processConfig *process.Config + processConfig *process2.Config logger *logger.Logger @@ -190,7 +190,7 @@ func (a *Application) SetState(s state.Status, msg string, payload map[string]in a.setState(s, msg, payload) } -func (a *Application) watch(ctx context.Context, p app.Taggable, proc *process.Info, cfg map[string]interface{}) { +func (a *Application) watch(ctx context.Context, p app.Taggable, proc *process2.Info, cfg map[string]interface{}) { go func() { var procState *os.ProcessState @@ -235,7 +235,7 @@ func (a *Application) watch(ctx context.Context, p app.Taggable, proc *process.I }() } -func (a *Application) stopWatcher(procInfo *process.Info) { +func (a *Application) stopWatcher(procInfo *process2.Info) { if procInfo != nil { if closer, ok := a.watchClosers[procInfo.PID]; ok { closer() @@ -280,7 +280,7 @@ func (a *Application) cleanUp() { a.monitor.Cleanup(a.desc.Spec(), a.pipelineID) } -func (a *Application) gracefulKill(proc *process.Info) { +func (a *Application) gracefulKill(proc *process2.Info) { if proc == nil || proc.Process == nil { return } diff --git a/internal/pkg/core/plugin/process/start.go b/internal/pkg/core/plugin/process/start.go index 29770ae714b..4332ab071e3 100644 --- a/internal/pkg/core/plugin/process/start.go +++ b/internal/pkg/core/plugin/process/start.go @@ -7,6 +7,7 @@ package process import ( "context" "fmt" + "github.com/elastic/elastic-agent/pkg/core/process" "io" "os/exec" "path/filepath" @@ -18,7 +19,6 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/core/app" - "github.com/elastic/elastic-agent/internal/pkg/core/process" "github.com/elastic/elastic-agent/internal/pkg/core/state" "github.com/elastic/elastic-agent/pkg/core/server" ) @@ -128,14 +128,13 @@ func (a *Application) start(ctx context.Context, t app.Taggable, cfg map[string] spec.Args = injectDataPath(spec.Args, a.pipelineID, a.id) a.state.ProcessInfo, err = process.Start( - a.logger, spec.BinaryPath, - a.processConfig, a.uid, a.gid, - spec.Args, func(c *exec.Cmd) { + spec.Args, nil, func(c *exec.Cmd) error { c.Stdout = newLoggerWriter(a.Name(), logStdOut, a.logger) c.Stderr = newLoggerWriter(a.Name(), logStdErr, a.logger) + return nil }) if err != nil { return fmt.Errorf("%q failed to start %q: %w", diff --git a/internal/pkg/core/plugin/process/status.go b/internal/pkg/core/plugin/process/status.go index 92883160398..e11f39d1d5f 100644 --- a/internal/pkg/core/plugin/process/status.go +++ b/internal/pkg/core/plugin/process/status.go @@ -7,13 +7,13 @@ package process import ( "context" "fmt" + "github.com/elastic/elastic-agent/pkg/core/process" "time" "gopkg.in/yaml.v2" "github.com/elastic/elastic-agent-client/v7/pkg/proto" - "github.com/elastic/elastic-agent/internal/pkg/core/process" "github.com/elastic/elastic-agent/internal/pkg/core/state" "github.com/elastic/elastic-agent/pkg/core/server" ) diff --git a/internal/pkg/core/plugin/service/app.go b/internal/pkg/core/plugin/service/app.go 
index 14a3abb6148..514d879986d 100644 --- a/internal/pkg/core/plugin/service/app.go +++ b/internal/pkg/core/plugin/service/app.go @@ -7,6 +7,7 @@ package service import ( "context" "fmt" + "github.com/elastic/elastic-agent/pkg/core/process" "io" "net" "reflect" @@ -23,7 +24,6 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/core/app" "github.com/elastic/elastic-agent/internal/pkg/core/monitoring" "github.com/elastic/elastic-agent/internal/pkg/core/plugin" - "github.com/elastic/elastic-agent/internal/pkg/core/process" "github.com/elastic/elastic-agent/internal/pkg/core/state" "github.com/elastic/elastic-agent/internal/pkg/core/status" "github.com/elastic/elastic-agent/internal/pkg/tokenbucket" diff --git a/internal/pkg/core/process/process.go b/internal/pkg/core/process/process.go deleted file mode 100644 index 65613fb9978..00000000000 --- a/internal/pkg/core/process/process.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package process - -import ( - "context" - "fmt" - "io" - "os" - "os/exec" - - "github.com/elastic/elastic-agent/internal/pkg/agent/cmd/proc" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -var ( - // ErrProcessStartFailedTimeout is a failure of start due to timeout - ErrProcessStartFailedTimeout = errors.New("process failed to start due to timeout") -) - -// Info groups information about fresh new process -type Info struct { - PID int - Process *os.Process - Stdin io.WriteCloser -} - -// Option is an option func to change the underlying command -type Option func(c *exec.Cmd) - -// Start starts a new process -// Returns: -// - network address of child process -// - process id -// - error -func Start(logger *logger.Logger, path string, config *Config, uid, gid int, args []string, opts ...Option) (proc *Info, err error) { - return StartContext(nil, logger, path, config, uid, gid, args, opts...) //nolint:staticcheck // calls a different function if no ctx -} - -// StartContext starts a new process with context. -// Returns: -// - network address of child process -// - process id -// - error -func StartContext(ctx context.Context, logger *logger.Logger, path string, config *Config, uid, gid int, args []string, opts ...Option) (*Info, error) { - cmd := getCmd(ctx, logger, path, []string{}, uid, gid, args...) - for _, o := range opts { - o(cmd) - } - stdin, err := cmd.StdinPipe() - if err != nil { - return nil, err - } - - // start process - if err := cmd.Start(); err != nil { - return nil, errors.New(err, fmt.Sprintf("failed to start '%s'", path)) - } - - // Hook to JobObject on windows, noop on other platforms. - // This ties the application processes lifespan to the agent's. - // Fixes the orphaned beats processes left behind situation - // after the agent process gets killed. - if err := proc.JobObject.Assign(cmd.Process); err != nil { - logger.Errorf("application process failed job assign: %v", err) - } - - return &Info{ - PID: cmd.Process.Pid, - Process: cmd.Process, - Stdin: stdin, - }, err -} - -// Stop stops the process cleanly. -func (i *Info) Stop() error { - return terminateCmd(i.Process) -} - -// StopWait stops the process and waits for it to exit. 
-func (i *Info) StopWait() error { - err := i.Stop() - if err != nil { - return err - } - _, err = i.Process.Wait() - return err -} diff --git a/internal/pkg/core/state/state.go b/internal/pkg/core/state/state.go index 080efb42c88..4ad0f17cf8b 100644 --- a/internal/pkg/core/state/state.go +++ b/internal/pkg/core/state/state.go @@ -6,11 +6,10 @@ package state import ( "context" + "github.com/elastic/elastic-agent/pkg/core/process" "strings" "github.com/elastic/elastic-agent-client/v7/pkg/proto" - - "github.com/elastic/elastic-agent/internal/pkg/core/process" ) // Status describes the current status of the application process. diff --git a/magefile.go b/magefile.go index 6bb88c5fff9..024917ad2c7 100644 --- a/magefile.go +++ b/magefile.go @@ -269,18 +269,22 @@ func (Build) Clean() { func (Build) TestBinaries() error { p := filepath.Join("internal", "pkg", "agent", "operation", "tests", "scripts") p2 := filepath.Join("internal", "pkg", "agent", "transpiler", "tests") + p3 := filepath.Join("pkg", "component") configurableName := "configurable" serviceableName := "serviceable" execName := "exec" + fakeName := "fake" if runtime.GOOS == "windows" { configurableName += ".exe" serviceableName += ".exe" execName += ".exe" + fakeName += ".exe" } return combineErr( RunGo("build", "-o", filepath.Join(p, configurableName), filepath.Join(p, "configurable-1.0-darwin-x86_64", "main.go")), RunGo("build", "-o", filepath.Join(p, serviceableName), filepath.Join(p, "serviceable-1.0-darwin-x86_64", "main.go")), RunGo("build", "-o", filepath.Join(p2, "exec-1.0-darwin-x86_64", execName), filepath.Join(p2, "exec-1.0-darwin-x86_64", "main.go")), + RunGo("build", "-o", filepath.Join(p3, "fake", fakeName), filepath.Join(p3, "fake", "main.go")), ) } diff --git a/main.go b/main.go index 81fb4712d01..230135d3346 100644 --- a/main.go +++ b/main.go @@ -6,12 +6,12 @@ package main import ( "fmt" + "github.com/elastic/elastic-agent/pkg/core/process" "math/rand" "os" "time" "github.com/elastic/elastic-agent/internal/pkg/agent/cmd" - "github.com/elastic/elastic-agent/internal/pkg/agent/cmd/proc" ) // Setups and Runs agent. @@ -21,7 +21,7 @@ func main() { os.Exit(1) } - pj, err := proc.CreateJobObject() + pj, err := process.CreateJobObject() if err != nil { fmt.Fprintf(os.Stderr, "Failed to initialize process job object: %v\n", err) os.Exit(1) diff --git a/pkg/component/fake/README.md b/pkg/component/fake/README.md new file mode 100644 index 00000000000..678fb3dd4f2 --- /dev/null +++ b/pkg/component/fake/README.md @@ -0,0 +1,3 @@ +# Fake Component + +Controllable through GRPC control protocol with actions. Allows unit tests to simulate control and communication with a running sub-process. diff --git a/pkg/component/fake/main.go b/pkg/component/fake/main.go new file mode 100644 index 00000000000..c8127125f4e --- /dev/null +++ b/pkg/component/fake/main.go @@ -0,0 +1,279 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package main + +import ( + "context" + "errors" + "fmt" + "gopkg.in/yaml.v2" + "os" + "os/signal" + "syscall" + "time" + + "github.com/elastic/elastic-agent-client/v7/pkg/client" +) + +func main() { + err := run() + if err != nil { + fmt.Printf("%s\n", err) + os.Exit(1) + } +} + +func run() error { + ver := client.VersionInfo{ + Name: "fake", + Version: "1.0", + Meta: map[string]string{ + "input": "fake", + }, + } + c, _, err := client.NewV2FromReader(os.Stdin, ver) + if err != nil { + return fmt.Errorf("failed to create GRPC client: %w", err) + } + + ctx, cancel := context.WithCancel(context.Background()) + n := make(chan os.Signal, 1) + signal.Notify(n, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) + defer func() { + signal.Stop(n) + cancel() + }() + go func() { + select { + case <-n: + cancel() + case <-ctx.Done(): + } + }() + + err = c.Start(ctx) + if err != nil { + return fmt.Errorf("failed to start GRPC client: %w", err) + } + + s := newStateManager() + for { + select { + case <-ctx.Done(): + return nil + case change := <-c.UnitChanges(): + switch change.Type { + case client.UnitChangedAdded: + s.added(change.Unit) + case client.UnitChangedModified: + s.modified(change.Unit) + case client.UnitChangedRemoved: + s.removed(change.Unit) + } + case <-c.Errors(): + //fmt.Fprintf(os.Stderr, "GRPC client error: %s", err) + } + } +} + +type unitKey struct { + unitType client.UnitType + unitID string +} + +type stateManager struct { + units map[unitKey]runningUnit +} + +func newStateManager() *stateManager { + return &stateManager{units: make(map[unitKey]runningUnit)} +} + +func (s *stateManager) added(unit *client.Unit) { + k := newUnitKey(unit) + _, ok := s.units[k] + if ok { + _ = unit.UpdateState(client.UnitStateFailed, "Error: duplicate unit", nil) + return + } + r, err := newRunningUnit(unit) + if err != nil { + _ = unit.UpdateState(client.UnitStateFailed, fmt.Sprintf("Error: %s", err), nil) + return + } + s.units[k] = r +} + +func (s *stateManager) modified(unit *client.Unit) { + existing, ok := s.units[newUnitKey(unit)] + if !ok { + _ = unit.UpdateState(client.UnitStateFailed, "Error: unknown unit", nil) + return + } + err := existing.Update(unit) + if err != nil { + _ = unit.UpdateState(client.UnitStateFailed, fmt.Sprintf("Error: %s", err), nil) + } +} + +func (s *stateManager) removed(unit *client.Unit) { + k := newUnitKey(unit) + _, ok := s.units[k] + if !ok { + return + } + delete(s.units, k) +} + +type runningUnit interface { + Unit() *client.Unit + Update(u *client.Unit) error +} + +type fakeInput struct { + unit *client.Unit + cfg map[string]interface{} + + state client.UnitState + stateMsg string +} + +func newFakeInput(unit *client.Unit, cfg map[string]interface{}) (*fakeInput, error) { + state, msg, err := getStateFromMap(cfg) + if err != nil { + return nil, err + } + i := &fakeInput{ + unit: unit, + cfg: cfg, + state: state, + stateMsg: msg, + } + unit.RegisterAction(&stateSetterAction{i}) + unit.RegisterAction(&killAction{}) + _ = unit.UpdateState(i.state, i.stateMsg, nil) + return i, nil +} + +func (f *fakeInput) Unit() *client.Unit { + return f.unit +} + +func (f *fakeInput) Update(u *client.Unit) error { + expected, config := u.Expected() + if expected == client.UnitStateStopped { + // agent is requesting this input to stop + _ = u.UpdateState(client.UnitStateStopping, "Stopping", nil) + go func() { + <-time.After(1 * time.Second) + _ = u.UpdateState(client.UnitStateStopped, "Stopped", nil) + }() + return nil + } + + var cfg map[string]interface{} + err := 
yaml.Unmarshal([]byte(config), &cfg) + if err != nil { + return fmt.Errorf("failed to unmarshal YAML: %w", err) + } + unitType, ok := cfg["type"] + if !ok { + return fmt.Errorf("unit missing config type") + } + if unitType != "fake" { + return fmt.Errorf("unit type changed with the same unit ID: %s", unitType) + } + + state, stateMsg, err := getStateFromMap(cfg) + if err != nil { + return fmt.Errorf("unit config parsing error: %s", err) + } + f.state = state + f.stateMsg = stateMsg + _ = u.UpdateState(f.state, f.stateMsg, nil) + return nil +} + +type stateSetterAction struct { + input *fakeInput +} + +func (s *stateSetterAction) Name() string { + return "set_state" +} + +func (s *stateSetterAction) Execute(_ context.Context, params map[string]interface{}) (map[string]interface{}, error) { + state, stateMsg, err := getStateFromMap(params) + if err != nil { + return nil, err + } + s.input.state = state + s.input.stateMsg = stateMsg + _ = s.input.unit.UpdateState(s.input.state, s.input.stateMsg, nil) + return nil, nil +} + +type killAction struct { +} + +func (s *killAction) Name() string { + return "kill" +} + +func (s *killAction) Execute(_ context.Context, params map[string]interface{}) (map[string]interface{}, error) { + os.Exit(1) + return nil, nil +} + +func newRunningUnit(unit *client.Unit) (runningUnit, error) { + _, config := unit.Expected() + var cfg map[string]interface{} + err := yaml.Unmarshal([]byte(config), &cfg) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal YAML: %w", err) + } + unitType, ok := cfg["type"] + if !ok { + return nil, fmt.Errorf("unit missing config type") + } + if unitType == "" { + return nil, fmt.Errorf("unit config type empty") + } + switch unitType { + case "fake": + return newFakeInput(unit, cfg) + } + return nil, fmt.Errorf("unknown unit config type: %s", unitType) +} + +func newUnitKey(unit *client.Unit) unitKey { + return unitKey{ + unitType: unit.Type(), + unitID: unit.ID(), + } +} + +func getStateFromMap(cfg map[string]interface{}) (client.UnitState, string, error) { + state, ok := cfg["state"] + if !ok { + return client.UnitStateStarting, "", errors.New("missing required state parameter") + } + stateTypeI, ok := state.(int) + if !ok { + // try float64 (JSON) does it differently than YAML + stateTypeF, ok := state.(float64) + if !ok { + return client.UnitStateStarting, "", fmt.Errorf("state parameter is not a valid unit state: %T", state) + } + stateTypeI = int(stateTypeF) + } + stateType := client.UnitState(stateTypeI) + stateMsgStr := "" + stateMsg, ok := cfg["message"] + if ok { + stateMsgStr, _ = stateMsg.(string) + } + return stateType, stateMsgStr, nil +} diff --git a/pkg/component/runtime/command.go b/pkg/component/runtime/command.go new file mode 100644 index 00000000000..3169fee1521 --- /dev/null +++ b/pkg/component/runtime/command.go @@ -0,0 +1,362 @@ +package runtime + +import ( + "context" + "errors" + "fmt" + "github.com/elastic/elastic-agent-client/v7/pkg/proto" + "gopkg.in/yaml.v2" + "os" + "os/exec" + "time" + + "github.com/elastic/elastic-agent-client/v7/pkg/client" + + "github.com/elastic/elastic-agent/pkg/component" + "github.com/elastic/elastic-agent/pkg/core/process" +) + +const ( + actionStart = 0 + actionStop = 1 +) + +type procState struct { + proc *process.Info + state *os.ProcessState +} + +type CommandRuntime struct { + current component.Component + + ch chan ComponentState + actionCh chan int + procCh chan procState + compCh chan component.Component + + actionState int + proc 
*process.Info + + expected ComponentState + observed ComponentState + lastCheckin time.Time + missedCheckins int +} + +func NewCommandRuntime(comp component.Component) (ComponentRuntime, error) { + if comp.Spec.Spec.Command == nil { + return nil, errors.New("must have command defined in input specification") + } + expected := newComponentState(&comp, client.UnitStateHealthy, "", 1) + observed := newComponentState(&comp, client.UnitStateStarting, "Starting", 0) + return &CommandRuntime{ + current: comp, + ch: make(chan ComponentState), + actionCh: make(chan int), + procCh: make(chan procState), + compCh: make(chan component.Component), + actionState: actionStart, + expected: expected, + observed: observed, + }, nil +} + +func (c *CommandRuntime) Run(ctx context.Context, comm Communicator) { + c.forceCompState(client.UnitStateStarting, "Starting") + t := time.NewTicker(checkinPeriod) + defer t.Stop() + for { + select { + case <-ctx.Done(): + return + case as := <-c.actionCh: + c.actionState = as + switch as { + case actionStart: + if err := c.start(comm); err != nil { + c.forceCompState(client.UnitStateFailed, err.Error()) + } + t.Reset(checkinPeriod) + case actionStop: + if err := c.stop(ctx); err != nil { + c.forceCompState(client.UnitStateFailed, err.Error()) + } + } + case ps := <-c.procCh: + // ignores old processes + if ps.proc == c.proc { + c.proc = nil + if c.handleProc(ps.state) { + // start again + if err := c.start(comm); err != nil { + c.forceCompState(client.UnitStateFailed, err.Error()) + } + } + t.Reset(checkinPeriod) + } + case newComp := <-c.compCh: + c.expected.syncComponent(&newComp, client.UnitStateHealthy, "Healthy", 1) + if c.mustSendExpected() { + c.sendExpected(comm) + } + case checkin := <-comm.CheckinObserved(): + sendExpected := false + changed := false + if c.observed.State == client.UnitStateStarting { + // first observation after start set component to healthy + c.observed.State = client.UnitStateHealthy + c.observed.Message = fmt.Sprintf("Healthy: communicating with pid '%d'", c.proc.PID) + changed = true + } + if c.lastCheckin.IsZero() { + // first check-in + sendExpected = true + } + c.lastCheckin = time.Now().UTC() + if c.observed.syncCheckin(checkin) { + changed = true + } + if c.mustSendExpected() { + sendExpected = true + } + if sendExpected { + c.sendExpected(comm) + } + if changed { + c.sendObserved() + } + if c.cleanupStopped() { + c.sendObserved() + } + case <-t.C: + if c.proc != nil && c.actionState == actionStart { + // running and should be running + now := time.Now().UTC() + if c.lastCheckin.IsZero() { + // never checked-in + c.missedCheckins += 1 + } else if now.Sub(c.lastCheckin) > checkinPeriod { + // missed check-in during required period + c.missedCheckins += 1 + } else if now.Sub(c.lastCheckin) <= checkinPeriod { + c.missedCheckins = 0 + } + if c.missedCheckins == 0 { + c.compState(client.UnitStateHealthy) + } else if c.missedCheckins > 0 && c.missedCheckins < maxCheckinMisses { + c.compState(client.UnitStateDegraded) + } else if c.missedCheckins >= maxCheckinMisses { + // something is wrong; the command should be checking in + // + // at this point it is assumed the sub-process has locked up and will not respond to a nice + // termination signal, so we jump directly to killing the process + msg := fmt.Sprintf("Failed: pid '%d' missed %d check-in's and will be killed", c.proc.PID, maxCheckinMisses) + c.forceCompState(client.UnitStateFailed, msg) + _ = c.proc.Kill() // watcher will handle it from here + } + } + } + } +} + +func (c 
*CommandRuntime) Watch() <-chan ComponentState { + return c.ch +} + +func (c *CommandRuntime) Start() error { + c.actionCh <- actionStart + return nil +} + +func (c *CommandRuntime) Update(comp component.Component) error { + c.compCh <- comp + return nil +} + +func (c *CommandRuntime) Stop() error { + c.actionCh <- actionStop + return nil +} + +func (c *CommandRuntime) Teardown() error { + // teardown is not different from stop for command runtime + return c.Stop() +} + +// forceCompState force updates the state for the entire component, forcing that state on all units. +func (c *CommandRuntime) forceCompState(state client.UnitState, msg string) { + c.observed.State = state + c.observed.Message = msg + for k, unit := range c.observed.Units { + unit.State = state + unit.Message = msg + unit.Payload = nil + unit.configStateIdx = 0 + + // unit is a copy and must be set back into the map + c.observed.Units[k] = unit + } + c.sendObserved() +} + +// compState updates just the component state not all the units. +func (c *CommandRuntime) compState(state client.UnitState) { + msg := "Unknown" + if state == client.UnitStateHealthy { + msg = fmt.Sprintf("Healthy: communicating with pid '%d'", c.proc.PID) + } else if state == client.UnitStateDegraded { + msg = fmt.Sprintf("Degraded: pid '%d' missed '%d' check-ins", c.proc.PID, c.missedCheckins) + } + if c.observed.State != state || c.observed.Message != msg { + c.observed.State = state + c.observed.Message = msg + c.sendObserved() + } +} + +func (c *CommandRuntime) sendObserved() { + c.ch <- c.observed.Copy() +} + +func (c *CommandRuntime) start(comm Communicator) error { + if c.proc != nil { + // already running + return nil + } + cmdSpec := c.current.Spec.Spec.Command + var env []string + for _, e := range cmdSpec.Env { + env = append(env, fmt.Sprintf("%s=%s", e.Name, e.Value)) + } + proc, err := process.Start(c.current.Spec.BinaryPath, os.Geteuid(), os.Getgid(), cmdSpec.Args, env, attachOutErr) + if err != nil { + return err + } + c.lastCheckin = time.Time{} + c.missedCheckins = 0 + c.proc = proc + c.forceCompState(client.UnitStateStarting, fmt.Sprintf("Starting: spawned pid '%d'", c.proc.PID)) + c.startWatcher(proc, comm) + return nil +} + +func (c *CommandRuntime) stop(ctx context.Context) error { + if c.proc == nil { + // already stopped + return nil + } + cmdSpec := c.current.Spec.Spec.Command + go func(info *process.Info, timeout time.Duration) { + t := time.NewTimer(timeout) + defer t.Stop() + select { + case <-ctx.Done(): + return + case <-t.C: + // kill no matter what (might already be stopped) + _ = info.Kill() + } + }(c.proc, cmdSpec.Timeouts.Stop) + return c.proc.Stop() +} + +func (c *CommandRuntime) startWatcher(info *process.Info, comm Communicator) { + go func() { + err := comm.WriteConnInfo(info.Stdin) + if err != nil { + c.forceCompState(client.UnitStateFailed, fmt.Sprintf("Failed: failed to provide connection information to spawned pid '%d': %s", info.PID, err)) + // kill instantly + info.Kill() + } else { + _ = info.Stdin.Close() + } + + ch := info.Wait() + s := <-ch + c.procCh <- procState{ + proc: info, + state: s, + } + }() +} + +func (c *CommandRuntime) handleProc(state *os.ProcessState) bool { + switch c.actionState { + case actionStart: + // should still be running + stopMsg := fmt.Sprintf("Failed: pid '%d' exited with code '%d'", state.Pid(), state.ExitCode()) + c.forceCompState(client.UnitStateFailed, stopMsg) + return true + case actionStop: + // stopping (should have exited) + stopMsg := fmt.Sprintf("Stopped: pid '%d' 
exited with code '%d'", state.Pid(), state.ExitCode()) + c.forceCompState(client.UnitStateStopped, stopMsg) + } + return false +} + +func (c *CommandRuntime) mustSendExpected() bool { + if len(c.expected.Units) != len(c.observed.Units) { + // mismatch on unit count + return true + } + for ek, e := range c.expected.Units { + o, ok := c.observed.Units[ek] + if !ok { + // unit missing + return true + } + if o.configStateIdx != e.configStateIdx || e.State != o.State { + // config or state mismatch + return true + } + } + return false +} + +func (c *CommandRuntime) sendExpected(comm Communicator) error { + units := make([]*proto.UnitExpected, 0, len(c.expected.Units)) + for k, u := range c.expected.Units { + e := &proto.UnitExpected{ + Id: k.UnitID, + Type: proto.UnitType(k.UnitType), + State: proto.State(u.State), + ConfigStateIdx: u.configStateIdx, + Config: "", + } + o, ok := c.observed.Units[k] + if !ok || o.configStateIdx != u.configStateIdx { + cfg, err := yaml.Marshal(u.config) + if err != nil { + return fmt.Errorf("failed to marshal YAML for unit %s: %w", k.UnitID, err) + } + e.Config = string(cfg) + } + units = append(units, e) + } + comm.CheckinExpected(&proto.CheckinExpected{Units: units}) + return nil +} + +func (c *CommandRuntime) cleanupStopped() bool { + cleaned := false + for ek, e := range c.expected.Units { + if e.State == client.UnitStateStopped { + // should be stopped; check if observed is also reporting stopped + o, ok := c.observed.Units[ek] + if ok && o.State == client.UnitStateStopped { + // its also stopped; so it can now be removed from both + delete(c.expected.Units, ek) + delete(c.observed.Units, ek) + cleaned = true + } + } + } + return cleaned +} + +func attachOutErr(cmd *exec.Cmd) error { + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + return nil +} diff --git a/pkg/component/runtime/failed.go b/pkg/component/runtime/failed.go new file mode 100644 index 00000000000..55a687aa1fc --- /dev/null +++ b/pkg/component/runtime/failed.go @@ -0,0 +1,99 @@ +package runtime + +import ( + "context" + "errors" + + "github.com/elastic/elastic-agent-client/v7/pkg/client" + + "github.com/elastic/elastic-agent/pkg/component" +) + +// FailedRuntime is used for a component that has an error from the component loader. +type FailedRuntime struct { + ch chan ComponentState + current component.Component + done chan bool +} + +// NewFailedRuntime creates a runtime for a component that has an error from the component loader. +func NewFailedRuntime(comp component.Component) (ComponentRuntime, error) { + if comp.Err == nil { + return nil, errors.New("must be a component that has a defined error") + } + return &FailedRuntime{ + ch: make(chan ComponentState), + current: comp, + done: make(chan bool), + }, nil +} + +// Run runs the runtime for a component that got an error from the component loader. +func (c *FailedRuntime) Run(ctx context.Context, _ Communicator) { + // state is hard coded to failed + c.ch <- createState(c.current, false) + select { + case <-ctx.Done(): + return + case <-c.done: + // set to stopped as soon as done is given + c.ch <- createState(c.current, true) + } + <-ctx.Done() +} + +// Watch returns the watch channel. +func (c *FailedRuntime) Watch() <-chan ComponentState { + return c.ch +} + +// Start does nothing. +func (c *FailedRuntime) Start() error { + return nil +} + +// Update updates the component state. 
+func (c *FailedRuntime) Update(comp component.Component) error { + if comp.Err == nil { + return errors.New("cannot update to a component without a defined error") + } + c.current = comp + return nil +} + +// Stop marks it stopped. +func (c *FailedRuntime) Stop() error { + go func() { + close(c.done) + }() + return nil +} + +// Teardown marks it stopped. +func (c *FailedRuntime) Teardown() error { + return c.Stop() +} + +func createState(comp component.Component, done bool) ComponentState { + state := client.UnitStateFailed + if done { + state = client.UnitStateStopped + } + unitErrs := make(map[ComponentUnitKey]ComponentUnitState) + for _, unit := range comp.Units { + key := ComponentUnitKey{ + UnitType: unit.Type, + UnitID: unit.ID, + } + unitErrs[key] = ComponentUnitState{ + State: state, + Message: comp.Err.Error(), + Payload: nil, + } + } + return ComponentState{ + State: state, + Message: comp.Err.Error(), + Units: unitErrs, + } +} diff --git a/pkg/component/runtime/manager.go b/pkg/component/runtime/manager.go new file mode 100644 index 00000000000..69f3bc643b6 --- /dev/null +++ b/pkg/component/runtime/manager.go @@ -0,0 +1,572 @@ +package runtime + +import ( + "context" + "crypto/tls" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "github.com/gofrs/uuid" + "net" + "strings" + "sync" + "time" + + "github.com/elastic/elastic-agent-client/v7/pkg/client" + "github.com/elastic/elastic-agent-client/v7/pkg/proto" + "github.com/elastic/elastic-agent-libs/atomic" + "go.elastic.co/apm" + "go.elastic.co/apm/module/apmgrpc" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/status" + + "github.com/elastic/elastic-agent/internal/pkg/core/authority" + "github.com/elastic/elastic-agent/pkg/component" + "github.com/elastic/elastic-agent/pkg/core/logger" +) + +const ( + // initialCheckinTimeout is the maximum amount of wait time from initial check-in stream to + // getting the first check-in observed state. + initialCheckinTimeout = 5 * time.Second + // checkinPeriod is how often a component must send an observed checkin message over the communication + // control protocol. + checkinPeriod = 30 * time.Second + // maxCheckinMisses is the maximum number of check-in misses a component can miss before it is killed + // and restarted. + maxCheckinMisses = 3 +) + +var ( + // ErrNoUnit returned when manager is not controlling this unit. + ErrNoUnit = errors.New("no unit under control of this manager") +) + +// Manager for the entire runtime of operating components. +type Manager struct { + proto.UnimplementedElasticAgentServer + + logger *logger.Logger + ca *authority.CertificateAuthority + listenAddr string + tracer *apm.Tracer + + netMx sync.RWMutex + listener net.Listener + server *grpc.Server + + waitMx sync.RWMutex + waitReady map[string]waitForReady + + mx sync.RWMutex + current map[string]*componentRuntimeState + + subMx sync.RWMutex + subscriptions map[string][]*Subscription + + shuttingDown atomic.Bool +} + +// NewManager creates a new manager. 
+func NewManager(logger *logger.Logger, listenAddr string, tracer *apm.Tracer) (*Manager, error) { + ca, err := authority.NewCA() + if err != nil { + return nil, err + } + m := &Manager{ + logger: logger, + ca: ca, + listenAddr: listenAddr, + tracer: tracer, + waitReady: make(map[string]waitForReady), + current: make(map[string]*componentRuntimeState), + subscriptions: make(map[string][]*Subscription), + } + return m, nil +} + +// Run runs the manager. +// +// Blocks until the context is done. +func (m *Manager) Run(ctx context.Context) error { + lis, err := net.Listen("tcp", m.listenAddr) + if err != nil { + return err + } + m.netMx.Lock() + m.listener = lis + m.netMx.Unlock() + + certPool := x509.NewCertPool() + if ok := certPool.AppendCertsFromPEM(m.ca.Crt()); !ok { + return errors.New("failed to append root CA") + } + creds := credentials.NewTLS(&tls.Config{ + ClientAuth: tls.RequireAndVerifyClientCert, + ClientCAs: certPool, + GetCertificate: m.getCertificate, + MinVersion: tls.VersionTLS12, + }) + + var server *grpc.Server + if m.tracer != nil { + apmInterceptor := apmgrpc.NewUnaryServerInterceptor(apmgrpc.WithRecovery(), apmgrpc.WithTracer(m.tracer)) + server = grpc.NewServer( + grpc.UnaryInterceptor(apmInterceptor), + grpc.Creds(creds), + ) + } else { + server = grpc.NewServer(grpc.Creds(creds)) + } + m.netMx.Lock() + m.server = server + m.netMx.Unlock() + proto.RegisterElasticAgentServer(m.server, m) + m.shuttingDown.Store(false) + + // start serving GRPC connections + errCh := make(chan error) + go func() { + errCh <- server.Serve(lis) + }() + + select { + case <-ctx.Done(): + server.Stop() + err = <-errCh + case err = <-errCh: + } + m.shutdown() + m.netMx.Lock() + m.listener = nil + m.server = nil + m.netMx.Unlock() + return err +} + +// WaitForReady waits until the manager is ready to be used. +// +// This verifies that the GRPC server is up and running. +func (m *Manager) WaitForReady(ctx context.Context) error { + tk, err := uuid.NewV4() + if err != nil { + return err + } + token := tk.String() + name, err := genServerName() + if err != nil { + return err + } + pair, err := m.ca.GeneratePairWithName(name) + if err != nil { + return err + } + cert, err := tls.X509KeyPair(pair.Crt, pair.Key) + if err != nil { + return err + } + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(m.ca.Crt()) + trans := credentials.NewTLS(&tls.Config{ + ServerName: name, + Certificates: []tls.Certificate{cert}, + RootCAs: caCertPool, + }) + + m.waitMx.Lock() + m.waitReady[token] = waitForReady{ + name: name, + cert: pair, + } + m.waitMx.Unlock() + + defer func() { + m.waitMx.Lock() + delete(m.waitReady, token) + m.waitMx.Unlock() + }() + + for { + m.netMx.RLock() + lis := m.listener + srv := m.server + m.netMx.RUnlock() + if lis != nil && srv != nil { + addr := m.getListenAddr() + c, err := grpc.Dial(addr, grpc.WithTransportCredentials(trans)) + if err == nil { + _ = c.Close() + return nil + } + } + + t := time.NewTimer(100 * time.Millisecond) + select { + case <-ctx.Done(): + return ctx.Err() + case <-t.C: + } + } +} + +// Update updates the current state of the running components. 
+// +// This returns as soon as possible, work is performed in the background to +func (m *Manager) Update(components []component.Component) error { + shuttingDown := m.shuttingDown.Load() + if shuttingDown { + // ignore any updates once shutdown started + return nil + } + // teardown is true because the public `Update` method would be coming directly from + // policy so if a component was removed it needs to be torn down. + return m.update(components, true) +} + +// PerformAction executes an action on a unit. +func (m *Manager) PerformAction(ctx context.Context, unit component.Unit, name string, params map[string]interface{}) (map[string]interface{}, error) { + id, err := uuid.NewV4() + if err != nil { + return nil, err + } + paramBytes := []byte("{}") + if params != nil { + paramBytes, err = json.Marshal(params) + if err != nil { + return nil, err + } + } + runtime := m.getRuntimeFromUnit(unit) + if runtime == nil { + return nil, ErrNoUnit + } + + req := &proto.ActionRequest{ + Id: id.String(), + Name: name, + Params: paramBytes, + UnitId: unit.ID, + UnitType: proto.UnitType(unit.Type), + Type: proto.ActionRequest_CUSTOM, + } + + res, err := runtime.performAction(ctx, req) + if err != nil { + return nil, err + } + + var respBody map[string]interface{} + if res.Status == proto.ActionResponse_FAILED { + if res.Result != nil { + err = json.Unmarshal(res.Result, &respBody) + if err != nil { + return nil, err + } + errMsgT, ok := respBody["error"] + if ok { + errMsg, ok := errMsgT.(string) + if ok { + return nil, errors.New(errMsg) + } + } + } + return nil, errors.New("generic action failure") + } + if res.Result != nil { + err = json.Unmarshal(res.Result, &respBody) + if err != nil { + return nil, err + } + } + return respBody, nil +} + +// Subscribe to changes in a component. +// +// Allows a component without that ID to exists. Once a component starts matching that ID then changes will start to +// be provided over the channel. +// +// Note: Not reading from a subscription channel will cause the Manager to block. +func (m *Manager) Subscribe(componentID string) *Subscription { + sub := newSubscription(m) + + // add latest to channel + m.mx.RLock() + comp, ok := m.current[componentID] + m.mx.RUnlock() + if ok { + comp.latestMx.RLock() + sub.ch <- comp.latest + comp.latestMx.RUnlock() + } + + // add subscription for future changes + m.subMx.Lock() + m.subscriptions[componentID] = append(m.subscriptions[componentID], sub) + defer m.subMx.Unlock() + + return sub +} + +func (m *Manager) Checkin(_ proto.ElasticAgent_CheckinServer) error { + return status.Error(codes.Unavailable, "removed; upgrade to V2") +} + +func (m *Manager) CheckinV2(server proto.ElasticAgent_CheckinV2Server) error { + initCheckinChan := make(chan *proto.CheckinObserved) + go func() { + // go func will not be leaked, because when the main function + // returns it will close the connection. that will cause this + // function to return. 
+ observed, err := server.Recv() + if err != nil { + close(initCheckinChan) + return + } + initCheckinChan <- observed + }() + + var ok bool + var initCheckin *proto.CheckinObserved + + t := time.NewTimer(initialCheckinTimeout) + select { + case initCheckin, ok = <-initCheckinChan: + t.Stop() + case <-t.C: + // close connection + m.logger.Debug("check-in stream never sent initial observed message; closing connection") + return status.Error(codes.DeadlineExceeded, "never sent initial observed message") + } + if !ok { + // close connection + return nil + } + + runtime := m.getRuntimeFromToken(initCheckin.Token) + if runtime == nil { + // no component runtime with token; close connection + m.logger.Debug("check-in stream sent an invalid token; closing connection") + return status.Error(codes.PermissionDenied, "invalid token") + } + + return runtime.comm.checkin(server, initCheckin) +} + +func (m *Manager) Actions(server proto.ElasticAgent_ActionsServer) error { + initRespChan := make(chan *proto.ActionResponse) + go func() { + // go func will not be leaked, because when the main function + // returns it will close the connection. that will cause this + // function to return. + observed, err := server.Recv() + if err != nil { + close(initRespChan) + return + } + initRespChan <- observed + }() + + var ok bool + var initResp *proto.ActionResponse + + t := time.NewTimer(initialCheckinTimeout) + select { + case initResp, ok = <-initRespChan: + t.Stop() + case <-t.C: + // close connection + m.logger.Debug("actions stream never sent initial response message; closing connection") + return status.Error(codes.DeadlineExceeded, "never sent initial response message") + } + if !ok { + // close connection + return nil + } + if initResp.Id != client.ActionResponseInitID { + // close connection + m.logger.Debug("actions stream first response message must be an init message; closing connection") + return status.Error(codes.InvalidArgument, "initial response must be an init message") + } + + runtime := m.getRuntimeFromToken(initResp.Token) + if runtime == nil { + // no component runtime with token; close connection + m.logger.Debug("actions stream sent an invalid token; closing connection") + return status.Error(codes.PermissionDenied, "invalid token") + } + + return runtime.comm.actions(server) +} + +// update updates the current state of the running components. 
+// +// This returns as soon as possible, work is performed in the background to +func (m *Manager) update(components []component.Component, teardown bool) error { + m.mx.Lock() + defer m.mx.Unlock() + + touched := make(map[string]bool) + for _, comp := range components { + touched[comp.ID] = true + existing, ok := m.current[comp.ID] + if ok { + // existing component; send runtime updated value + existing.current = comp + if err := existing.runtime.Update(comp); err != nil { + return fmt.Errorf("failed to update component %s: %w", comp.ID, err) + } + } else { + // new component; create its runtime + logger := m.logger.Named(fmt.Sprintf("component.runtime.%s", comp.ID)) + state, err := newComponentRuntimeState(m, logger, comp) + if err != nil { + return fmt.Errorf("failed to create new component %s: %w", comp.ID, err) + } + m.current[comp.ID] = state + err = state.start() + if err != nil { + return fmt.Errorf("failed to start component %s: %w", comp.ID, err) + } + } + } + for id, existing := range m.current { + // skip if already touched (meaning it still existing) + if _, done := touched[id]; done { + continue + } + // component was removed (time to clean it up) + existing.stop(teardown) + } + return nil +} + +func (m *Manager) shutdown() { + m.shuttingDown.Store(true) + + // don't tear down as this is just a shutdown, so components most likely will come back + // on next start of the manager + _ = m.update([]component.Component{}, false) + + // wait until all components are removed + for { + m.mx.Lock() + length := len(m.current) + m.mx.Unlock() + if length <= 0 { + return + } + <-time.After(100 * time.Millisecond) + } +} + +func (m *Manager) stateChanged(state *componentRuntimeState, latest ComponentState) { + m.subMx.RLock() + subs, ok := m.subscriptions[state.current.ID] + if ok { + for _, sub := range subs { + sub.ch <- latest + } + } + m.subMx.RUnlock() + + shutdown := state.shuttingDown.Load() + if shutdown && latest.State == client.UnitStateStopped { + // shutdown is complete; remove from current + m.mx.Lock() + delete(m.current, state.current.ID) + m.mx.Unlock() + + state.destroy() + } +} + +func (m *Manager) unsubscribe(subscription *Subscription) { + m.subMx.Lock() + defer m.subMx.Unlock() + for key, subs := range m.subscriptions { + for i, sub := range subs { + if subscription == sub { + m.subscriptions[key] = append(m.subscriptions[key][:i], m.subscriptions[key][i+1:]...) 
+ return + } + } + } +} + +func (m *Manager) getCertificate(chi *tls.ClientHelloInfo) (*tls.Certificate, error) { + var cert *tls.Certificate + + m.mx.RLock() + for _, runtime := range m.current { + if runtime.comm.name == chi.ServerName { + cert = runtime.comm.cert.Certificate + break + } + } + m.mx.RUnlock() + if cert != nil { + return cert, nil + } + + m.waitMx.RLock() + for _, waiter := range m.waitReady { + if waiter.name == chi.ServerName { + cert = waiter.cert.Certificate + break + } + } + m.waitMx.RUnlock() + if cert != nil { + return cert, nil + } + + return nil, errors.New("no supported TLS certificate") +} + +func (m *Manager) getRuntimeFromToken(token string) *componentRuntimeState { + m.mx.RLock() + defer m.mx.RUnlock() + + for _, runtime := range m.current { + if runtime.comm.token == token { + return runtime + } + } + return nil +} + +func (m *Manager) getRuntimeFromUnit(unit component.Unit) *componentRuntimeState { + m.mx.RLock() + defer m.mx.RUnlock() + for _, comp := range m.current { + for _, u := range comp.current.Units { + if u.Type == unit.Type && u.ID == unit.ID { + return comp + } + } + } + return nil +} + +func (m *Manager) getListenAddr() string { + addr := strings.SplitN(m.listenAddr, ":", 2) + if len(addr) == 2 && addr[1] == "0" { + m.netMx.RLock() + lis := m.listener + m.netMx.RUnlock() + if lis != nil { + port := lis.Addr().(*net.TCPAddr).Port + return fmt.Sprintf("%s:%d", addr[0], port) + } + } + return m.listenAddr +} + +type waitForReady struct { + name string + cert *authority.Pair +} diff --git a/pkg/component/runtime/manager_test.go b/pkg/component/runtime/manager_test.go new file mode 100644 index 00000000000..ba6cdf8102a --- /dev/null +++ b/pkg/component/runtime/manager_test.go @@ -0,0 +1,1172 @@ +package runtime + +import ( + "context" + "errors" + "fmt" + "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/elastic-agent/pkg/core/logger" + "go.elastic.co/apm/apmtest" + "path/filepath" + "runtime" + "testing" + "time" + + "github.com/elastic/elastic-agent-client/v7/pkg/client" + "github.com/stretchr/testify/require" + + "github.com/elastic/elastic-agent/pkg/component" +) + +func TestManager_SimpleComponentErr(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + m, err := NewManager(newErrorLogger(t), "localhost:0", apmtest.DiscardTracer) + require.NoError(t, err) + errCh := make(chan error) + go func() { + errCh <- m.Run(ctx) + }() + + waitCtx, waitCancel := context.WithTimeout(ctx, 1*time.Second) + defer waitCancel() + if err := m.WaitForReady(waitCtx); err != nil { + require.NoError(t, err) + } + + comp := component.Component{ + ID: "error-default", + Err: errors.New("hard-coded error"), + Units: []component.Unit{ + { + ID: "error-input", + Type: client.UnitTypeInput, + Config: map[string]interface{}{}, + }, + }, + } + + subCtx, subCancel := context.WithCancel(context.Background()) + defer subCancel() + subErrCh := make(chan error) + go func() { + sub := m.Subscribe("error-default") + defer sub.Unsubscribe() + for { + select { + case <-subCtx.Done(): + return + case state := <-sub.Ch(): + t.Logf("component state changed: %+v", state) + if state.State == client.UnitStateStarting { + // initial is starting + } else if state.State == client.UnitStateFailed { + unit, ok := state.Units[ComponentUnitKey{UnitType: client.UnitTypeInput, UnitID: "error-input"}] + if ok { + if unit.State == client.UnitStateFailed { + // should be failed + subErrCh <- nil + } else 
{ + // unknown state that should not have occurred + subErrCh <- fmt.Errorf("unit reported unexpected state: %v", unit.State) + } + } + } else { + subErrCh <- fmt.Errorf("component reported unexpected state: %v", state.State) + } + } + } + }() + + defer drainErrChan(errCh) + defer drainErrChan(subErrCh) + + startTimer := time.NewTimer(100 * time.Millisecond) + defer startTimer.Stop() + select { + case <-startTimer.C: + err = m.Update([]component.Component{comp}) + require.NoError(t, err) + case err := <-errCh: + t.Fatalf("failed early: %s", err) + } + + endTimer := time.NewTimer(30 * time.Second) + defer endTimer.Stop() +LOOP: + for { + select { + case <-endTimer.C: + t.Fatalf("timed out after 30 seconds") + case err := <-errCh: + require.NoError(t, err) + case err := <-subErrCh: + require.NoError(t, err) + break LOOP + } + } + + subCancel() + cancel() + + err = <-errCh + require.NoError(t, err) +} + +func TestManager_FakeInput_StartStop(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + m, err := NewManager(newErrorLogger(t), "localhost:0", apmtest.DiscardTracer) + require.NoError(t, err) + errCh := make(chan error) + go func() { + errCh <- m.Run(ctx) + }() + + waitCtx, waitCancel := context.WithTimeout(ctx, 1*time.Second) + defer waitCancel() + if err := m.WaitForReady(waitCtx); err != nil { + require.NoError(t, err) + } + + binaryPath := filepath.Join("..", "fake", "fake") + if runtime.GOOS == component.Windows { + binaryPath += ".exe" + } + comp := component.Component{ + ID: "fake-default", + Spec: component.InputRuntimeSpec{ + InputType: "fake", + BinaryName: "", + BinaryPath: binaryPath, + Spec: component.InputSpec{ + Name: "fake", + Command: &component.CommandSpec{}, + }, + }, + Units: []component.Unit{ + { + ID: "fake-input", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "fake", + "state": client.UnitStateHealthy, + "message": "Fake Healthy", + }, + }, + }, + } + + subCtx, subCancel := context.WithCancel(context.Background()) + defer subCancel() + subErrCh := make(chan error) + go func() { + sub := m.Subscribe("fake-default") + defer sub.Unsubscribe() + for { + select { + case <-subCtx.Done(): + return + case state := <-sub.Ch(): + t.Logf("component state changed: %+v", state) + if state.State == client.UnitStateFailed { + subErrCh <- fmt.Errorf("component failed: %s", state.Message) + } else { + unit, ok := state.Units[ComponentUnitKey{UnitType: client.UnitTypeInput, UnitID: "fake-input"}] + if ok { + if unit.State == client.UnitStateFailed { + subErrCh <- fmt.Errorf("unit failed: %s", unit.Message) + } else if unit.State == client.UnitStateHealthy { + // remove the component which will stop it + err := m.Update([]component.Component{}) + if err != nil { + subErrCh <- err + } + } else if unit.State == client.UnitStateStopped { + subErrCh <- nil + } else if unit.State == client.UnitStateStarting { + // acceptable + } else { + // unknown state that should not have occurred + subErrCh <- fmt.Errorf("unit reported unexpected state: %v", unit.State) + } + } else { + subErrCh <- errors.New("unit missing: fake-input") + } + } + } + } + }() + + defer drainErrChan(errCh) + defer drainErrChan(subErrCh) + + startTimer := time.NewTimer(100 * time.Millisecond) + defer startTimer.Stop() + select { + case <-startTimer.C: + err = m.Update([]component.Component{comp}) + require.NoError(t, err) + case err := <-errCh: + t.Fatalf("failed early: %s", err) + } + + endTimer := time.NewTimer(30 * time.Second) + defer endTimer.Stop() 
+LOOP: + for { + select { + case <-endTimer.C: + t.Fatalf("timed out after 30 seconds") + case err := <-errCh: + require.NoError(t, err) + case err := <-subErrCh: + require.NoError(t, err) + break LOOP + } + } + + subCancel() + cancel() + + err = <-errCh + require.NoError(t, err) +} + +func TestManager_FakeInput_Configure(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + m, err := NewManager(newErrorLogger(t), "localhost:0", apmtest.DiscardTracer) + require.NoError(t, err) + errCh := make(chan error) + go func() { + errCh <- m.Run(ctx) + }() + + waitCtx, waitCancel := context.WithTimeout(ctx, 1*time.Second) + defer waitCancel() + if err := m.WaitForReady(waitCtx); err != nil { + require.NoError(t, err) + } + + binaryPath := filepath.Join("..", "fake", "fake") + if runtime.GOOS == component.Windows { + binaryPath += ".exe" + } + comp := component.Component{ + ID: "fake-default", + Spec: component.InputRuntimeSpec{ + InputType: "fake", + BinaryName: "", + BinaryPath: binaryPath, + Spec: component.InputSpec{ + Name: "fake", + Command: &component.CommandSpec{}, + }, + }, + Units: []component.Unit{ + { + ID: "fake-input", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "fake", + "state": client.UnitStateHealthy, + "message": "Fake Healthy", + }, + }, + }, + } + + subCtx, subCancel := context.WithCancel(context.Background()) + defer subCancel() + subErrCh := make(chan error) + go func() { + sub := m.Subscribe("fake-default") + defer sub.Unsubscribe() + for { + select { + case <-subCtx.Done(): + return + case state := <-sub.Ch(): + t.Logf("component state changed: %+v", state) + if state.State == client.UnitStateFailed { + subErrCh <- fmt.Errorf("component failed: %s", state.Message) + } else { + unit, ok := state.Units[ComponentUnitKey{UnitType: client.UnitTypeInput, UnitID: "fake-input"}] + if ok { + if unit.State == client.UnitStateFailed { + subErrCh <- fmt.Errorf("unit failed: %s", unit.Message) + } else if unit.State == client.UnitStateHealthy { + // update config to change the state to degraded + comp.Units[0].Config = map[string]interface{}{ + "type": "fake", + "state": client.UnitStateDegraded, + "message": "Fake Degraded", + } + err := m.Update([]component.Component{comp}) + if err != nil { + subErrCh <- err + } + } else if unit.State == client.UnitStateDegraded { + subErrCh <- nil + } else if unit.State == client.UnitStateStarting { + // acceptable + } else { + // unknown state that should not have occurred + subErrCh <- fmt.Errorf("unit reported unexpected state: %v", unit.State) + } + } else { + subErrCh <- errors.New("unit missing: fake-input") + } + } + } + } + }() + + defer drainErrChan(errCh) + defer drainErrChan(subErrCh) + + startTimer := time.NewTimer(100 * time.Millisecond) + defer startTimer.Stop() + select { + case <-startTimer.C: + err = m.Update([]component.Component{comp}) + require.NoError(t, err) + case err := <-errCh: + t.Fatalf("failed early: %s", err) + } + + endTimer := time.NewTimer(30 * time.Second) + defer endTimer.Stop() +LOOP: + for { + select { + case <-endTimer.C: + t.Fatalf("timed out after 30 seconds") + case err := <-errCh: + require.NoError(t, err) + case err := <-subErrCh: + require.NoError(t, err) + break LOOP + } + } + + subCancel() + cancel() + + err = <-errCh + require.NoError(t, err) +} + +func TestManager_FakeInput_RemoveUnit(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + m, err := NewManager(newErrorLogger(t), "localhost:0", 
apmtest.DiscardTracer) + require.NoError(t, err) + errCh := make(chan error) + go func() { + errCh <- m.Run(ctx) + }() + + waitCtx, waitCancel := context.WithTimeout(ctx, 1*time.Second) + defer waitCancel() + if err := m.WaitForReady(waitCtx); err != nil { + require.NoError(t, err) + } + + binaryPath := filepath.Join("..", "fake", "fake") + if runtime.GOOS == component.Windows { + binaryPath += ".exe" + } + comp := component.Component{ + ID: "fake-default", + Spec: component.InputRuntimeSpec{ + InputType: "fake", + BinaryName: "", + BinaryPath: binaryPath, + Spec: component.InputSpec{ + Name: "fake", + Command: &component.CommandSpec{}, + }, + }, + Units: []component.Unit{ + { + ID: "fake-input-0", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "fake", + "state": client.UnitStateHealthy, + "message": "Fake Healthy 0", + }, + }, + { + ID: "fake-input-1", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "fake", + "state": client.UnitStateHealthy, + "message": "Fake Healthy 1", + }, + }, + }, + } + + subCtx, subCancel := context.WithCancel(context.Background()) + defer subCancel() + subErrCh := make(chan error) + go func() { + unit1Stopped := false + + sub := m.Subscribe("fake-default") + defer sub.Unsubscribe() + for { + select { + case <-subCtx.Done(): + return + case state := <-sub.Ch(): + t.Logf("component state changed: %+v", state) + if state.State == client.UnitStateFailed { + subErrCh <- fmt.Errorf("component failed: %s", state.Message) + } else { + unit0, ok := state.Units[ComponentUnitKey{UnitType: client.UnitTypeInput, UnitID: "fake-input-0"}] + if ok { + if unit0.State == client.UnitStateFailed { + subErrCh <- fmt.Errorf("unit 0 failed: %s", unit0.Message) + } else if unit0.State == client.UnitStateStarting || unit0.State == client.UnitStateHealthy { + // acceptable + } else { + // unknown state that should not have occurred + subErrCh <- fmt.Errorf("unit 0 reported unexpected state: %v", unit0.State) + } + } else { + subErrCh <- errors.New("unit missing: fake-input-0") + } + unit1, ok := state.Units[ComponentUnitKey{UnitType: client.UnitTypeInput, UnitID: "fake-input-1"}] + if ok { + if unit1.State == client.UnitStateFailed { + subErrCh <- fmt.Errorf("unit 1 failed: %s", unit1.Message) + } else if unit1.State == client.UnitStateHealthy { + // unit1 is healthy lets remove it from the component + comp.Units = comp.Units[0:1] + err := m.Update([]component.Component{comp}) + if err != nil { + subErrCh <- err + } + } else if unit1.State == client.UnitStateStarting || unit1.State == client.UnitStateStopping { + // acceptable + } else if unit1.State == client.UnitStateStopped { + // unit should have been reported stopped before being removed + unit1Stopped = true + } else { + // unknown state that should not have occurred + subErrCh <- fmt.Errorf("unit 1 reported unexpected state: %v", unit1.State) + } + } else { + if len(comp.Units) == 1 { + if unit1Stopped { + // unit reported stopped then removed (perfect!) 
+ subErrCh <- nil + } else { + // never reported stopped + subErrCh <- errors.New("unit 1 removed but not reported stop first") + } + } else { + // should not be removed + subErrCh <- errors.New("unit missing: fake-input-1") + } + } + } + } + } + }() + + defer drainErrChan(errCh) + defer drainErrChan(subErrCh) + + startTimer := time.NewTimer(100 * time.Millisecond) + defer startTimer.Stop() + select { + case <-startTimer.C: + err = m.Update([]component.Component{comp}) + require.NoError(t, err) + case err := <-errCh: + t.Fatalf("failed early: %s", err) + } + + endTimer := time.NewTimer(30 * time.Second) + defer endTimer.Stop() +LOOP: + for { + select { + case <-endTimer.C: + t.Fatalf("timed out after 30 seconds") + case err := <-errCh: + require.NoError(t, err) + case err := <-subErrCh: + require.NoError(t, err) + break LOOP + } + } + + subCancel() + cancel() + + err = <-errCh + require.NoError(t, err) +} + +func TestManager_FakeInput_ActionState(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + m, err := NewManager(newErrorLogger(t), "localhost:0", apmtest.DiscardTracer) + require.NoError(t, err) + errCh := make(chan error) + go func() { + errCh <- m.Run(ctx) + }() + + waitCtx, waitCancel := context.WithTimeout(ctx, 1*time.Second) + defer waitCancel() + if err := m.WaitForReady(waitCtx); err != nil { + require.NoError(t, err) + } + + binaryPath := filepath.Join("..", "fake", "fake") + if runtime.GOOS == component.Windows { + binaryPath += ".exe" + } + comp := component.Component{ + ID: "fake-default", + Spec: component.InputRuntimeSpec{ + InputType: "fake", + BinaryName: "", + BinaryPath: binaryPath, + Spec: component.InputSpec{ + Name: "fake", + Command: &component.CommandSpec{}, + }, + }, + Units: []component.Unit{ + { + ID: "fake-input", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "fake", + "state": client.UnitStateHealthy, + "message": "Fake Healthy", + }, + }, + }, + } + + subCtx, subCancel := context.WithCancel(context.Background()) + defer subCancel() + subErrCh := make(chan error) + go func() { + sub := m.Subscribe("fake-default") + defer sub.Unsubscribe() + for { + select { + case <-subCtx.Done(): + return + case state := <-sub.Ch(): + t.Logf("component state changed: %+v", state) + if state.State == client.UnitStateFailed { + subErrCh <- fmt.Errorf("component failed: %s", state.Message) + } else { + unit, ok := state.Units[ComponentUnitKey{UnitType: client.UnitTypeInput, UnitID: "fake-input"}] + if ok { + if unit.State == client.UnitStateFailed { + subErrCh <- fmt.Errorf("unit failed: %s", unit.Message) + } else if unit.State == client.UnitStateHealthy { + actionCtx, actionCancel := context.WithTimeout(context.Background(), 3*time.Second) + _, err := m.PerformAction(actionCtx, comp.Units[0], "set_state", map[string]interface{}{ + "state": client.UnitStateDegraded, + "message": "Action Set Degraded", + }) + actionCancel() + if err != nil { + subErrCh <- err + } + } else if unit.State == client.UnitStateDegraded { + // action set it to degraded + subErrCh <- nil + } else if unit.State == client.UnitStateStarting { + // acceptable + } else { + // unknown state that should not have occurred + subErrCh <- fmt.Errorf("unit reported unexpected state: %v", unit.State) + } + } else { + subErrCh <- errors.New("unit missing: fake-input") + } + } + } + } + }() + + defer drainErrChan(errCh) + defer drainErrChan(subErrCh) + + startTimer := time.NewTimer(100 * time.Millisecond) + defer startTimer.Stop() + select { + 
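// Give the manager a brief head start, then push the component; bail out early if Run fails first.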
case <-startTimer.C: + err = m.Update([]component.Component{comp}) + require.NoError(t, err) + case err := <-errCh: + t.Fatalf("failed early: %s", err) + } + + endTimer := time.NewTimer(30 * time.Second) + defer endTimer.Stop() +LOOP: + for { + select { + case <-endTimer.C: + t.Fatalf("timed out after 30 seconds") + case err := <-errCh: + require.NoError(t, err) + case err := <-subErrCh: + require.NoError(t, err) + break LOOP + } + } + + subCancel() + cancel() + + err = <-errCh + require.NoError(t, err) +} + +func TestManager_FakeInput_Restarts(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + m, err := NewManager(newErrorLogger(t), "localhost:0", apmtest.DiscardTracer) + require.NoError(t, err) + errCh := make(chan error) + go func() { + errCh <- m.Run(ctx) + }() + + waitCtx, waitCancel := context.WithTimeout(ctx, 1*time.Second) + defer waitCancel() + if err := m.WaitForReady(waitCtx); err != nil { + require.NoError(t, err) + } + + binaryPath := filepath.Join("..", "fake", "fake") + if runtime.GOOS == component.Windows { + binaryPath += ".exe" + } + comp := component.Component{ + ID: "fake-default", + Spec: component.InputRuntimeSpec{ + InputType: "fake", + BinaryName: "", + BinaryPath: binaryPath, + Spec: component.InputSpec{ + Name: "fake", + Command: &component.CommandSpec{}, + }, + }, + Units: []component.Unit{ + { + ID: "fake-input", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "fake", + "state": client.UnitStateHealthy, + "message": "Fake Healthy", + }, + }, + }, + } + + subCtx, subCancel := context.WithCancel(context.Background()) + defer subCancel() + subErrCh := make(chan error) + go func() { + killed := false + + sub := m.Subscribe("fake-default") + defer sub.Unsubscribe() + for { + select { + case <-subCtx.Done(): + return + case state := <-sub.Ch(): + t.Logf("component state changed: %+v", state) + if state.State == client.UnitStateFailed { + if !killed { + subErrCh <- fmt.Errorf("component failed: %s", state.Message) + } else { + // expected to go to failed as it was killed with the action + } + } else { + unit, ok := state.Units[ComponentUnitKey{UnitType: client.UnitTypeInput, UnitID: "fake-input"}] + if ok { + if unit.State == client.UnitStateFailed { + if !killed { + subErrCh <- fmt.Errorf("unit failed: %s", unit.Message) + } else { + // expected to go to failed as it was killed with the action + } + } else if unit.State == client.UnitStateHealthy { + // force the input to exit and it should be restarted + if !killed { + killed = true + actionCtx, actionCancel := context.WithTimeout(context.Background(), 500*time.Millisecond) + _, err := m.PerformAction(actionCtx, comp.Units[0], "kill", nil) + actionCancel() + if !errors.Is(err, context.DeadlineExceeded) { + // should have got deadline exceeded for this call + if err == nil { + err = fmt.Errorf("should have got deadline exceeded") + } else { + err = fmt.Errorf("should have got deadline exceeded, instead got: %w", err) + } + subErrCh <- err + } + } else { + // got back to healthy after kill + subErrCh <- nil + } + } else if unit.State == client.UnitStateStarting { + // acceptable + } else { + // unknown state that should not have occurred + subErrCh <- fmt.Errorf("unit reported unexpected state: %v", unit.State) + } + } else { + subErrCh <- errors.New("unit missing: fake-input") + } + } + } + } + }() + + defer drainErrChan(errCh) + defer drainErrChan(subErrCh) + + startTimer := time.NewTimer(100 * time.Millisecond) + defer startTimer.Stop() + select { 
+ case <-startTimer.C: + err = m.Update([]component.Component{comp}) + require.NoError(t, err) + case err := <-errCh: + t.Fatalf("failed early: %s", err) + } + + endTimer := time.NewTimer(30 * time.Second) + defer endTimer.Stop() +LOOP: + for { + select { + case <-endTimer.C: + t.Fatalf("timed out after 30 seconds") + case err := <-errCh: + require.NoError(t, err) + case err := <-subErrCh: + require.NoError(t, err) + break LOOP + } + } + + subCancel() + cancel() + + err = <-errCh + require.NoError(t, err) +} + +func TestManager_FakeInput_InvalidAction(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + m, err := NewManager(newErrorLogger(t), "localhost:0", apmtest.DiscardTracer) + require.NoError(t, err) + errCh := make(chan error) + go func() { + errCh <- m.Run(ctx) + }() + + waitCtx, waitCancel := context.WithTimeout(ctx, 1*time.Second) + defer waitCancel() + if err := m.WaitForReady(waitCtx); err != nil { + require.NoError(t, err) + } + + binaryPath := filepath.Join("..", "fake", "fake") + if runtime.GOOS == component.Windows { + binaryPath += ".exe" + } + comp := component.Component{ + ID: "fake-default", + Spec: component.InputRuntimeSpec{ + InputType: "fake", + BinaryName: "", + BinaryPath: binaryPath, + Spec: component.InputSpec{ + Name: "fake", + Command: &component.CommandSpec{}, + }, + }, + Units: []component.Unit{ + { + ID: "fake-input", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "fake", + "state": client.UnitStateHealthy, + "message": "Fake Healthy", + }, + }, + }, + } + + subCtx, subCancel := context.WithCancel(context.Background()) + defer subCancel() + subErrCh := make(chan error) + go func() { + sub := m.Subscribe("fake-default") + defer sub.Unsubscribe() + for { + select { + case <-subCtx.Done(): + return + case state := <-sub.Ch(): + t.Logf("component state changed: %+v", state) + if state.State == client.UnitStateFailed { + subErrCh <- fmt.Errorf("component failed: %s", state.Message) + } else { + unit, ok := state.Units[ComponentUnitKey{UnitType: client.UnitTypeInput, UnitID: "fake-input"}] + if ok { + if unit.State == client.UnitStateFailed { + subErrCh <- fmt.Errorf("unit failed: %s", unit.Message) + } else if unit.State == client.UnitStateHealthy { + actionCtx, actionCancel := context.WithTimeout(context.Background(), 5*time.Second) + _, err := m.PerformAction(actionCtx, comp.Units[0], "invalid_missing_action", nil) + actionCancel() + if err == nil { + subErrCh <- fmt.Errorf("should have returned an error") + } else if err.Error() != "action undefined" { + subErrCh <- fmt.Errorf("should have returned error: action undefined") + } else { + subErrCh <- nil + } + } else if unit.State == client.UnitStateStarting { + // acceptable + } else { + // unknown state that should not have occurred + subErrCh <- fmt.Errorf("unit reported unexpected state: %v", unit.State) + } + } else { + subErrCh <- errors.New("unit missing: fake-input") + } + } + } + } + }() + + defer drainErrChan(errCh) + defer drainErrChan(subErrCh) + + startTimer := time.NewTimer(100 * time.Millisecond) + defer startTimer.Stop() + select { + case <-startTimer.C: + err = m.Update([]component.Component{comp}) + require.NoError(t, err) + case err := <-errCh: + t.Fatalf("failed early: %s", err) + } + + endTimer := time.NewTimer(30 * time.Second) + defer endTimer.Stop() +LOOP: + for { + select { + case <-endTimer.C: + t.Fatalf("timed out after 30 seconds") + case err := <-errCh: + require.NoError(t, err) + case err := <-subErrCh: + 
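// A nil value on subErrCh means the scenario completed; any error fails the test.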
require.NoError(t, err) + break LOOP + } + } + + subCancel() + cancel() + + err = <-errCh + require.NoError(t, err) +} + +func TestManager_FakeInput_MultiComponent(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + m, err := NewManager(newErrorLogger(t), "localhost:0", apmtest.DiscardTracer) + require.NoError(t, err) + errCh := make(chan error) + go func() { + errCh <- m.Run(ctx) + }() + + waitCtx, waitCancel := context.WithTimeout(ctx, 1*time.Second) + defer waitCancel() + if err := m.WaitForReady(waitCtx); err != nil { + require.NoError(t, err) + } + + binaryPath := filepath.Join("..", "fake", "fake") + if runtime.GOOS == component.Windows { + binaryPath += ".exe" + } + runtimeSpec := component.InputRuntimeSpec{ + InputType: "fake", + BinaryName: "", + BinaryPath: binaryPath, + Spec: component.InputSpec{ + Name: "fake", + Command: &component.CommandSpec{}, + }, + } + components := []component.Component{ + { + ID: "fake-0", + Spec: runtimeSpec, + Units: []component.Unit{ + { + ID: "fake-input-0-0", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "fake", + "state": client.UnitStateHealthy, + "message": "Fake Healthy 0-0", + }, + }, + { + ID: "fake-input-0-1", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "fake", + "state": client.UnitStateHealthy, + "message": "Fake Healthy 0-1", + }, + }, + { + ID: "fake-input-0-2", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "fake", + "state": client.UnitStateHealthy, + "message": "Fake Healthy 0-2", + }, + }, + }, + }, + { + ID: "fake-1", + Spec: runtimeSpec, + Units: []component.Unit{ + { + ID: "fake-input-1-0", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "fake", + "state": client.UnitStateHealthy, + "message": "Fake Healthy 1-0", + }, + }, + { + ID: "fake-input-1-1", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "fake", + "state": client.UnitStateHealthy, + "message": "Fake Healthy 1-1", + }, + }, + { + ID: "fake-input-1-2", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "fake", + "state": client.UnitStateHealthy, + "message": "Fake Healthy 1-2", + }, + }, + }, + }, + { + ID: "fake-2", + Spec: runtimeSpec, + Units: []component.Unit{ + { + ID: "fake-input-2-0", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "fake", + "state": client.UnitStateHealthy, + "message": "Fake Healthy 2-0", + }, + }, + { + ID: "fake-input-2-1", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "fake", + "state": client.UnitStateHealthy, + "message": "Fake Healthy 2-1", + }, + }, + { + ID: "fake-input-2-2", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "fake", + "state": client.UnitStateHealthy, + "message": "Fake Healthy 2-2", + }, + }, + }, + }, + } + + subCtx, subCancel := context.WithCancel(context.Background()) + defer subCancel() + subErrCh0 := make(chan error) + subErrCh1 := make(chan error) + subErrCh2 := make(chan error) + go func() { + sub0 := m.Subscribe("fake-0") + defer sub0.Unsubscribe() + sub1 := m.Subscribe("fake-1") + defer sub1.Unsubscribe() + sub2 := m.Subscribe("fake-2") + defer sub2.Unsubscribe() + for { + select { + case <-subCtx.Done(): + return + case state := <-sub0.Ch(): + t.Logf("component fake-0 state changed: %+v", state) + signalState(subErrCh0, &state) + case state := <-sub1.Ch(): + t.Logf("component fake-1 state changed: %+v", state) + signalState(subErrCh1, 
&state) + case state := <-sub2.Ch(): + t.Logf("component fake-2 state changed: %+v", state) + signalState(subErrCh2, &state) + } + } + }() + + defer drainErrChan(errCh) + defer drainErrChan(subErrCh0) + defer drainErrChan(subErrCh1) + defer drainErrChan(subErrCh2) + + startTimer := time.NewTimer(100 * time.Millisecond) + defer startTimer.Stop() + select { + case <-startTimer.C: + err = m.Update(components) + require.NoError(t, err) + case err := <-errCh: + t.Fatalf("failed early: %s", err) + } + + count := 0 + endTimer := time.NewTimer(30 * time.Second) + defer endTimer.Stop() +LOOP: + for { + select { + case <-endTimer.C: + t.Fatalf("timed out after 30 seconds") + case err := <-errCh: + require.NoError(t, err) + case err := <-subErrCh0: + require.NoError(t, err) + count += 1 + if count >= 3 { + break LOOP + } + case err := <-subErrCh1: + require.NoError(t, err) + count += 1 + if count >= 3 { + break LOOP + } + case err := <-subErrCh2: + require.NoError(t, err) + count += 1 + if count >= 3 { + break LOOP + } + } + } + + subCancel() + cancel() + + err = <-errCh + require.NoError(t, err) +} + +func newErrorLogger(t *testing.T) *logger.Logger { + t.Helper() + + loggerCfg := logger.DefaultLoggingConfig() + loggerCfg.Level = logp.ErrorLevel + + log, err := logger.NewFromConfig("", loggerCfg, false) + require.NoError(t, err) + return log +} + +func drainErrChan(ch chan error) { + for { + select { + case <-ch: + default: + return + } + } +} + +func signalState(subErrCh chan error, state *ComponentState) { + if state.State == client.UnitStateFailed { + subErrCh <- fmt.Errorf("component failed: %s", state.Message) + } else { + issue := "" + healthy := 0 + for key, unit := range state.Units { + if unit.State == client.UnitStateStarting { + // acceptable + } else if unit.State == client.UnitStateHealthy { + healthy += 1 + } else if issue == "" { + issue = fmt.Sprintf("unit %s in invalid state %v", key.UnitID, unit.State) + } + } + if issue != "" { + subErrCh <- fmt.Errorf("%s", issue) + } + if healthy == 3 { + subErrCh <- nil + } + } +} diff --git a/pkg/component/runtime/runtime.go b/pkg/component/runtime/runtime.go new file mode 100644 index 00000000000..6214ea573c7 --- /dev/null +++ b/pkg/component/runtime/runtime.go @@ -0,0 +1,353 @@ +package runtime + +import ( + "context" + "encoding/json" + "errors" + "github.com/elastic/elastic-agent-client/v7/pkg/client" + "github.com/elastic/elastic-agent-client/v7/pkg/proto" + "github.com/elastic/elastic-agent-libs/atomic" + "github.com/elastic/elastic-agent/pkg/core/logger" + "sync" + + "github.com/elastic/elastic-agent/pkg/component" +) + +// ComponentUnitState is the state for a unit running in a component. +type ComponentUnitState struct { + State client.UnitState + Message string + Payload map[string]interface{} + + // internal + configStateIdx uint64 + config map[string]interface{} + payloadStr string +} + +// ComponentUnitKey is a composite key to identify a unit by its type and ID. +type ComponentUnitKey struct { + UnitType client.UnitType + UnitID string +} + +// ComponentState is the overall state of the component. 
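+// It pairs the component-level state and message with the state of every unit, keyed by ComponentUnitKey.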
+type ComponentState struct { + State client.UnitState + Message string + + Units map[ComponentUnitKey]ComponentUnitState +} + +func newComponentState(comp *component.Component, initState client.UnitState, initMessage string, initCfgIdx uint64) (s ComponentState) { + s.Units = make(map[ComponentUnitKey]ComponentUnitState) + s.syncComponent(comp, initState, initMessage, initCfgIdx) + return s +} + +// Copy returns a copy of the structure. +func (s *ComponentState) Copy() (c ComponentState) { + c = *s + c.Units = make(map[ComponentUnitKey]ComponentUnitState) + for k, v := range s.Units { + c.Units[k] = v + } + return c +} + +func (s *ComponentState) syncComponent(comp *component.Component, initState client.UnitState, initMessage string, initCfgIdx uint64) { + s.State = initState + s.Message = initMessage + touched := make(map[ComponentUnitKey]bool) + for _, unit := range comp.Units { + key := ComponentUnitKey{ + UnitType: unit.Type, + UnitID: unit.ID, + } + + touched[key] = true + existing, ok := s.Units[key] + existing.State = initState + existing.Message = initMessage + existing.Payload = nil + existing.config = unit.Config + if ok { + existing.configStateIdx += 1 + } else { + existing.configStateIdx = initCfgIdx + } + s.Units[key] = existing + } + for key, unit := range s.Units { + _, ok := touched[key] + if !ok { + if unit.State != client.UnitStateStopped { + unit.State = client.UnitStateStopped + unit.Message = "Stopped" + + // unit is a copy and must be set back into the map + s.Units[key] = unit + } + } + } +} + +func (s *ComponentState) syncCheckin(checkin *proto.CheckinObserved) bool { + changed := false + touched := make(map[ComponentUnitKey]bool) + for _, unit := range checkin.Units { + key := ComponentUnitKey{ + UnitType: client.UnitType(unit.Type), + UnitID: unit.Id, + } + + var payloadStr string + var payload map[string]interface{} + if unit.Payload != nil { + payloadStr = string(unit.Payload) + // err is ignored (must be valid JSON for Agent to use it) + _ = json.Unmarshal(unit.Payload, &payload) + } + + touched[key] = true + existing, ok := s.Units[key] + if !ok { + changed = true + existing = ComponentUnitState{ + State: client.UnitState(unit.State), + Message: unit.Message, + Payload: payload, + configStateIdx: unit.ConfigStateIdx, + payloadStr: payloadStr, + } + } else { + existing.configStateIdx = unit.ConfigStateIdx + if existing.State != client.UnitState(unit.State) || existing.Message != unit.Message || existing.payloadStr != payloadStr { + changed = true + existing.State = client.UnitState(unit.State) + existing.Message = unit.Message + existing.Payload = payload + existing.payloadStr = payloadStr + } + } + s.Units[key] = existing + } + for key, unit := range s.Units { + _, ok := touched[key] + if !ok { + unit.configStateIdx = 0 + if unit.State != client.UnitStateStarting { + state := client.UnitStateFailed + msg := "Failed: not reported in check-in" + payloadStr := "" + if unit.State != state || unit.Message != msg || unit.payloadStr != payloadStr { + changed = true + unit.State = state + unit.Message = msg + unit.Payload = nil + unit.payloadStr = payloadStr + + // unit is a copy and must be set back into the map + s.Units[key] = unit + } + } + } + } + return changed +} + +// ComponentRuntime manages runtime lifecycle operations for a component and stores its state. +type ComponentRuntime interface { + // Run starts the runtime for the component. + // + // Called by Manager inside a go-routine. Run should not return until the passed in context is done. 
Run is always + // called before any of the other methods in the interface and once the context is done none of those methods will + // ever be called again. + Run(ctx context.Context, comm Communicator) + // Watch returns the channel that sends component state. + // + // Channel should send a new state anytime a state for a unit or the whole component changes. + Watch() <-chan ComponentState + // Start starts the component. + // + // Must be non-blocking and never return an error unless the whole Elastic Agent needs to exit. + Start() error + // Update updates the current runtime with a new-revision for the component definition. + // + // Must be non-blocking and never return an error unless the whole Elastic Agent needs to exit. + Update(comp component.Component) error + // Stop stops the component. + // + // Must be non-blocking and never return an error unless the whole Elastic Agent needs to exit. + // + // Used to stop the running component. This is used when it will be restarted or upgraded. If the component + // is being completely removed Teardown will be used instead. + Stop() error + // Teardown both stops and performs cleanup for the component. + // + // Must be non-blocking and never return an error unless the whole Elastic Agent needs to exit. + // + // Used to tell control the difference between stopping a component to restart it or upgrade it, versus + // the component being completely removed. + Teardown() error +} + +// NewComponentRuntime creates the proper runtime based on the input specification for the component. +func NewComponentRuntime(comp component.Component) (ComponentRuntime, error) { + if comp.Err != nil { + return NewFailedRuntime(comp) + } else if comp.Spec.Spec.Command != nil { + return NewCommandRuntime(comp) + } else if comp.Spec.Spec.Service != nil { + return nil, errors.New("service component runtime not implemented") + } + return nil, errors.New("unknown component runtime") +} + +type componentRuntimeState struct { + manager *Manager + logger *logger.Logger + comm *runtimeComm + + current component.Component + runtime ComponentRuntime + + shuttingDown atomic.Bool + + latestMx sync.RWMutex + latest ComponentState + + watchChan chan bool + watchCanceller context.CancelFunc + + runChan chan bool + runCanceller context.CancelFunc + + actionsMx sync.Mutex + actions map[string]func(*proto.ActionResponse) +} + +func newComponentRuntimeState(m *Manager, logger *logger.Logger, comp component.Component) (*componentRuntimeState, error) { + comm, err := newRuntimeComm(logger, m.getListenAddr(), m.ca) + if err != nil { + return nil, err + } + runtime, err := NewComponentRuntime(comp) + if err != nil { + return nil, err + } + + watchChan := make(chan bool) + runChan := make(chan bool) + state := &componentRuntimeState{ + manager: m, + logger: logger, + comm: comm, + current: comp, + runtime: runtime, + latest: ComponentState{ + State: client.UnitStateStarting, + Message: "Starting", + Units: nil, + }, + watchChan: watchChan, + runChan: runChan, + actions: make(map[string]func(response *proto.ActionResponse)), + } + + // start the go-routine that watches for updates from the component + watchCtx, watchCanceller := context.WithCancel(context.Background()) + state.watchCanceller = watchCanceller + go func() { + defer close(watchChan) + for { + select { + case <-watchCtx.Done(): + return + case s := <-runtime.Watch(): + state.latestMx.Lock() + state.latest = s + state.latestMx.Unlock() + state.manager.stateChanged(state, s) + case ar := <-comm.actionsResponse: + 
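// An action response arrived from the component; look up and invoke the pending callback registered by performAction.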
state.actionsMx.Lock() + callback, ok := state.actions[ar.Id] + if ok { + delete(state.actions, ar.Id) + } + state.actionsMx.Unlock() + callback(ar) + } + } + }() + + // start the go-routine that operates the runtime for the component + runCtx, runCanceller := context.WithCancel(context.Background()) + state.runCanceller = runCanceller + go func() { + defer close(runChan) + defer comm.destroy() + runtime.Run(runCtx, comm) + }() + + return state, nil +} + +func (s *componentRuntimeState) start() error { + return s.runtime.Start() +} + +func (s *componentRuntimeState) stop(teardown bool) error { + s.shuttingDown.Store(true) + if teardown { + return s.runtime.Teardown() + } else { + return s.runtime.Stop() + } +} + +func (s *componentRuntimeState) destroy() { + if s.runCanceller != nil { + s.runCanceller() + s.runCanceller = nil + <-s.runChan + } + if s.watchCanceller != nil { + s.watchCanceller() + s.watchCanceller = nil + <-s.watchChan + } +} + +func (s *componentRuntimeState) performAction(ctx context.Context, req *proto.ActionRequest) (*proto.ActionResponse, error) { + ch := make(chan *proto.ActionResponse) + callback := func(response *proto.ActionResponse) { + ch <- response + } + + s.actionsMx.Lock() + s.actions[req.Id] = callback + s.actionsMx.Unlock() + + select { + case <-ctx.Done(): + s.actionsMx.Lock() + delete(s.actions, req.Id) + s.actionsMx.Unlock() + return nil, ctx.Err() + case s.comm.actionsRequest <- req: + } + + var resp *proto.ActionResponse + + select { + case <-ctx.Done(): + s.actionsMx.Lock() + delete(s.actions, req.Id) + s.actionsMx.Unlock() + return nil, ctx.Err() + case resp = <-ch: + } + + return resp, nil +} diff --git a/pkg/component/runtime/runtime_comm.go b/pkg/component/runtime/runtime_comm.go new file mode 100644 index 00000000000..3a15a79dc8c --- /dev/null +++ b/pkg/component/runtime/runtime_comm.go @@ -0,0 +1,311 @@ +package runtime + +import ( + "errors" + "fmt" + "github.com/elastic/elastic-agent-client/v7/pkg/client" + protobuf "google.golang.org/protobuf/proto" + "io" + "strings" + "sync" + + "github.com/elastic/elastic-agent-client/v7/pkg/proto" + "github.com/gofrs/uuid" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/elastic/elastic-agent/internal/pkg/core/authority" + "github.com/elastic/elastic-agent/pkg/core/logger" +) + +// Communicator provides an interface for a runtime to communicate with its running component. +type Communicator interface { + // WriteConnInfo writes the connection information to the writer, informing the component it has access + // to the provided services. + WriteConnInfo(w io.Writer, services ...client.Service) error + // CheckinExpected sends the expected state to the component. + CheckinExpected(expected *proto.CheckinExpected) + // CheckinObserved receives the observed state from the component. 
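+ // Each observed check-in from the component is delivered on the returned channel.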
+ CheckinObserved() <-chan *proto.CheckinObserved +} + +type runtimeComm struct { + logger *logger.Logger + listenAddr string + ca *authority.CertificateAuthority + + name string + token string + cert *authority.Pair + + checkinConn bool + checkinDone chan bool + checkinLock sync.RWMutex + checkinExpected chan *proto.CheckinExpected + checkinObserved chan *proto.CheckinObserved + + actionsConn bool + actionsDone chan bool + actionsLock sync.RWMutex + actionsRequest chan *proto.ActionRequest + actionsResponse chan *proto.ActionResponse +} + +func newRuntimeComm(logger *logger.Logger, listenAddr string, ca *authority.CertificateAuthority) (*runtimeComm, error) { + token, err := uuid.NewV4() + if err != nil { + return nil, err + } + name, err := genServerName() + if err != nil { + return nil, err + } + pair, err := ca.GeneratePairWithName(name) + if err != nil { + return nil, err + } + return &runtimeComm{ + logger: logger, + listenAddr: listenAddr, + ca: ca, + name: name, + token: token.String(), + cert: pair, + checkinConn: true, + checkinExpected: make(chan *proto.CheckinExpected), + checkinObserved: make(chan *proto.CheckinObserved), + actionsConn: true, + actionsRequest: make(chan *proto.ActionRequest), + actionsResponse: make(chan *proto.ActionResponse), + }, nil +} + +func (c *runtimeComm) WriteConnInfo(w io.Writer, services ...client.Service) error { + hasV2 := false + srvs := make([]proto.ConnInfoServices, 0, len(services)) + for _, srv := range services { + if srv == client.ServiceCheckin { + return fmt.Errorf("cannot provide access to v1 checkin service") + } + if srv == client.ServiceCheckinV2 { + hasV2 = true + } + srvs = append(srvs, proto.ConnInfoServices(srv)) + } + if !hasV2 { + srvs = append(srvs, proto.ConnInfoServices_CheckinV2) + } + connInfo := &proto.ConnInfo{ + Addr: c.listenAddr, + ServerName: c.name, + Token: c.token, + CaCert: c.ca.Crt(), + PeerCert: c.cert.Crt, + PeerKey: c.cert.Key, + Services: srvs, + } + infoBytes, err := protobuf.Marshal(connInfo) + if err != nil { + return fmt.Errorf("failed to marshal connection information: %w", err) + } + _, err = w.Write(infoBytes) + if err != nil { + return fmt.Errorf("failed to write connection information: %w", err) + } + return nil +} + +func (c *runtimeComm) CheckinExpected(expected *proto.CheckinExpected) { + c.checkinExpected <- expected +} + +func (c *runtimeComm) CheckinObserved() <-chan *proto.CheckinObserved { + return c.checkinObserved +} + +func (c *runtimeComm) checkin(server proto.ElasticAgent_CheckinV2Server, init *proto.CheckinObserved) error { + c.checkinLock.Lock() + if c.checkinDone != nil { + // already connected (cannot have multiple); close connection + c.checkinLock.Unlock() + c.logger.Debug("check-in stream already connected for component; closing connection") + return status.Error(codes.AlreadyExists, "component already connected") + } + if !c.checkinConn { + // being destroyed cannot reconnect; close connection + c.checkinLock.Unlock() + c.logger.Debug("check-in stream being destroyed connection not allowed; closing connection") + return status.Error(codes.Unavailable, "component cannot connect being destroyed") + } + + checkinDone := make(chan bool) + c.checkinDone = checkinDone + c.checkinLock.Unlock() + + defer func() { + c.checkinLock.Lock() + c.checkinDone = nil + c.checkinLock.Unlock() + }() + + recvDone := make(chan bool) + sendDone := make(chan bool) + go func() { + defer func() { + close(sendDone) + }() + for { + var expected *proto.CheckinExpected + select { + case <-checkinDone: + 
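// The check-in connection is being torn down; stop forwarding expected state to the component.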
return + case <-recvDone: + return + case expected = <-c.checkinExpected: + } + + err := server.Send(expected) + if err != nil { + if reportableErr(err) { + c.logger.Debugf("check-in stream failed to send expected state: %s", err) + } + return + } + } + }() + + c.checkinObserved <- init + + go func() { + for { + checkin, err := server.Recv() + if err != nil { + if reportableErr(err) { + c.logger.Debugf("check-in stream failed to receive data: %s", err) + } + close(recvDone) + return + } + c.checkinObserved <- checkin + } + }() + + <-sendDone + return nil +} + +func (c *runtimeComm) actions(server proto.ElasticAgent_ActionsServer) error { + c.actionsLock.Lock() + if c.actionsDone != nil { + // already connected (cannot have multiple); close connection + c.actionsLock.Unlock() + c.logger.Debug("check-in stream already connected for component; closing connection") + return status.Error(codes.AlreadyExists, "application already connected") + } + if !c.actionsConn { + // being destroyed cannot reconnect; close connection + c.actionsLock.Unlock() + c.logger.Debug("check-in stream being destroyed connection not allowed; closing connection") + return status.Error(codes.Unavailable, "application cannot connect being destroyed") + } + + actionsDone := make(chan bool) + c.actionsDone = actionsDone + c.actionsLock.Unlock() + + defer func() { + c.actionsLock.Lock() + c.actionsDone = nil + c.actionsLock.Unlock() + }() + + recvDone := make(chan bool) + sendDone := make(chan bool) + go func() { + defer func() { + close(sendDone) + }() + for { + var req *proto.ActionRequest + select { + case <-actionsDone: + return + case <-recvDone: + return + case req = <-c.actionsRequest: + } + + err := server.Send(req) + if err != nil { + if reportableErr(err) { + c.logger.Debugf("actions stream failed to send action request: %s", err) + } + return + } + } + }() + + go func() { + for { + resp, err := server.Recv() + if err != nil { + if reportableErr(err) { + c.logger.Debugf("check-in stream failed to receive data: %s", err) + } + close(recvDone) + return + } + c.actionsResponse <- resp + } + }() + + <-sendDone + return nil +} + +func (c *runtimeComm) destroy() { + c.destroyCheckin() + c.destroyActions() +} + +func (c *runtimeComm) destroyCheckin() { + c.checkinLock.Lock() + c.checkinConn = false + if c.checkinDone != nil { + close(c.checkinDone) + c.checkinDone = nil + } + c.checkinLock.Unlock() +} + +func (c *runtimeComm) destroyActions() { + c.actionsLock.Lock() + c.actionsConn = false + if c.actionsDone != nil { + close(c.actionsDone) + c.actionsDone = nil + } + c.actionsLock.Unlock() +} + +func reportableErr(err error) bool { + if errors.Is(err, io.EOF) { + return false + } + s, ok := status.FromError(err) + if !ok { + return true + } + if s.Code() == codes.Canceled { + return false + } + return true +} + +func genServerName() (string, error) { + u, err := uuid.NewV4() + if err != nil { + return "", err + } + return strings.Replace(u.String(), "-", "", -1), nil +} diff --git a/pkg/component/runtime/subscription.go b/pkg/component/runtime/subscription.go new file mode 100644 index 00000000000..c9574aaa030 --- /dev/null +++ b/pkg/component/runtime/subscription.go @@ -0,0 +1,24 @@ +package runtime + +// Subscription provides a channel for notifications on a component state. 
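+// Subscriptions are created with Manager.Subscribe and must be released with Unsubscribe when no longer needed.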
+type Subscription struct { + manager *Manager + ch chan ComponentState +} + +func newSubscription(manager *Manager) *Subscription { + return &Subscription{ + manager: manager, + ch: make(chan ComponentState, 1), // buffer of 1 to allow initial latest state + } +} + +// Ch provides the channel to get state changes. +func (s *Subscription) Ch() <-chan ComponentState { + return s.ch +} + +// Unsubscribe removes the subscription. +func (s *Subscription) Unsubscribe() { + s.manager.unsubscribe(s) +} diff --git a/pkg/component/spec.go b/pkg/component/spec.go index bfd0efedb86..dc916d7805b 100644 --- a/pkg/component/spec.go +++ b/pkg/component/spec.go @@ -56,8 +56,9 @@ type RuntimePreventionSpec struct { // CommandSpec is the specification for an input that executes as a subprocess. type CommandSpec struct { - Args []string `config:"args,omitempty" yaml:"args,omitempty"` - Env []CommandEnvSpec `config:"env,omitempty" yaml:"env,omitempty"` + Args []string `config:"args,omitempty" yaml:"args,omitempty"` + Env []CommandEnvSpec `config:"env,omitempty" yaml:"env,omitempty"` + Timeouts CommandTimeoutSpec `config:"timeouts" yaml:"timeouts"` } // CommandEnvSpec is the specification that defines environment variables that will be set to execute the subprocess. @@ -66,6 +67,18 @@ type CommandEnvSpec struct { Value string `config:"value" yaml:"value" validate:"required"` } +// CommandTimeoutSpec is the timeout specification for subprocess. +type CommandTimeoutSpec struct { + Spawn time.Duration `config:"spawn" yaml:"spawn"` + Stop time.Duration `config:"stop" yaml:"stop"` +} + +// InitDefaults initialized the defaults for the timeouts. +func (t *CommandTimeoutSpec) InitDefaults() { + t.Spawn = 30 * time.Second + t.Stop = 30 * time.Second +} + // ServiceSpec is the specification for an input that executes as a service. type ServiceSpec struct { Operations ServiceOperationsSpec `config:"operations" yaml:"operations" validate:"required"` diff --git a/internal/pkg/core/process/cmd.go b/pkg/core/process/cmd.go similarity index 78% rename from internal/pkg/core/process/cmd.go rename to pkg/core/process/cmd.go index a43ea7d62a6..898b2f0aab1 100644 --- a/internal/pkg/core/process/cmd.go +++ b/pkg/core/process/cmd.go @@ -12,11 +12,9 @@ import ( "os" "os/exec" "path/filepath" - - "github.com/elastic/elastic-agent/pkg/core/logger" ) -func getCmd(ctx context.Context, logger *logger.Logger, path string, env []string, uid, gid int, arg ...string) *exec.Cmd { +func getCmd(ctx context.Context, path string, env []string, uid, gid int, arg ...string) (*exec.Cmd, error) { var cmd *exec.Cmd if ctx == nil { cmd = exec.Command(path, arg...) @@ -27,7 +25,11 @@ func getCmd(ctx context.Context, logger *logger.Logger, path string, env []strin cmd.Env = append(cmd.Env, env...) 
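+ // Run the child process from the directory that contains the binary.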
cmd.Dir = filepath.Dir(path) - return cmd + return cmd, nil +} + +func killCmd(proc *os.Process) error { + return proc.Kill() } func terminateCmd(proc *os.Process) error { diff --git a/internal/pkg/core/process/cmd_darwin.go b/pkg/core/process/cmd_darwin.go similarity index 77% rename from internal/pkg/core/process/cmd_darwin.go rename to pkg/core/process/cmd_darwin.go index aa4b96d827e..4533b351e38 100644 --- a/internal/pkg/core/process/cmd_darwin.go +++ b/pkg/core/process/cmd_darwin.go @@ -9,16 +9,15 @@ package process import ( "context" + "fmt" "math" "os" "os/exec" "path/filepath" "syscall" - - "github.com/elastic/elastic-agent/pkg/core/logger" ) -func getCmd(ctx context.Context, logger *logger.Logger, path string, env []string, uid, gid int, arg ...string) *exec.Cmd { +func getCmd(ctx context.Context, path string, env []string, uid, gid int, arg ...string) (*exec.Cmd, error) { var cmd *exec.Cmd if ctx == nil { cmd = exec.Command(path, arg...) @@ -37,16 +36,20 @@ func getCmd(ctx context.Context, logger *logger.Logger, path string, env []strin }, } } else { - logger.Errorf("provided uid or gid for %s is invalid. uid: '%d' gid: '%d'.", path, uid, gid) + return nil, fmt.Errorf("invalid uid: '%d' or gid: '%d'", uid, gid) } - return cmd + return cmd, nil } func isInt32(val int) bool { return val >= 0 && val <= math.MaxInt32 } +func killCmd(proc *os.Process) error { + return proc.Kill() +} + func terminateCmd(proc *os.Process) error { return proc.Signal(syscall.SIGTERM) } diff --git a/internal/pkg/core/process/cmd_linux.go b/pkg/core/process/cmd_linux.go similarity index 81% rename from internal/pkg/core/process/cmd_linux.go rename to pkg/core/process/cmd_linux.go index ffaa62e577c..958b3e832e9 100644 --- a/internal/pkg/core/process/cmd_linux.go +++ b/pkg/core/process/cmd_linux.go @@ -14,11 +14,9 @@ import ( "os/exec" "path/filepath" "syscall" - - "github.com/elastic/elastic-agent/pkg/core/logger" ) -func getCmd(ctx context.Context, logger *logger.Logger, path string, env []string, uid, gid int, arg ...string) *exec.Cmd { +func getCmd(ctx context.Context, path string, env []string, uid, gid int, arg ...string) (*exec.Cmd, error) { var cmd *exec.Cmd if ctx == nil { cmd = exec.Command(path, arg...) @@ -40,7 +38,7 @@ func getCmd(ctx context.Context, logger *logger.Logger, path string, env []strin }, } } else { - logger.Errorf("provided uid or gid for %s is invalid. uid: '%d' gid: '%d'.", path, uid, gid) + return nil, fmt.Errorf("invalid uid: '%d' or gid: '%d'", uid, gid) } return cmd @@ -50,6 +48,10 @@ func isInt32(val int) bool { return val >= 0 && val <= math.MaxInt32 } +func killCmd(proc *os.Process) error { + return proc.Kill() +} + func terminateCmd(proc *os.Process) error { return proc.Signal(syscall.SIGTERM) } diff --git a/internal/pkg/core/process/config.go b/pkg/core/process/config.go similarity index 100% rename from internal/pkg/core/process/config.go rename to pkg/core/process/config.go diff --git a/pkg/core/process/external_unix.go b/pkg/core/process/external_unix.go new file mode 100644 index 00000000000..c97b02ed58f --- /dev/null +++ b/pkg/core/process/external_unix.go @@ -0,0 +1,30 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +//go:build !windows +// +build !windows + +package process + +import ( + "os" + "syscall" + "time" +) + +// externalProcess is a watch mechanism used in cases where OS requires a process to be a child +// for waiting for process. We need to be able to await any process. +func externalProcess(proc *os.Process) { + if proc == nil { + return + } + + for { + <-time.After(1 * time.Second) + if proc.Signal(syscall.Signal(0)) != nil { + // failed to contact process, return + return + } + } +} diff --git a/pkg/core/process/external_windows.go b/pkg/core/process/external_windows.go new file mode 100644 index 00000000000..d6898588889 --- /dev/null +++ b/pkg/core/process/external_windows.go @@ -0,0 +1,53 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build windows +// +build windows + +package process + +import ( + "os" + "syscall" + "time" +) + +const ( + // exitCodeStillActive according to docs.microsoft.com/en-us/windows/desktop/api/processthreadsapi/nf-processthreadsapi-getexitcodeprocess + exitCodeStillActive = 259 +) + +// externalProcess is a watch mechanism used in cases where OS requires a process to be a child +// for waiting for process. We need to be able to await any process. +func externalProcess(proc *os.Process) { + if proc == nil { + return + } + + for { + <-time.After(1 * time.Second) + if isWindowsProcessExited(proc.Pid) { + return + } + } +} + +func isWindowsProcessExited(pid int) bool { + const desiredAccess = syscall.STANDARD_RIGHTS_READ | syscall.PROCESS_QUERY_INFORMATION | syscall.SYNCHRONIZE + h, err := syscall.OpenProcess(desiredAccess, false, uint32(pid)) + if err != nil { + // failed to open handle, report exited + return true + } + + // get exit code, this returns immediately in case it is still running + // it returns exitCodeStillActive + var ec uint32 + if err := syscall.GetExitCodeProcess(h, &ec); err != nil { + // failed to contact, report exited + return true + } + + return ec != exitCodeStillActive +} diff --git a/internal/pkg/agent/cmd/proc/job_unix.go b/pkg/core/process/job_unix.go similarity index 97% rename from internal/pkg/agent/cmd/proc/job_unix.go rename to pkg/core/process/job_unix.go index b336575c72c..72d0386cade 100644 --- a/internal/pkg/agent/cmd/proc/job_unix.go +++ b/pkg/core/process/job_unix.go @@ -5,7 +5,7 @@ //go:build !windows // +build !windows -package proc +package process import ( "os" diff --git a/internal/pkg/agent/cmd/proc/job_windows.go b/pkg/core/process/job_windows.go similarity index 99% rename from internal/pkg/agent/cmd/proc/job_windows.go rename to pkg/core/process/job_windows.go index bff42183d71..214d75d9b3c 100644 --- a/internal/pkg/agent/cmd/proc/job_windows.go +++ b/pkg/core/process/job_windows.go @@ -5,7 +5,7 @@ //go:build windows // +build windows -package proc +package process import ( "os" diff --git a/pkg/core/process/process.go b/pkg/core/process/process.go new file mode 100644 index 00000000000..73ff4e9bfbf --- /dev/null +++ b/pkg/core/process/process.go @@ -0,0 +1,101 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package process + +import ( + "context" + "fmt" + "io" + "os" + "os/exec" +) + +// Info groups information about fresh new process +type Info struct { + PID int + Process *os.Process + Stdin io.WriteCloser +} + +// Option is an option func to change the underlying command +type Option func(c *exec.Cmd) error + +// Start starts a new process +func Start(path string, uid, gid int, args []string, env []string, opts ...Option) (proc *Info, err error) { + return StartContext(nil, path, uid, gid, args, env, opts...) //nolint:staticcheck // calls a different function if no ctx +} + +// StartContext starts a new process with context. +func StartContext(ctx context.Context, path string, uid, gid int, args []string, env []string, opts ...Option) (*Info, error) { + cmd, err := getCmd(ctx, path, env, uid, gid, args...) + if err != nil { + return nil, fmt.Errorf("failed to create command for '%s': %w", path, err) + } + for _, o := range opts { + if err := o(cmd); err != nil { + return nil, fmt.Errorf("failed to set option command for '%s': %w", path, err) + } + } + stdin, err := cmd.StdinPipe() + if err != nil { + return nil, fmt.Errorf("failed to create stdin for '%s': %w", path, err) + } + + // start process + if err := cmd.Start(); err != nil { + return nil, fmt.Errorf("failed to start '%s': %w", path, err) + } + + // Hook to JobObject on windows, noop on other platforms. + // This ties the application processes lifespan to the agent's. + // Fixes the orphaned beats processes left behind situation + // after the agent process gets killed. + if err := JobObject.Assign(cmd.Process); err != nil { + _ = killCmd(cmd.Process) + return nil, fmt.Errorf("failed job assignment '%s': %w", path, err) + } + + return &Info{ + PID: cmd.Process.Pid, + Process: cmd.Process, + Stdin: stdin, + }, err +} + +// Kill kills the process. +func (i *Info) Kill() error { + return killCmd(i.Process) +} + +// Stop stops the process cleanly. +func (i *Info) Stop() error { + return terminateCmd(i.Process) +} + +// StopWait stops the process and waits for it to exit. +func (i *Info) StopWait() error { + err := i.Stop() + if err != nil { + return err + } + _, err = i.Process.Wait() + return err +} + +// Wait returns a channel that will send process state once it exits. +func (i *Info) Wait() <-chan *os.ProcessState { + ch := make(chan *os.ProcessState) + + go func() { + procState, err := i.Process.Wait() + if err != nil { + // process is not a child - some OSs requires process to be child + externalProcess(i.Process) + } + ch <- procState + }() + + return ch +} From 148b1ddb779d30b758a3fc966b3a2b346b0fc1a4 Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Wed, 29 Jun 2022 21:20:39 -0400 Subject: [PATCH 02/19] Fix imports. 
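
The import fixes below touch call sites of the process package moved in the previous commit. For orientation, a minimal sketch of a call through the new Start signature (path, uid, gid, args, env, opts...); the binary path and arguments here are hypothetical, not part of the change:

    package main

    import (
    	"fmt"
    	"os"

    	"github.com/elastic/elastic-agent/pkg/core/process"
    )

    func main() {
    	// Start the child with the caller's uid/gid; env and extra exec.Cmd options are omitted.
    	proc, err := process.Start("/opt/example/bin/example", os.Geteuid(), os.Getegid(), []string{"-e"}, nil)
    	if err != nil {
    		panic(err)
    	}
    	// Wait returns a channel that delivers the final process state once the child exits.
    	state := <-proc.Wait()
    	fmt.Printf("process exited: %v\n", state)
    }
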
--- internal/pkg/agent/cmd/container.go | 2 +- internal/pkg/agent/cmd/enroll_cmd.go | 2 +- internal/pkg/agent/configuration/settings.go | 2 +- internal/pkg/agent/operation/common_test.go | 2 +- internal/pkg/agent/operation/monitoring_test.go | 2 +- internal/pkg/core/monitoring/server/processes_test.go | 2 +- internal/pkg/core/plugin/process/app.go | 2 +- internal/pkg/core/plugin/process/status.go | 5 ++--- internal/pkg/core/plugin/service/app.go | 2 +- internal/pkg/core/state/state.go | 3 ++- main.go | 2 +- 11 files changed, 13 insertions(+), 13 deletions(-) diff --git a/internal/pkg/agent/cmd/container.go b/internal/pkg/agent/cmd/container.go index ed911996508..ff1b40936d1 100644 --- a/internal/pkg/agent/cmd/container.go +++ b/internal/pkg/agent/cmd/container.go @@ -9,7 +9,6 @@ import ( "context" "encoding/json" "fmt" - "github.com/elastic/elastic-agent/pkg/core/process" "io" "io/ioutil" "net/url" @@ -39,6 +38,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/config" "github.com/elastic/elastic-agent/internal/pkg/release" "github.com/elastic/elastic-agent/pkg/core/logger" + "github.com/elastic/elastic-agent/pkg/core/process" "github.com/elastic/elastic-agent/version" ) diff --git a/internal/pkg/agent/cmd/enroll_cmd.go b/internal/pkg/agent/cmd/enroll_cmd.go index 74f63b1660c..a886ca5bafb 100644 --- a/internal/pkg/agent/cmd/enroll_cmd.go +++ b/internal/pkg/agent/cmd/enroll_cmd.go @@ -8,7 +8,6 @@ import ( "bytes" "context" "fmt" - "github.com/elastic/elastic-agent/pkg/core/process" "io" "io/ioutil" "math/rand" @@ -43,6 +42,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/release" "github.com/elastic/elastic-agent/internal/pkg/remote" "github.com/elastic/elastic-agent/pkg/core/logger" + "github.com/elastic/elastic-agent/pkg/core/process" ) const ( diff --git a/internal/pkg/agent/configuration/settings.go b/internal/pkg/agent/configuration/settings.go index ee7f85558e0..7445f02a462 100644 --- a/internal/pkg/agent/configuration/settings.go +++ b/internal/pkg/agent/configuration/settings.go @@ -5,13 +5,13 @@ package configuration import ( - "github.com/elastic/elastic-agent/pkg/core/process" "path/filepath" "github.com/elastic/elastic-agent/internal/pkg/artifact" monitoringCfg "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config" "github.com/elastic/elastic-agent/internal/pkg/core/retry" "github.com/elastic/elastic-agent/pkg/core/logger" + "github.com/elastic/elastic-agent/pkg/core/process" "github.com/elastic/elastic-agent/pkg/core/server" ) diff --git a/internal/pkg/agent/operation/common_test.go b/internal/pkg/agent/operation/common_test.go index f351e95f18a..aadc65b5db9 100644 --- a/internal/pkg/agent/operation/common_test.go +++ b/internal/pkg/agent/operation/common_test.go @@ -6,7 +6,6 @@ package operation import ( "context" - "github.com/elastic/elastic-agent/pkg/core/process" "os" "path/filepath" "runtime" @@ -29,6 +28,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/core/retry" "github.com/elastic/elastic-agent/internal/pkg/core/status" "github.com/elastic/elastic-agent/pkg/core/logger" + "github.com/elastic/elastic-agent/pkg/core/process" "github.com/elastic/elastic-agent/pkg/core/server" ) diff --git a/internal/pkg/agent/operation/monitoring_test.go b/internal/pkg/agent/operation/monitoring_test.go index d98c2c5362b..c64ede2a8f1 100644 --- 
a/internal/pkg/agent/operation/monitoring_test.go +++ b/internal/pkg/agent/operation/monitoring_test.go @@ -7,7 +7,6 @@ package operation import ( "context" "fmt" - "github.com/elastic/elastic-agent/pkg/core/process" "testing" "time" @@ -31,6 +30,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/core/retry" "github.com/elastic/elastic-agent/internal/pkg/core/state" "github.com/elastic/elastic-agent/internal/pkg/core/status" + "github.com/elastic/elastic-agent/pkg/core/process" "github.com/elastic/elastic-agent/pkg/core/server" ) diff --git a/internal/pkg/core/monitoring/server/processes_test.go b/internal/pkg/core/monitoring/server/processes_test.go index e739778ae77..f700274a16b 100644 --- a/internal/pkg/core/monitoring/server/processes_test.go +++ b/internal/pkg/core/monitoring/server/processes_test.go @@ -8,7 +8,6 @@ import ( "bytes" "encoding/json" "fmt" - "github.com/elastic/elastic-agent/pkg/core/process" "net/http" "os" "testing" @@ -18,6 +17,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/core/state" "github.com/elastic/elastic-agent/internal/pkg/sorted" + "github.com/elastic/elastic-agent/pkg/core/process" ) func TestProcesses(t *testing.T) { diff --git a/internal/pkg/core/plugin/process/app.go b/internal/pkg/core/plugin/process/app.go index c7dc7169f8b..f5e1afa085b 100644 --- a/internal/pkg/core/plugin/process/app.go +++ b/internal/pkg/core/plugin/process/app.go @@ -7,7 +7,6 @@ package process import ( "context" "fmt" - process2 "github.com/elastic/elastic-agent/pkg/core/process" "os" "reflect" "sync" @@ -24,6 +23,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/core/status" "github.com/elastic/elastic-agent/internal/pkg/tokenbucket" "github.com/elastic/elastic-agent/pkg/core/logger" + process2 "github.com/elastic/elastic-agent/pkg/core/process" "github.com/elastic/elastic-agent/pkg/core/server" ) diff --git a/internal/pkg/core/plugin/process/status.go b/internal/pkg/core/plugin/process/status.go index e11f39d1d5f..93d7c723b05 100644 --- a/internal/pkg/core/plugin/process/status.go +++ b/internal/pkg/core/plugin/process/status.go @@ -7,14 +7,13 @@ package process import ( "context" "fmt" - "github.com/elastic/elastic-agent/pkg/core/process" "time" - "gopkg.in/yaml.v2" - "github.com/elastic/elastic-agent-client/v7/pkg/proto" + "gopkg.in/yaml.v2" "github.com/elastic/elastic-agent/internal/pkg/core/state" + "github.com/elastic/elastic-agent/pkg/core/process" "github.com/elastic/elastic-agent/pkg/core/server" ) diff --git a/internal/pkg/core/plugin/service/app.go b/internal/pkg/core/plugin/service/app.go index 514d879986d..ab3631f12c0 100644 --- a/internal/pkg/core/plugin/service/app.go +++ b/internal/pkg/core/plugin/service/app.go @@ -7,7 +7,6 @@ package service import ( "context" "fmt" - "github.com/elastic/elastic-agent/pkg/core/process" "io" "net" "reflect" @@ -28,6 +27,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/core/status" "github.com/elastic/elastic-agent/internal/pkg/tokenbucket" "github.com/elastic/elastic-agent/pkg/core/logger" + "github.com/elastic/elastic-agent/pkg/core/process" "github.com/elastic/elastic-agent/pkg/core/server" ) diff --git a/internal/pkg/core/state/state.go b/internal/pkg/core/state/state.go index 4ad0f17cf8b..57dfb639b72 100644 --- a/internal/pkg/core/state/state.go +++ 
b/internal/pkg/core/state/state.go @@ -6,10 +6,11 @@ package state import ( "context" - "github.com/elastic/elastic-agent/pkg/core/process" "strings" "github.com/elastic/elastic-agent-client/v7/pkg/proto" + + "github.com/elastic/elastic-agent/pkg/core/process" ) // Status describes the current status of the application process. diff --git a/main.go b/main.go index 230135d3346..ec2959614f3 100644 --- a/main.go +++ b/main.go @@ -6,12 +6,12 @@ package main import ( "fmt" - "github.com/elastic/elastic-agent/pkg/core/process" "math/rand" "os" "time" "github.com/elastic/elastic-agent/internal/pkg/agent/cmd" + "github.com/elastic/elastic-agent/pkg/core/process" ) // Setups and Runs agent. From 6f0d148bc2c448351a85b93e76142ecc77f4e0af Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Wed, 29 Jun 2022 21:37:45 -0400 Subject: [PATCH 03/19] Add tests for watching checkins. --- pkg/component/runtime/command.go | 4 +- pkg/component/runtime/failed.go | 3 +- pkg/component/runtime/manager.go | 4 +- pkg/component/runtime/manager_test.go | 113 ++++++++++++++++++++++++++ pkg/component/runtime/runtime.go | 5 +- 5 files changed, 123 insertions(+), 6 deletions(-) diff --git a/pkg/component/runtime/command.go b/pkg/component/runtime/command.go index 3169fee1521..d0670d47f60 100644 --- a/pkg/component/runtime/command.go +++ b/pkg/component/runtime/command.go @@ -61,7 +61,7 @@ func NewCommandRuntime(comp component.Component) (ComponentRuntime, error) { }, nil } -func (c *CommandRuntime) Run(ctx context.Context, comm Communicator) { +func (c *CommandRuntime) Run(ctx context.Context, comm Communicator, checkinPeriod time.Duration) { c.forceCompState(client.UnitStateStarting, "Starting") t := time.NewTicker(checkinPeriod) defer t.Stop() @@ -150,7 +150,7 @@ func (c *CommandRuntime) Run(ctx context.Context, comm Communicator) { // // at this point it is assumed the sub-process has locked up and will not respond to a nice // termination signal, so we jump directly to killing the process - msg := fmt.Sprintf("Failed: pid '%d' missed %d check-in's and will be killed", c.proc.PID, maxCheckinMisses) + msg := fmt.Sprintf("Failed: pid '%d' missed %d check-ins and will be killed", c.proc.PID, maxCheckinMisses) c.forceCompState(client.UnitStateFailed, msg) _ = c.proc.Kill() // watcher will handle it from here } diff --git a/pkg/component/runtime/failed.go b/pkg/component/runtime/failed.go index 55a687aa1fc..293d9f65054 100644 --- a/pkg/component/runtime/failed.go +++ b/pkg/component/runtime/failed.go @@ -3,6 +3,7 @@ package runtime import ( "context" "errors" + "time" "github.com/elastic/elastic-agent-client/v7/pkg/client" @@ -29,7 +30,7 @@ func NewFailedRuntime(comp component.Component) (ComponentRuntime, error) { } // Run runs the runtime for a component that got an error from the component loader. -func (c *FailedRuntime) Run(ctx context.Context, _ Communicator) { +func (c *FailedRuntime) Run(ctx context.Context, _ Communicator, _ time.Duration) { // state is hard coded to failed c.ch <- createState(c.current, false) select { diff --git a/pkg/component/runtime/manager.go b/pkg/component/runtime/manager.go index 69f3bc643b6..70af8955530 100644 --- a/pkg/component/runtime/manager.go +++ b/pkg/component/runtime/manager.go @@ -67,7 +67,8 @@ type Manager struct { subMx sync.RWMutex subscriptions map[string][]*Subscription - shuttingDown atomic.Bool + shuttingDown atomic.Bool + checkinPeriod time.Duration } // NewManager creates a new manager. 
@@ -84,6 +85,7 @@ func NewManager(logger *logger.Logger, listenAddr string, tracer *apm.Tracer) (* waitReady: make(map[string]waitForReady), current: make(map[string]*componentRuntimeState), subscriptions: make(map[string][]*Subscription), + checkinPeriod: checkinPeriod, } return m, nil } diff --git a/pkg/component/runtime/manager_test.go b/pkg/component/runtime/manager_test.go index ba6cdf8102a..d83bd5f08f9 100644 --- a/pkg/component/runtime/manager_test.go +++ b/pkg/component/runtime/manager_test.go @@ -788,6 +788,119 @@ LOOP: require.NoError(t, err) } +func TestManager_FakeInput_RestartsOnMissedCheckins(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + m, err := NewManager(newErrorLogger(t), "localhost:0", apmtest.DiscardTracer) + require.NoError(t, err) + m.checkinPeriod = 100 * time.Millisecond + errCh := make(chan error) + go func() { + errCh <- m.Run(ctx) + }() + + waitCtx, waitCancel := context.WithTimeout(ctx, 1*time.Second) + defer waitCancel() + if err := m.WaitForReady(waitCtx); err != nil { + require.NoError(t, err) + } + + binaryPath := filepath.Join("..", "fake", "fake") + if runtime.GOOS == component.Windows { + binaryPath += ".exe" + } + comp := component.Component{ + ID: "fake-default", + Spec: component.InputRuntimeSpec{ + InputType: "fake", + BinaryName: "", + BinaryPath: binaryPath, + Spec: component.InputSpec{ + Name: "fake", + Command: &component.CommandSpec{}, + }, + }, + Units: []component.Unit{ + { + ID: "fake-input", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "fake", + "state": client.UnitStateHealthy, + "message": "Fake Healthy", + }, + }, + }, + } + + subCtx, subCancel := context.WithCancel(context.Background()) + defer subCancel() + subErrCh := make(chan error) + go func() { + wasDegraded := false + + sub := m.Subscribe("fake-default") + defer sub.Unsubscribe() + for { + select { + case <-subCtx.Done(): + return + case state := <-sub.Ch(): + t.Logf("component state changed: %+v", state) + if state.State == client.UnitStateStarting || state.State == client.UnitStateHealthy { + // starting and healthy are allowed + } else if state.State == client.UnitStateDegraded { + // should go to degraded first + wasDegraded = true + } else if state.State == client.UnitStateFailed { + if wasDegraded { + subErrCh <- nil + } else { + subErrCh <- errors.New("should have been degraded before failed") + } + } else { + subErrCh <- fmt.Errorf("unknown component state: %v", state.State) + } + } + } + }() + + defer drainErrChan(errCh) + defer drainErrChan(subErrCh) + + startTimer := time.NewTimer(100 * time.Millisecond) + defer startTimer.Stop() + select { + case <-startTimer.C: + err = m.Update([]component.Component{comp}) + require.NoError(t, err) + case err := <-errCh: + t.Fatalf("failed early: %s", err) + } + + endTimer := time.NewTimer(30 * time.Second) + defer endTimer.Stop() +LOOP: + for { + select { + case <-endTimer.C: + t.Fatalf("timed out after 30 seconds") + case err := <-errCh: + require.NoError(t, err) + case err := <-subErrCh: + require.NoError(t, err) + break LOOP + } + } + + subCancel() + cancel() + + err = <-errCh + require.NoError(t, err) +} + func TestManager_FakeInput_InvalidAction(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/pkg/component/runtime/runtime.go b/pkg/component/runtime/runtime.go index 6214ea573c7..36f52805dbe 100644 --- a/pkg/component/runtime/runtime.go +++ b/pkg/component/runtime/runtime.go @@ -9,6 +9,7 @@ import ( 
"github.com/elastic/elastic-agent-libs/atomic" "github.com/elastic/elastic-agent/pkg/core/logger" "sync" + "time" "github.com/elastic/elastic-agent/pkg/component" ) @@ -163,7 +164,7 @@ type ComponentRuntime interface { // Called by Manager inside a go-routine. Run should not return until the passed in context is done. Run is always // called before any of the other methods in the interface and once the context is done none of those methods will // ever be called again. - Run(ctx context.Context, comm Communicator) + Run(ctx context.Context, comm Communicator, checkinPeriod time.Duration) // Watch returns the channel that sends component state. // // Channel should send a new state anytime a state for a unit or the whole component changes. @@ -287,7 +288,7 @@ func newComponentRuntimeState(m *Manager, logger *logger.Logger, comp component. go func() { defer close(runChan) defer comm.destroy() - runtime.Run(runCtx, comm) + runtime.Run(runCtx, comm, m.checkinPeriod) }() return state, nil From 35845b2b004a50f7e165b164420436ba09d3e1ff Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Thu, 30 Jun 2022 09:25:56 -0400 Subject: [PATCH 04/19] Fix lint and move checkin period to a configurable timeout. --- pkg/component/fake/main.go | 21 +++++++++++++-------- pkg/component/runtime/command.go | 9 +++++++-- pkg/component/runtime/failed.go | 4 +--- pkg/component/runtime/manager.go | 7 +------ pkg/component/runtime/manager_test.go | 11 ++++++++--- pkg/component/runtime/runtime.go | 8 +++----- pkg/component/spec.go | 6 +++--- pkg/core/process/cmd_linux.go | 3 ++- 8 files changed, 38 insertions(+), 31 deletions(-) diff --git a/pkg/component/fake/main.go b/pkg/component/fake/main.go index c8127125f4e..ac851b277e4 100644 --- a/pkg/component/fake/main.go +++ b/pkg/component/fake/main.go @@ -8,29 +8,34 @@ import ( "context" "errors" "fmt" - "gopkg.in/yaml.v2" "os" "os/signal" "syscall" "time" + "gopkg.in/yaml.v2" + "github.com/elastic/elastic-agent-client/v7/pkg/client" ) +const ( + fake = "fake" +) + func main() { err := run() if err != nil { - fmt.Printf("%s\n", err) + fmt.Fprintf(os.Stderr, "%s\n", err) os.Exit(1) } } func run() error { ver := client.VersionInfo{ - Name: "fake", + Name: fake, Version: "1.0", Meta: map[string]string{ - "input": "fake", + "input": fake, }, } c, _, err := client.NewV2FromReader(os.Stdin, ver) @@ -73,7 +78,7 @@ func run() error { s.removed(change.Unit) } case <-c.Errors(): - //fmt.Fprintf(os.Stderr, "GRPC client error: %s", err) + fmt.Fprintf(os.Stderr, "GRPC client error: %s", err) } } } @@ -182,13 +187,13 @@ func (f *fakeInput) Update(u *client.Unit) error { if !ok { return fmt.Errorf("unit missing config type") } - if unitType != "fake" { + if unitType != fake { return fmt.Errorf("unit type changed with the same unit ID: %s", unitType) } state, stateMsg, err := getStateFromMap(cfg) if err != nil { - return fmt.Errorf("unit config parsing error: %s", err) + return fmt.Errorf("unit config parsing error: %w", err) } f.state = state f.stateMsg = stateMsg @@ -242,7 +247,7 @@ func newRunningUnit(unit *client.Unit) (runningUnit, error) { return nil, fmt.Errorf("unit config type empty") } switch unitType { - case "fake": + case fake: return newFakeInput(unit, cfg) } return nil, fmt.Errorf("unknown unit config type: %s", unitType) diff --git a/pkg/component/runtime/command.go b/pkg/component/runtime/command.go index d0670d47f60..ef6e63ffadb 100644 --- a/pkg/component/runtime/command.go +++ b/pkg/component/runtime/command.go @@ -61,7 +61,8 @@ func 
NewCommandRuntime(comp component.Component) (ComponentRuntime, error) { }, nil } -func (c *CommandRuntime) Run(ctx context.Context, comm Communicator, checkinPeriod time.Duration) { +func (c *CommandRuntime) Run(ctx context.Context, comm Communicator) { + checkinPeriod := c.current.Spec.Spec.Command.Timeouts.Checkin c.forceCompState(client.UnitStateStarting, "Starting") t := time.NewTicker(checkinPeriod) defer t.Stop() @@ -205,7 +206,11 @@ func (c *CommandRuntime) compState(state client.UnitState) { if state == client.UnitStateHealthy { msg = fmt.Sprintf("Healthy: communicating with pid '%d'", c.proc.PID) } else if state == client.UnitStateDegraded { - msg = fmt.Sprintf("Degraded: pid '%d' missed '%d' check-ins", c.proc.PID, c.missedCheckins) + if c.missedCheckins == 1 { + msg = fmt.Sprintf("Degraded: pid '%d' missed 1 check-in", c.proc.PID) + } else { + msg = fmt.Sprintf("Degraded: pid '%d' missed %d check-ins", c.proc.PID, c.missedCheckins) + } } if c.observed.State != state || c.observed.Message != msg { c.observed.State = state diff --git a/pkg/component/runtime/failed.go b/pkg/component/runtime/failed.go index 293d9f65054..a89746aac35 100644 --- a/pkg/component/runtime/failed.go +++ b/pkg/component/runtime/failed.go @@ -3,8 +3,6 @@ package runtime import ( "context" "errors" - "time" - "github.com/elastic/elastic-agent-client/v7/pkg/client" "github.com/elastic/elastic-agent/pkg/component" @@ -30,7 +28,7 @@ func NewFailedRuntime(comp component.Component) (ComponentRuntime, error) { } // Run runs the runtime for a component that got an error from the component loader. -func (c *FailedRuntime) Run(ctx context.Context, _ Communicator, _ time.Duration) { +func (c *FailedRuntime) Run(ctx context.Context, _ Communicator) { // state is hard coded to failed c.ch <- createState(c.current, false) select { diff --git a/pkg/component/runtime/manager.go b/pkg/component/runtime/manager.go index 70af8955530..016e857ae96 100644 --- a/pkg/component/runtime/manager.go +++ b/pkg/component/runtime/manager.go @@ -32,9 +32,6 @@ const ( // initialCheckinTimeout is the maximum amount of wait time from initial check-in stream to // getting the first check-in observed state. initialCheckinTimeout = 5 * time.Second - // checkinPeriod is how often a component must send an observed checkin message over the communication - // control protocol. - checkinPeriod = 30 * time.Second // maxCheckinMisses is the maximum number of check-in misses a component can miss before it is killed // and restarted. maxCheckinMisses = 3 @@ -67,8 +64,7 @@ type Manager struct { subMx sync.RWMutex subscriptions map[string][]*Subscription - shuttingDown atomic.Bool - checkinPeriod time.Duration + shuttingDown atomic.Bool } // NewManager creates a new manager. 
@@ -85,7 +81,6 @@ func NewManager(logger *logger.Logger, listenAddr string, tracer *apm.Tracer) (* waitReady: make(map[string]waitForReady), current: make(map[string]*componentRuntimeState), subscriptions: make(map[string][]*Subscription), - checkinPeriod: checkinPeriod, } return m, nil } diff --git a/pkg/component/runtime/manager_test.go b/pkg/component/runtime/manager_test.go index d83bd5f08f9..8a43b6a3316 100644 --- a/pkg/component/runtime/manager_test.go +++ b/pkg/component/runtime/manager_test.go @@ -794,7 +794,6 @@ func TestManager_FakeInput_RestartsOnMissedCheckins(t *testing.T) { m, err := NewManager(newErrorLogger(t), "localhost:0", apmtest.DiscardTracer) require.NoError(t, err) - m.checkinPeriod = 100 * time.Millisecond errCh := make(chan error) go func() { errCh <- m.Run(ctx) @@ -817,8 +816,14 @@ func TestManager_FakeInput_RestartsOnMissedCheckins(t *testing.T) { BinaryName: "", BinaryPath: binaryPath, Spec: component.InputSpec{ - Name: "fake", - Command: &component.CommandSpec{}, + Name: "fake", + Command: &component.CommandSpec{ + Timeouts: component.CommandTimeoutSpec{ + // very low checkin timeout so we can cause missed check-ins + Checkin: 100 * time.Millisecond, + Stop: 30 * time.Second, + }, + }, }, }, Units: []component.Unit{ diff --git a/pkg/component/runtime/runtime.go b/pkg/component/runtime/runtime.go index 36f52805dbe..a97e8c962cd 100644 --- a/pkg/component/runtime/runtime.go +++ b/pkg/component/runtime/runtime.go @@ -7,11 +7,9 @@ import ( "github.com/elastic/elastic-agent-client/v7/pkg/client" "github.com/elastic/elastic-agent-client/v7/pkg/proto" "github.com/elastic/elastic-agent-libs/atomic" + "github.com/elastic/elastic-agent/pkg/component" "github.com/elastic/elastic-agent/pkg/core/logger" "sync" - "time" - - "github.com/elastic/elastic-agent/pkg/component" ) // ComponentUnitState is the state for a unit running in a component. @@ -164,7 +162,7 @@ type ComponentRuntime interface { // Called by Manager inside a go-routine. Run should not return until the passed in context is done. Run is always // called before any of the other methods in the interface and once the context is done none of those methods will // ever be called again. - Run(ctx context.Context, comm Communicator, checkinPeriod time.Duration) + Run(ctx context.Context, comm Communicator) // Watch returns the channel that sends component state. // // Channel should send a new state anytime a state for a unit or the whole component changes. @@ -288,7 +286,7 @@ func newComponentRuntimeState(m *Manager, logger *logger.Logger, comp component. go func() { defer close(runChan) defer comm.destroy() - runtime.Run(runCtx, comm, m.checkinPeriod) + runtime.Run(runCtx, comm) }() return state, nil diff --git a/pkg/component/spec.go b/pkg/component/spec.go index dc916d7805b..d4ae56e147b 100644 --- a/pkg/component/spec.go +++ b/pkg/component/spec.go @@ -69,13 +69,13 @@ type CommandEnvSpec struct { // CommandTimeoutSpec is the timeout specification for subprocess. type CommandTimeoutSpec struct { - Spawn time.Duration `config:"spawn" yaml:"spawn"` - Stop time.Duration `config:"stop" yaml:"stop"` + Checkin time.Duration `config:"checkin" yaml:"checkin"` + Stop time.Duration `config:"stop" yaml:"stop"` } // InitDefaults initialized the defaults for the timeouts. 
func (t *CommandTimeoutSpec) InitDefaults() { - t.Spawn = 30 * time.Second + t.Checkin = 30 * time.Second t.Stop = 30 * time.Second } diff --git a/pkg/core/process/cmd_linux.go b/pkg/core/process/cmd_linux.go index 958b3e832e9..88e42d6d9e3 100644 --- a/pkg/core/process/cmd_linux.go +++ b/pkg/core/process/cmd_linux.go @@ -9,6 +9,7 @@ package process import ( "context" + "fmt" "math" "os" "os/exec" @@ -41,7 +42,7 @@ func getCmd(ctx context.Context, path string, env []string, uid, gid int, arg .. return nil, fmt.Errorf("invalid uid: '%d' or gid: '%d'", uid, gid) } - return cmd + return cmd, nil } func isInt32(val int) bool { From a60ae17244b3682f139ef5ec2617f3296b29fa8d Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Thu, 30 Jun 2022 10:28:18 -0400 Subject: [PATCH 05/19] Fix tests now that checkin timeout needs to be defined. --- pkg/component/runtime/manager_test.go | 47 +++++++++++---------------- 1 file changed, 19 insertions(+), 28 deletions(-) diff --git a/pkg/component/runtime/manager_test.go b/pkg/component/runtime/manager_test.go index 8a43b6a3316..f4db1e62d39 100644 --- a/pkg/component/runtime/manager_test.go +++ b/pkg/component/runtime/manager_test.go @@ -18,6 +18,18 @@ import ( "github.com/elastic/elastic-agent/pkg/component" ) +var ( + fakeInputSpec = component.InputSpec{ + Name: "fake", + Command: &component.CommandSpec{ + Timeouts: component.CommandTimeoutSpec{ + Checkin: 30 * time.Second, + Stop: 30 * time.Second, + }, + }, + } +) + func TestManager_SimpleComponentErr(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -141,10 +153,7 @@ func TestManager_FakeInput_StartStop(t *testing.T) { InputType: "fake", BinaryName: "", BinaryPath: binaryPath, - Spec: component.InputSpec{ - Name: "fake", - Command: &component.CommandSpec{}, - }, + Spec: fakeInputSpec, }, Units: []component.Unit{ { @@ -262,10 +271,7 @@ func TestManager_FakeInput_Configure(t *testing.T) { InputType: "fake", BinaryName: "", BinaryPath: binaryPath, - Spec: component.InputSpec{ - Name: "fake", - Command: &component.CommandSpec{}, - }, + Spec: fakeInputSpec, }, Units: []component.Unit{ { @@ -388,10 +394,7 @@ func TestManager_FakeInput_RemoveUnit(t *testing.T) { InputType: "fake", BinaryName: "", BinaryPath: binaryPath, - Spec: component.InputSpec{ - Name: "fake", - Command: &component.CommandSpec{}, - }, + Spec: fakeInputSpec, }, Units: []component.Unit{ { @@ -546,10 +549,7 @@ func TestManager_FakeInput_ActionState(t *testing.T) { InputType: "fake", BinaryName: "", BinaryPath: binaryPath, - Spec: component.InputSpec{ - Name: "fake", - Command: &component.CommandSpec{}, - }, + Spec: fakeInputSpec, }, Units: []component.Unit{ { @@ -672,10 +672,7 @@ func TestManager_FakeInput_Restarts(t *testing.T) { InputType: "fake", BinaryName: "", BinaryPath: binaryPath, - Spec: component.InputSpec{ - Name: "fake", - Command: &component.CommandSpec{}, - }, + Spec: fakeInputSpec, }, Units: []component.Unit{ { @@ -933,10 +930,7 @@ func TestManager_FakeInput_InvalidAction(t *testing.T) { InputType: "fake", BinaryName: "", BinaryPath: binaryPath, - Spec: component.InputSpec{ - Name: "fake", - Command: &component.CommandSpec{}, - }, + Spec: fakeInputSpec, }, Units: []component.Unit{ { @@ -1055,10 +1049,7 @@ func TestManager_FakeInput_MultiComponent(t *testing.T) { InputType: "fake", BinaryName: "", BinaryPath: binaryPath, - Spec: component.InputSpec{ - Name: "fake", - Command: &component.CommandSpec{}, - }, + Spec: fakeInputSpec, } components := []component.Component{ { From 
a40830cc1c6aabc7aa5e36a617f130bce8e3109c Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Wed, 13 Jul 2022 15:19:48 -0400 Subject: [PATCH 06/19] Fix code review and lint. --- ...crypted_disk_storage_windows_linux_test.go | 3 +- .../install/atomic/atomic_installer.go | 3 +- internal/pkg/core/plugin/process/app.go | 10 ++-- internal/pkg/core/plugin/process/start.go | 2 +- internal/pkg/core/plugin/process/status.go | 3 +- pkg/component/fake/main.go | 37 ++++++------- pkg/component/runtime/command.go | 54 ++++++++++++++----- pkg/component/runtime/failed.go | 10 +++- pkg/component/runtime/manager.go | 33 +++++++----- pkg/component/runtime/manager_test.go | 23 +++++--- pkg/component/runtime/runtime.go | 40 +++++++------- pkg/component/runtime/runtime_comm.go | 13 +++-- pkg/component/runtime/subscription.go | 6 ++- pkg/core/process/process.go | 10 ++-- 14 files changed, 159 insertions(+), 88 deletions(-) diff --git a/internal/pkg/agent/storage/encrypted_disk_storage_windows_linux_test.go b/internal/pkg/agent/storage/encrypted_disk_storage_windows_linux_test.go index cc53d065c73..a965295e9cc 100644 --- a/internal/pkg/agent/storage/encrypted_disk_storage_windows_linux_test.go +++ b/internal/pkg/agent/storage/encrypted_disk_storage_windows_linux_test.go @@ -16,8 +16,9 @@ import ( "path/filepath" "testing" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/secret" "github.com/google/go-cmp/cmp" + + "github.com/elastic/elastic-agent/internal/pkg/agent/application/secret" ) const ( diff --git a/internal/pkg/artifact/install/atomic/atomic_installer.go b/internal/pkg/artifact/install/atomic/atomic_installer.go index 3e61aacb4ef..10c2652c1c8 100644 --- a/internal/pkg/artifact/install/atomic/atomic_installer.go +++ b/internal/pkg/artifact/install/atomic/atomic_installer.go @@ -11,8 +11,9 @@ import ( "path/filepath" "runtime" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" "github.com/hashicorp/go-multierror" + + "github.com/elastic/elastic-agent/internal/pkg/agent/program" ) type embeddedInstaller interface { diff --git a/internal/pkg/core/plugin/process/app.go b/internal/pkg/core/plugin/process/app.go index f5e1afa085b..c184cefe397 100644 --- a/internal/pkg/core/plugin/process/app.go +++ b/internal/pkg/core/plugin/process/app.go @@ -23,7 +23,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/core/status" "github.com/elastic/elastic-agent/internal/pkg/tokenbucket" "github.com/elastic/elastic-agent/pkg/core/logger" - process2 "github.com/elastic/elastic-agent/pkg/core/process" + "github.com/elastic/elastic-agent/pkg/core/process" "github.com/elastic/elastic-agent/pkg/core/server" ) @@ -56,7 +56,7 @@ type Application struct { monitor monitoring.Monitor statusReporter status.Reporter - processConfig *process2.Config + processConfig *process.Config logger *logger.Logger @@ -190,7 +190,7 @@ func (a *Application) SetState(s state.Status, msg string, payload map[string]in a.setState(s, msg, payload) } -func (a *Application) watch(ctx context.Context, p app.Taggable, proc *process2.Info, cfg map[string]interface{}) { +func (a *Application) watch(ctx context.Context, p app.Taggable, proc *process.Info, cfg map[string]interface{}) { go func() { var procState *os.ProcessState @@ -235,7 +235,7 @@ func (a *Application) watch(ctx context.Context, p app.Taggable, proc *process2. 
}() } -func (a *Application) stopWatcher(procInfo *process2.Info) { +func (a *Application) stopWatcher(procInfo *process.Info) { if procInfo != nil { if closer, ok := a.watchClosers[procInfo.PID]; ok { closer() @@ -280,7 +280,7 @@ func (a *Application) cleanUp() { a.monitor.Cleanup(a.desc.Spec(), a.pipelineID) } -func (a *Application) gracefulKill(proc *process2.Info) { +func (a *Application) gracefulKill(proc *process.Info) { if proc == nil || proc.Process == nil { return } diff --git a/internal/pkg/core/plugin/process/start.go b/internal/pkg/core/plugin/process/start.go index 4332ab071e3..55081d9977d 100644 --- a/internal/pkg/core/plugin/process/start.go +++ b/internal/pkg/core/plugin/process/start.go @@ -7,7 +7,6 @@ package process import ( "context" "fmt" - "github.com/elastic/elastic-agent/pkg/core/process" "io" "os/exec" "path/filepath" @@ -20,6 +19,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/core/app" "github.com/elastic/elastic-agent/internal/pkg/core/state" + "github.com/elastic/elastic-agent/pkg/core/process" "github.com/elastic/elastic-agent/pkg/core/server" ) diff --git a/internal/pkg/core/plugin/process/status.go b/internal/pkg/core/plugin/process/status.go index 93d7c723b05..50488dfd77b 100644 --- a/internal/pkg/core/plugin/process/status.go +++ b/internal/pkg/core/plugin/process/status.go @@ -9,9 +9,10 @@ import ( "fmt" "time" - "github.com/elastic/elastic-agent-client/v7/pkg/proto" "gopkg.in/yaml.v2" + "github.com/elastic/elastic-agent-client/v7/pkg/proto" + "github.com/elastic/elastic-agent/internal/pkg/core/state" "github.com/elastic/elastic-agent/pkg/core/process" "github.com/elastic/elastic-agent/pkg/core/server" diff --git a/pkg/component/fake/main.go b/pkg/component/fake/main.go index ac851b277e4..d92586aa0a3 100644 --- a/pkg/component/fake/main.go +++ b/pkg/component/fake/main.go @@ -8,6 +8,7 @@ import ( "context" "errors" "fmt" + "io" "os" "os/signal" "syscall" @@ -77,8 +78,10 @@ func run() error { case client.UnitChangedRemoved: s.removed(change.Unit) } - case <-c.Errors(): - fmt.Fprintf(os.Stderr, "GRPC client error: %s", err) + case err := <-c.Errors(): + if err != nil && !errors.Is(err, context.Canceled) && !errors.Is(err, io.EOF) { + fmt.Fprintf(os.Stderr, "GRPC client error: %s", err) + } } } } @@ -139,22 +142,18 @@ type runningUnit interface { type fakeInput struct { unit *client.Unit - cfg map[string]interface{} + cfg inputConfig state client.UnitState stateMsg string } -func newFakeInput(unit *client.Unit, cfg map[string]interface{}) (*fakeInput, error) { - state, msg, err := getStateFromMap(cfg) - if err != nil { - return nil, err - } +func newFakeInput(unit *client.Unit, cfg inputConfig) (*fakeInput, error) { i := &fakeInput{ unit: unit, cfg: cfg, - state: state, - stateMsg: msg, + state: cfg.State, + stateMsg: cfg.Message, } unit.RegisterAction(&stateSetterAction{i}) unit.RegisterAction(&killAction{}) @@ -234,23 +233,19 @@ func (s *killAction) Execute(_ context.Context, params map[string]interface{}) ( func newRunningUnit(unit *client.Unit) (runningUnit, error) { _, config := unit.Expected() - var cfg map[string]interface{} + var cfg inputConfig err := yaml.Unmarshal([]byte(config), &cfg) if err != nil { return nil, fmt.Errorf("failed to unmarshal YAML: %w", err) } - unitType, ok := cfg["type"] - if !ok { - return nil, fmt.Errorf("unit missing config type") - } - if unitType == "" { + if 
cfg.Type == "" { return nil, fmt.Errorf("unit config type empty") } - switch unitType { + switch cfg.Type { case fake: return newFakeInput(unit, cfg) } - return nil, fmt.Errorf("unknown unit config type: %s", unitType) + return nil, fmt.Errorf("unknown unit config type: %s", cfg.Type) } func newUnitKey(unit *client.Unit) unitKey { @@ -282,3 +277,9 @@ func getStateFromMap(cfg map[string]interface{}) (client.UnitState, string, erro } return stateType, stateMsgStr, nil } + +type inputConfig struct { + Type string `json:"type" yaml:"type"` + State client.UnitState `json:"state" yaml:"state"` + Message string `json:"message" yaml:"message"` +} diff --git a/pkg/component/runtime/command.go b/pkg/component/runtime/command.go index ef6e63ffadb..dce34e4bcd4 100644 --- a/pkg/component/runtime/command.go +++ b/pkg/component/runtime/command.go @@ -1,24 +1,32 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + package runtime import ( "context" "errors" "fmt" - "github.com/elastic/elastic-agent-client/v7/pkg/proto" - "gopkg.in/yaml.v2" "os" "os/exec" "time" + "gopkg.in/yaml.v2" + + "github.com/elastic/elastic-agent-client/v7/pkg/proto" + "github.com/elastic/elastic-agent-client/v7/pkg/client" "github.com/elastic/elastic-agent/pkg/component" "github.com/elastic/elastic-agent/pkg/core/process" ) +type actionMode int + const ( - actionStart = 0 - actionStop = 1 + actionStart = actionMode(0) + actionStop = actionMode(1) ) type procState struct { @@ -26,15 +34,16 @@ type procState struct { state *os.ProcessState } +// CommandRuntime provides the command runtime for running a component as a subprocess. type CommandRuntime struct { current component.Component ch chan ComponentState - actionCh chan int + actionCh chan actionMode procCh chan procState compCh chan component.Component - actionState int + actionState actionMode proc *process.Info expected ComponentState @@ -43,16 +52,17 @@ type CommandRuntime struct { missedCheckins int } +// NewCommandRuntime creates a new command runtime for the provided component. func NewCommandRuntime(comp component.Component) (ComponentRuntime, error) { if comp.Spec.Spec.Command == nil { - return nil, errors.New("must have command defined in input specification") + return nil, errors.New("must have command defined in specification") } expected := newComponentState(&comp, client.UnitStateHealthy, "", 1) observed := newComponentState(&comp, client.UnitStateStarting, "Starting", 0) return &CommandRuntime{ current: comp, ch: make(chan ComponentState), - actionCh: make(chan int), + actionCh: make(chan actionMode), procCh: make(chan procState), compCh: make(chan component.Component), actionState: actionStart, @@ -61,7 +71,12 @@ func NewCommandRuntime(comp component.Component) (ComponentRuntime, error) { }, nil } -func (c *CommandRuntime) Run(ctx context.Context, comm Communicator) { +// Run starts the runtime for the component. +// +// Called by Manager inside a go-routine. Run should not return until the passed in context is done. Run is always +// called before any of the other methods in the interface and once the context is done none of those methods will +// ever be called again. 
+func (c *CommandRuntime) Run(ctx context.Context, comm Communicator) error { checkinPeriod := c.current.Spec.Spec.Command.Timeouts.Checkin c.forceCompState(client.UnitStateStarting, "Starting") t := time.NewTicker(checkinPeriod) @@ -69,7 +84,7 @@ func (c *CommandRuntime) Run(ctx context.Context, comm Communicator) { for { select { case <-ctx.Done(): - return + return ctx.Err() case as := <-c.actionCh: c.actionState = as switch as { @@ -135,10 +150,10 @@ func (c *CommandRuntime) Run(ctx context.Context, comm Communicator) { now := time.Now().UTC() if c.lastCheckin.IsZero() { // never checked-in - c.missedCheckins += 1 + c.missedCheckins++ } else if now.Sub(c.lastCheckin) > checkinPeriod { // missed check-in during required period - c.missedCheckins += 1 + c.missedCheckins++ } else if now.Sub(c.lastCheckin) <= checkinPeriod { c.missedCheckins = 0 } @@ -160,25 +175,40 @@ func (c *CommandRuntime) Run(ctx context.Context, comm Communicator) { } } +// Watch returns the channel that sends component state. +// +// Channel should send a new state anytime a state for a unit or the whole component changes. func (c *CommandRuntime) Watch() <-chan ComponentState { return c.ch } +// Start starts the component. +// +// Non-blocking and never returns an error. func (c *CommandRuntime) Start() error { c.actionCh <- actionStart return nil } +// Update updates the currComp runtime with a new-revision for the component definition. +// +// Non-blocking and never returns an error. func (c *CommandRuntime) Update(comp component.Component) error { c.compCh <- comp return nil } +// Stop stops the component. +// +// Non-blocking and never returns an error. func (c *CommandRuntime) Stop() error { c.actionCh <- actionStop return nil } +// Teardown tears down the component. +// +// Non-blocking and never returns an error. func (c *CommandRuntime) Teardown() error { // teardown is not different from stop for command runtime return c.Stop() diff --git a/pkg/component/runtime/failed.go b/pkg/component/runtime/failed.go index a89746aac35..b495eeb4d2a 100644 --- a/pkg/component/runtime/failed.go +++ b/pkg/component/runtime/failed.go @@ -1,8 +1,13 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + package runtime import ( "context" "errors" + "github.com/elastic/elastic-agent-client/v7/pkg/client" "github.com/elastic/elastic-agent/pkg/component" @@ -28,17 +33,18 @@ func NewFailedRuntime(comp component.Component) (ComponentRuntime, error) { } // Run runs the runtime for a component that got an error from the component loader. -func (c *FailedRuntime) Run(ctx context.Context, _ Communicator) { +func (c *FailedRuntime) Run(ctx context.Context, _ Communicator) error { // state is hard coded to failed c.ch <- createState(c.current, false) select { case <-ctx.Done(): - return + return ctx.Err() case <-c.done: // set to stopped as soon as done is given c.ch <- createState(c.current, true) } <-ctx.Done() + return ctx.Err() } // Watch returns the watch channel. diff --git a/pkg/component/runtime/manager.go b/pkg/component/runtime/manager.go index 016e857ae96..e7125e82f68 100644 --- a/pkg/component/runtime/manager.go +++ b/pkg/component/runtime/manager.go @@ -1,3 +1,7 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + package runtime import ( @@ -7,15 +11,13 @@ import ( "encoding/json" "errors" "fmt" - "github.com/gofrs/uuid" "net" "strings" "sync" "time" - "github.com/elastic/elastic-agent-client/v7/pkg/client" - "github.com/elastic/elastic-agent-client/v7/pkg/proto" - "github.com/elastic/elastic-agent-libs/atomic" + "github.com/gofrs/uuid" + "go.elastic.co/apm" "go.elastic.co/apm/module/apmgrpc" "google.golang.org/grpc" @@ -23,6 +25,10 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/status" + "github.com/elastic/elastic-agent-client/v7/pkg/client" + "github.com/elastic/elastic-agent-client/v7/pkg/proto" + "github.com/elastic/elastic-agent-libs/atomic" + "github.com/elastic/elastic-agent/internal/pkg/core/authority" "github.com/elastic/elastic-agent/pkg/component" "github.com/elastic/elastic-agent/pkg/core/logger" @@ -209,7 +215,7 @@ func (m *Manager) WaitForReady(ctx context.Context) error { } } -// Update updates the current state of the running components. +// Update updates the currComp state of the running components. // // This returns as soon as possible, work is performed in the background to func (m *Manager) Update(components []component.Component) error { @@ -290,13 +296,13 @@ func (m *Manager) PerformAction(ctx context.Context, unit component.Unit, name s func (m *Manager) Subscribe(componentID string) *Subscription { sub := newSubscription(m) - // add latest to channel + // add latestState to channel m.mx.RLock() comp, ok := m.current[componentID] m.mx.RUnlock() if ok { comp.latestMx.RLock() - sub.ch <- comp.latest + sub.ch <- comp.latestState comp.latestMx.RUnlock() } @@ -308,10 +314,12 @@ func (m *Manager) Subscribe(componentID string) *Subscription { return sub } +// Checkin is called by v1 sub-processes and has been removed. func (m *Manager) Checkin(_ proto.ElasticAgent_CheckinServer) error { return status.Error(codes.Unavailable, "removed; upgrade to V2") } +// CheckinV2 is the new v2 communication for components. func (m *Manager) CheckinV2(server proto.ElasticAgent_CheckinV2Server) error { initCheckinChan := make(chan *proto.CheckinObserved) go func() { @@ -353,6 +361,7 @@ func (m *Manager) CheckinV2(server proto.ElasticAgent_CheckinV2Server) error { return runtime.comm.checkin(server, initCheckin) } +// Actions is the actions stream used to broker actions between Elastic Agent and components. 
func (m *Manager) Actions(server proto.ElasticAgent_ActionsServer) error { initRespChan := make(chan *proto.ActionResponse) go func() { @@ -412,7 +421,7 @@ func (m *Manager) update(components []component.Component, teardown bool) error existing, ok := m.current[comp.ID] if ok { // existing component; send runtime updated value - existing.current = comp + existing.currComp = comp if err := existing.runtime.Update(comp); err != nil { return fmt.Errorf("failed to update component %s: %w", comp.ID, err) } @@ -462,7 +471,7 @@ func (m *Manager) shutdown() { func (m *Manager) stateChanged(state *componentRuntimeState, latest ComponentState) { m.subMx.RLock() - subs, ok := m.subscriptions[state.current.ID] + subs, ok := m.subscriptions[state.currComp.ID] if ok { for _, sub := range subs { sub.ch <- latest @@ -472,9 +481,9 @@ func (m *Manager) stateChanged(state *componentRuntimeState, latest ComponentSta shutdown := state.shuttingDown.Load() if shutdown && latest.State == client.UnitStateStopped { - // shutdown is complete; remove from current + // shutdown is complete; remove from currComp m.mx.Lock() - delete(m.current, state.current.ID) + delete(m.current, state.currComp.ID) m.mx.Unlock() state.destroy() @@ -540,7 +549,7 @@ func (m *Manager) getRuntimeFromUnit(unit component.Unit) *componentRuntimeState m.mx.RLock() defer m.mx.RUnlock() for _, comp := range m.current { - for _, u := range comp.current.Units { + for _, u := range comp.currComp.Units { if u.Type == unit.Type && u.ID == unit.ID { return comp } diff --git a/pkg/component/runtime/manager_test.go b/pkg/component/runtime/manager_test.go index f4db1e62d39..adeb2b1243a 100644 --- a/pkg/component/runtime/manager_test.go +++ b/pkg/component/runtime/manager_test.go @@ -1,20 +1,27 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ package runtime import ( "context" "errors" "fmt" - "github.com/elastic/elastic-agent-libs/logp" - "github.com/elastic/elastic-agent/pkg/core/logger" - "go.elastic.co/apm/apmtest" "path/filepath" "runtime" "testing" "time" - "github.com/elastic/elastic-agent-client/v7/pkg/client" + "go.elastic.co/apm/apmtest" + + "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/elastic-agent/pkg/core/logger" + "github.com/stretchr/testify/require" + "github.com/elastic/elastic-agent-client/v7/pkg/client" + "github.com/elastic/elastic-agent/pkg/component" ) @@ -1209,19 +1216,19 @@ LOOP: require.NoError(t, err) case err := <-subErrCh0: require.NoError(t, err) - count += 1 + count++ if count >= 3 { break LOOP } case err := <-subErrCh1: require.NoError(t, err) - count += 1 + count++ if count >= 3 { break LOOP } case err := <-subErrCh2: require.NoError(t, err) - count += 1 + count++ if count >= 3 { break LOOP } @@ -1266,7 +1273,7 @@ func signalState(subErrCh chan error, state *ComponentState) { if unit.State == client.UnitStateStarting { // acceptable } else if unit.State == client.UnitStateHealthy { - healthy += 1 + healthy++ } else if issue == "" { issue = fmt.Sprintf("unit %s in invalid state %v", key.UnitID, unit.State) } diff --git a/pkg/component/runtime/runtime.go b/pkg/component/runtime/runtime.go index a97e8c962cd..ee4800ce36b 100644 --- a/pkg/component/runtime/runtime.go +++ b/pkg/component/runtime/runtime.go @@ -1,15 +1,20 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + package runtime import ( "context" "encoding/json" "errors" + "sync" + "github.com/elastic/elastic-agent-client/v7/pkg/client" "github.com/elastic/elastic-agent-client/v7/pkg/proto" "github.com/elastic/elastic-agent-libs/atomic" "github.com/elastic/elastic-agent/pkg/component" "github.com/elastic/elastic-agent/pkg/core/logger" - "sync" ) // ComponentUnitState is the state for a unit running in a component. @@ -71,7 +76,7 @@ func (s *ComponentState) syncComponent(comp *component.Component, initState clie existing.Payload = nil existing.config = unit.Config if ok { - existing.configStateIdx += 1 + existing.configStateIdx++ } else { existing.configStateIdx = initCfgIdx } @@ -162,7 +167,7 @@ type ComponentRuntime interface { // Called by Manager inside a go-routine. Run should not return until the passed in context is done. Run is always // called before any of the other methods in the interface and once the context is done none of those methods will // ever be called again. - Run(ctx context.Context, comm Communicator) + Run(ctx context.Context, comm Communicator) error // Watch returns the channel that sends component state. // // Channel should send a new state anytime a state for a unit or the whole component changes. @@ -171,7 +176,7 @@ type ComponentRuntime interface { // // Must be non-blocking and never return an error unless the whole Elastic Agent needs to exit. Start() error - // Update updates the current runtime with a new-revision for the component definition. + // Update updates the currComp runtime with a new-revision for the component definition. // // Must be non-blocking and never return an error unless the whole Elastic Agent needs to exit. 
Update(comp component.Component) error @@ -208,13 +213,13 @@ type componentRuntimeState struct { logger *logger.Logger comm *runtimeComm - current component.Component - runtime ComponentRuntime + currComp component.Component + runtime ComponentRuntime shuttingDown atomic.Bool - latestMx sync.RWMutex - latest ComponentState + latestMx sync.RWMutex + latestState ComponentState watchChan chan bool watchCanceller context.CancelFunc @@ -239,12 +244,12 @@ func newComponentRuntimeState(m *Manager, logger *logger.Logger, comp component. watchChan := make(chan bool) runChan := make(chan bool) state := &componentRuntimeState{ - manager: m, - logger: logger, - comm: comm, - current: comp, - runtime: runtime, - latest: ComponentState{ + manager: m, + logger: logger, + comm: comm, + currComp: comp, + runtime: runtime, + latestState: ComponentState{ State: client.UnitStateStarting, Message: "Starting", Units: nil, @@ -265,7 +270,7 @@ func newComponentRuntimeState(m *Manager, logger *logger.Logger, comp component. return case s := <-runtime.Watch(): state.latestMx.Lock() - state.latest = s + state.latestState = s state.latestMx.Unlock() state.manager.stateChanged(state, s) case ar := <-comm.actionsResponse: @@ -286,7 +291,7 @@ func newComponentRuntimeState(m *Manager, logger *logger.Logger, comp component. go func() { defer close(runChan) defer comm.destroy() - runtime.Run(runCtx, comm) + _ = runtime.Run(runCtx, comm) }() return state, nil @@ -300,9 +305,8 @@ func (s *componentRuntimeState) stop(teardown bool) error { s.shuttingDown.Store(true) if teardown { return s.runtime.Teardown() - } else { - return s.runtime.Stop() } + return s.runtime.Stop() } func (s *componentRuntimeState) destroy() { diff --git a/pkg/component/runtime/runtime_comm.go b/pkg/component/runtime/runtime_comm.go index 3a15a79dc8c..622b514c230 100644 --- a/pkg/component/runtime/runtime_comm.go +++ b/pkg/component/runtime/runtime_comm.go @@ -1,19 +1,26 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + package runtime import ( "errors" "fmt" - "github.com/elastic/elastic-agent-client/v7/pkg/client" - protobuf "google.golang.org/protobuf/proto" "io" "strings" "sync" - "github.com/elastic/elastic-agent-client/v7/pkg/proto" + protobuf "google.golang.org/protobuf/proto" + + "github.com/elastic/elastic-agent-client/v7/pkg/client" + "github.com/gofrs/uuid" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "github.com/elastic/elastic-agent-client/v7/pkg/proto" + "github.com/elastic/elastic-agent/internal/pkg/core/authority" "github.com/elastic/elastic-agent/pkg/core/logger" ) diff --git a/pkg/component/runtime/subscription.go b/pkg/component/runtime/subscription.go index c9574aaa030..88f4106d21a 100644 --- a/pkg/component/runtime/subscription.go +++ b/pkg/component/runtime/subscription.go @@ -1,3 +1,7 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + package runtime // Subscription provides a channel for notifications on a component state. 
@@ -9,7 +13,7 @@ type Subscription struct { func newSubscription(manager *Manager) *Subscription { return &Subscription{ manager: manager, - ch: make(chan ComponentState, 1), // buffer of 1 to allow initial latest state + ch: make(chan ComponentState, 1), // buffer of 1 to allow initial latestState state } } diff --git a/pkg/core/process/process.go b/pkg/core/process/process.go index 73ff4e9bfbf..428469687b6 100644 --- a/pkg/core/process/process.go +++ b/pkg/core/process/process.go @@ -31,21 +31,21 @@ func Start(path string, uid, gid int, args []string, env []string, opts ...Optio func StartContext(ctx context.Context, path string, uid, gid int, args []string, env []string, opts ...Option) (*Info, error) { cmd, err := getCmd(ctx, path, env, uid, gid, args...) if err != nil { - return nil, fmt.Errorf("failed to create command for '%s': %w", path, err) + return nil, fmt.Errorf("failed to create command for %q: %w", path, err) } for _, o := range opts { if err := o(cmd); err != nil { - return nil, fmt.Errorf("failed to set option command for '%s': %w", path, err) + return nil, fmt.Errorf("failed to set option command for %q: %w", path, err) } } stdin, err := cmd.StdinPipe() if err != nil { - return nil, fmt.Errorf("failed to create stdin for '%s': %w", path, err) + return nil, fmt.Errorf("failed to create stdin for %q: %w", path, err) } // start process if err := cmd.Start(); err != nil { - return nil, fmt.Errorf("failed to start '%s': %w", path, err) + return nil, fmt.Errorf("failed to start %q: %w", path, err) } // Hook to JobObject on windows, noop on other platforms. @@ -54,7 +54,7 @@ func StartContext(ctx context.Context, path string, uid, gid int, args []string, // after the agent process gets killed. if err := JobObject.Assign(cmd.Process); err != nil { _ = killCmd(cmd.Process) - return nil, fmt.Errorf("failed job assignment '%s': %w", path, err) + return nil, fmt.Errorf("failed job assignment %q: %w", path, err) } return &Info{ From a00db54b1b4b5b41e4ee4c174dacf56b22faf677 Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Wed, 13 Jul 2022 14:11:44 -0400 Subject: [PATCH 07/19] Work on actually running the v2 runtime. 
--- internal/pkg/agent/application/application.go | 98 ++++--- .../application/coordinator/coordinator.go | 261 ++++++++++++++++++ .../application/fleet_server_bootstrap.go | 2 + internal/pkg/agent/application/local_mode.go | 180 ++++-------- .../pkg/agent/application/managed_mode.go | 101 ++----- internal/pkg/agent/application/once.go | 29 +- internal/pkg/agent/application/periodic.go | 57 ++-- .../application/pipeline/emitter/emitter.go | 5 +- internal/pkg/agent/cmd/inspect.go | 11 +- internal/pkg/agent/cmd/run.go | 24 +- internal/pkg/agent/configuration/grpc.go | 22 ++ internal/pkg/agent/configuration/settings.go | 5 +- internal/pkg/agent/install/uninstall.go | 13 +- internal/pkg/composable/controller.go | 150 +++++----- internal/pkg/composable/controller_test.go | 34 ++- .../pkg/composable/providers/docker/docker.go | 83 +++--- .../pkg/composable/providers/host/host.go | 48 ++-- .../composable/providers/host/host_test.go | 17 +- .../providers/kubernetes/kubernetes.go | 34 ++- .../kubernetes_leaderelection.go | 42 +-- .../kubernetessecrets/kubernetes_secrets.go | 24 +- .../kubernetes_secrets_test.go | 47 +++- internal/pkg/core/composable/providers.go | 7 +- 23 files changed, 799 insertions(+), 495 deletions(-) create mode 100644 internal/pkg/agent/application/coordinator/coordinator.go create mode 100644 internal/pkg/agent/configuration/grpc.go diff --git a/internal/pkg/agent/application/application.go b/internal/pkg/agent/application/application.go index 7bc0089940f..17ff9d4d83e 100644 --- a/internal/pkg/agent/application/application.go +++ b/internal/pkg/agent/application/application.go @@ -7,8 +7,12 @@ package application import ( "context" "fmt" - + "github.com/elastic/elastic-agent/internal/pkg/capabilities" + "github.com/elastic/elastic-agent/pkg/component" + "github.com/elastic/go-sysinfo" "go.elastic.co/apm" + goruntime "runtime" + "strconv" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/agent/storage" @@ -24,10 +28,12 @@ import ( "github.com/elastic/elastic-agent/pkg/core/logger" ) +// PlatformModifier can modify the platform details before the runtime specifications are loaded. +type PlatformModifier func(detail component.PlatformDetail) component.PlatformDetail + // Application is the application interface implemented by the different running mode. type Application interface { - Start() error - Stop() error + Run(ctx context.Context) error AgentInfo() *info.AgentInfo Routes() *sorted.Set } @@ -48,59 +54,83 @@ func New( uc upgraderControl, agentInfo *info.AgentInfo, tracer *apm.Tracer, + modifiers ...PlatformModifier, ) (Application, error) { - // Load configuration from disk to understand in which mode of operation - // we must start the elastic-agent, the mode of operation cannot be changed without restarting the - // elastic-agent. - pathConfigFile := paths.ConfigFile() - rawConfig, err := config.LoadFile(pathConfigFile) + log.Info("Gathering system information") + platform, err := getPlatformDetail(modifiers...) 
if err != nil { - return nil, err + return nil, fmt.Errorf("failed to gather system information: %w", err) } - if err := info.InjectAgentConfig(rawConfig); err != nil { - return nil, err + log.Info("Detecting available inputs and outputs") + specs, err := component.LoadRuntimeSpecs(paths.Components(), platform) + if err != nil { + return nil, fmt.Errorf("failed to detect inputs and outputs: %w", err) } - return createApplication(log, pathConfigFile, rawConfig, reexec, statusCtrl, uc, agentInfo, tracer) -} + log.Info("Determine allowed capabilities") + caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), log, statusCtrl) + if err != nil { + return nil, fmt.Errorf("failed to determine capabilities: %w", err) + } -func createApplication( - log *logger.Logger, - pathConfigFile string, - rawConfig *config.Config, - reexec reexecManager, - statusCtrl status.Controller, - uc upgraderControl, - agentInfo *info.AgentInfo, - tracer *apm.Tracer, -) (Application, error) { - log.Info("Detecting execution mode") - ctx := context.Background() + log.Info("Parsing configuration and determining execution mode") + pathConfigFile := paths.ConfigFile() + rawConfig, err := config.LoadFile(pathConfigFile) + if err != nil { + return nil, fmt.Errorf("failed to load configuration: %w", err) + } + if err := info.InjectAgentConfig(rawConfig); err != nil { + return nil, fmt.Errorf("failed to load configuration: %w", err) + } cfg, err := configuration.NewFromConfig(rawConfig) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to load configuration: %w", err) } if configuration.IsStandalone(cfg.Fleet) { log.Info("Agent is managed locally") - return newLocal(ctx, log, paths.ConfigFile(), rawConfig, reexec, statusCtrl, uc, agentInfo, tracer) + return newLocal(log, specs, caps, cfg, paths.ConfigFile(), rawConfig, reexec, statusCtrl, uc, agentInfo, tracer) } // not in standalone; both modes require reading the fleet.yml configuration file - var store storage.Store - store, cfg, err = mergeFleetConfig(rawConfig) - if err != nil { - return nil, err - } + //var store storage.Store + //store, cfg, err = mergeFleetConfig(rawConfig) + //if err != nil { + // return nil, err + //} if configuration.IsFleetServerBootstrap(cfg.Fleet) { log.Info("Agent is in Fleet Server bootstrap mode") - return newFleetServerBootstrap(ctx, log, pathConfigFile, rawConfig, statusCtrl, agentInfo, tracer) + //return newFleetServerBootstrap(ctx, log, pathConfigFile, rawConfig, statusCtrl, agentInfo, tracer) + return nil, errors.New("TODO: fleet-server bootstrap mode") } log.Info("Agent is managed by Fleet") - return newManaged(ctx, log, store, cfg, rawConfig, reexec, statusCtrl, agentInfo, tracer) + //return newManaged(ctx, log, store, cfg, rawConfig, reexec, statusCtrl, agentInfo, tracer) + return nil, errors.New("TODO: fleet mode") +} + +func getPlatformDetail(modifiers ...PlatformModifier) (component.PlatformDetail, error) { + info, err := sysinfo.Host() + if err != nil { + return component.PlatformDetail{}, err + } + os := info.Info().OS + detail := component.PlatformDetail{ + Platform: component.Platform{ + OS: goruntime.GOOS, + Arch: goruntime.GOARCH, + GOOS: goruntime.GOOS, + }, + Family: os.Family, + Major: strconv.Itoa(os.Major), + Minor: strconv.Itoa(os.Minor), + } + for _, modifier := range modifiers { + detail = modifier(detail) + } + return detail, nil } func mergeFleetConfig(rawConfig *config.Config) (storage.Store, *configuration.Configuration, error) { diff --git 
a/internal/pkg/agent/application/coordinator/coordinator.go b/internal/pkg/agent/application/coordinator/coordinator.go new file mode 100644 index 00000000000..49cb5a6eabc --- /dev/null +++ b/internal/pkg/agent/application/coordinator/coordinator.go @@ -0,0 +1,261 @@ +package coordinator + +import ( + "context" + "errors" + "fmt" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" + "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" + "github.com/elastic/elastic-agent/internal/pkg/capabilities" + "github.com/elastic/elastic-agent/internal/pkg/config" + "github.com/elastic/elastic-agent/pkg/component" + "github.com/elastic/elastic-agent/pkg/core/logger" + "go.elastic.co/apm" +) + +// Runner provides interface to run a manager. +type Runner interface { + // Run runs the manager. + Run(context.Context) error +} + +// RuntimeManager provides an interface to run and update the runtime. +type RuntimeManager interface { + Runner + + // Update updates the current components model. + Update([]component.Component) error +} + +// ConfigManager provides an interface to run and watch for configuration changes. +type ConfigManager interface { + Runner + + // Watch returns the chanel to watch for configuration changes. + Watch() <-chan *config.Config +} + +// VarsManager provides an interface to run and watch for variable changes. +type VarsManager interface { + Runner + + // Watch returns the chanel to watch for variable changes. + Watch() <-chan []*transpiler.Vars +} + +// ComponentsModifier is a function that takes the computed components model and modifies it before +// passing it into the components runtime manager. +type ComponentsModifier func(comps []component.Component) ([]component.Component, error) + +// Coordinator manages the intersection between configuration change and updated variables. +type Coordinator struct { + logger *logger.Logger + + specs component.RuntimeSpecs + + runtime RuntimeManager + config ConfigManager + vars VarsManager + + caps capabilities.Capability + modifiers []ComponentsModifier + + state coordinatorState +} + +// New creates a new coordinator. +func New(logger *logger.Logger, specs component.RuntimeSpecs, runtime RuntimeManager, config ConfigManager, vars VarsManager, caps capabilities.Capability, modifiers ...ComponentsModifier) *Coordinator { + return &Coordinator{ + logger: logger, + specs: specs, + runtime: runtime, + config: config, + vars: vars, + caps: caps, + modifiers: modifiers, + } +} + +// Run runs the coordinator. +// +// The RuntimeManager, ConfigManager and VarsManager that is passed into NewCoordinator are also ran and lifecycle controlled by the Run. 
+func (c *Coordinator) Run(ctx context.Context) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + runtimeWatcher := c.runtime + runtimeRun := make(chan bool) + runtimeErrCh := make(chan error) + go func(manager Runner) { + err := manager.Run(ctx) + close(runtimeRun) + runtimeErrCh <- err + }(runtimeWatcher) + + configWatcher := c.config + configRun := make(chan bool) + configErrCh := make(chan error) + go func(manager Runner) { + err := manager.Run(ctx) + close(configRun) + configErrCh <- err + }(configWatcher) + + varsWatcher := c.vars + varsRun := make(chan bool) + varsErrCh := make(chan error) + go func(manager Runner) { + err := manager.Run(ctx) + close(varsRun) + varsErrCh <- err + }(varsWatcher) + + for { + select { + case <-ctx.Done(): + runtimeErr := <-runtimeErrCh + if runtimeErr != nil && !errors.Is(runtimeErr, context.Canceled) { + return runtimeErr + } + configErr := <-configErrCh + if configErr != nil && !errors.Is(configErr, context.Canceled) { + return configErr + } + varsErr := <-varsErrCh + if varsErr != nil && !errors.Is(varsErr, context.Canceled) { + return varsErr + } + return ctx.Err() + case <-runtimeRun: + if ctx.Err() == nil { + cancel() + } + case <-configRun: + if ctx.Err() == nil { + cancel() + } + case <-varsRun: + if ctx.Err() == nil { + cancel() + } + case cfg := <-configWatcher.Watch(): + if ctx.Err() == nil { + if err := c.processConfig(ctx, cfg); err != nil { + c.logger.Errorf("%s", err) + } + } + case vars := <-varsWatcher.Watch(): + if ctx.Err() == nil { + if err := c.processVars(ctx, vars); err != nil { + c.logger.Errorf("%s", err) + } + } + } + } +} + +func (c *Coordinator) processConfig(ctx context.Context, cfg *config.Config) (err error) { + span, ctx := apm.StartSpan(ctx, "config", "app.internal") + defer func() { + apm.CaptureError(ctx, err).Send() + span.End() + }() + + if err := info.InjectAgentConfig(cfg); err != nil { + return err + } + + // perform and verify ast translation + m, err := cfg.ToMapStr() + if err != nil { + return fmt.Errorf("could not create the AST from the configuration: %w", err) + } + rawAst, err := transpiler.NewAST(m) + if err != nil { + return fmt.Errorf("could not create the AST from the configuration: %w", err) + } + + if c.caps != nil { + var ok bool + updatedAst, err := c.caps.Apply(rawAst) + if err != nil { + return fmt.Errorf("failed to apply capabilities: %w", err) + } + + rawAst, ok = updatedAst.(*transpiler.AST) + if !ok { + return fmt.Errorf("failed to transform object returned from capabilities to AST: %w", err) + } + } + + c.state.config = cfg + c.state.ast = rawAst + + if c.state.vars != nil { + return c.process(ctx) + } + return nil +} + +func (c *Coordinator) processVars(ctx context.Context, vars []*transpiler.Vars) (err error) { + span, ctx := apm.StartSpan(ctx, "vars", "app.internal") + defer func() { + apm.CaptureError(ctx, err).Send() + span.End() + }() + + c.state.vars = vars + + if c.state.ast != nil { + return c.process(ctx) + } + return nil +} + +func (c *Coordinator) process(ctx context.Context) (err error) { + span, ctx := apm.StartSpan(ctx, "process", "app.internal") + defer func() { + apm.CaptureError(ctx, err).Send() + span.End() + }() + + ast := c.state.ast.Clone() + inputs, ok := transpiler.Lookup(ast, "inputs") + if ok { + renderedInputs, err := transpiler.RenderInputs(inputs, c.state.vars) + if err != nil { + return fmt.Errorf("rendering inputs failed: %w", err) + } + err = transpiler.Insert(ast, renderedInputs, "inputs") + if err != nil { + return fmt.Errorf("inserting 
rendered inputs failed: %w", err) + } + } + + cfg, err := ast.Map() + if err != nil { + return fmt.Errorf("failed to convert ast to map[string]interface{}: %w", err) + } + comps, err := c.specs.ToComponents(cfg) + if err != nil { + return fmt.Errorf("failed to render components: %w", err) + } + + for _, modifier := range c.modifiers { + comps, err = modifier(comps) + if err != nil { + return fmt.Errorf("failed to modify components: %w", err) + } + } + + c.logger.Info("Updating running component model") + c.logger.With("components", comps).Debug("Updating running component model") + return c.runtime.Update(comps) +} + +type coordinatorState struct { + config *config.Config + ast *transpiler.AST + vars []*transpiler.Vars + components []component.Component +} diff --git a/internal/pkg/agent/application/fleet_server_bootstrap.go b/internal/pkg/agent/application/fleet_server_bootstrap.go index 9b72b177eb9..069ce839161 100644 --- a/internal/pkg/agent/application/fleet_server_bootstrap.go +++ b/internal/pkg/agent/application/fleet_server_bootstrap.go @@ -4,6 +4,7 @@ package application +/* import ( "context" @@ -233,3 +234,4 @@ func emit(ctx context.Context, log *logger.Logger, agentInfo transpiler.AgentInf }, }) } +*/ diff --git a/internal/pkg/agent/application/local_mode.go b/internal/pkg/agent/application/local_mode.go index 29f311fe582..599174b4fa8 100644 --- a/internal/pkg/agent/application/local_mode.go +++ b/internal/pkg/agent/application/local_mode.go @@ -6,34 +6,27 @@ package application import ( "context" - "path/filepath" - + "fmt" "go.elastic.co/apm" + "path/filepath" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/filters" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/emitter" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/emitter/modifiers" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/router" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/stream" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade" "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/operation" + "github.com/elastic/elastic-agent/internal/pkg/artifact" "github.com/elastic/elastic-agent/internal/pkg/capabilities" "github.com/elastic/elastic-agent/internal/pkg/composable" "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/internal/pkg/core/monitoring" "github.com/elastic/elastic-agent/internal/pkg/core/status" "github.com/elastic/elastic-agent/internal/pkg/dir" acker "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/noop" - reporting "github.com/elastic/elastic-agent/internal/pkg/reporter" - logreporter "github.com/elastic/elastic-agent/internal/pkg/reporter/log" "github.com/elastic/elastic-agent/internal/pkg/sorted" + "github.com/elastic/elastic-agent/pkg/component" + 
"github.com/elastic/elastic-agent/pkg/component/runtime" "github.com/elastic/elastic-agent/pkg/core/logger" - "github.com/elastic/elastic-agent/pkg/core/server" ) type discoverFunc func() ([]string, error) @@ -44,24 +37,26 @@ var ErrNoConfiguration = errors.New("no configuration found", errors.TypeConfig) // Local represents a standalone agents, that will read his configuration directly from disk. // Some part of the configuration can be reloaded. type Local struct { - bgContext context.Context - cancelCtxFn context.CancelFunc log *logger.Logger - router pipeline.Router - source source agentInfo *info.AgentInfo - srv *server.Server -} + caps capabilities.Capability + reexec reexecManager + uc upgraderControl + downloadCfg *artifact.Config + + runtime coordinator.RuntimeManager + config coordinator.ConfigManager + composable coordinator.VarsManager -type source interface { - Start() error - Stop() error + coordinator *coordinator.Coordinator } -// newLocal return a agent managed by local configuration. +// newLocal return an agent managed by local configuration. func newLocal( - ctx context.Context, log *logger.Logger, + specs component.RuntimeSpecs, + caps capabilities.Capability, + cfg *configuration.Configuration, pathConfigFile string, rawConfig *config.Config, reexec reexecManager, @@ -70,96 +65,36 @@ func newLocal( agentInfo *info.AgentInfo, tracer *apm.Tracer, ) (*Local, error) { - caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), log, statusCtrl) - if err != nil { - return nil, err - } - - cfg, err := configuration.NewFromConfig(rawConfig) - if err != nil { - return nil, err - } - - if log == nil { - log, err = logger.NewFromConfig("", cfg.Settings.LoggingConfig, true) - if err != nil { - return nil, err - } - } - - logR := logreporter.NewReporter(log) - localApplication := &Local{ - log: log, - agentInfo: agentInfo, - } - - localApplication.bgContext, localApplication.cancelCtxFn = context.WithCancel(ctx) - localApplication.srv, err = server.NewFromConfig(log, cfg.Settings.GRPC, &operation.ApplicationStatusHandler{}, tracer) - if err != nil { - return nil, errors.New(err, "initialize GRPC listener") - } - - reporter := reporting.NewReporter(localApplication.bgContext, log, localApplication.agentInfo, logR) - - monitor, err := monitoring.NewMonitor(cfg.Settings) - if err != nil { - return nil, errors.New(err, "failed to initialize monitoring") - } - - router, err := router.New(log, stream.Factory(localApplication.bgContext, agentInfo, cfg.Settings, localApplication.srv, reporter, monitor, statusCtrl)) - if err != nil { - return nil, errors.New(err, "fail to initialize pipeline router") - } - localApplication.router = router - - composableCtrl, err := composable.New(log, rawConfig) - if err != nil { - return nil, errors.New(err, "failed to initialize composable controller") - } - - discover := discoverer(pathConfigFile, cfg.Settings.Path, externalConfigsGlob()) - emit, err := emitter.New( - localApplication.bgContext, - log, - agentInfo, - composableCtrl, - router, - &pipeline.ConfigModifiers{ - Decorators: []pipeline.DecoratorFunc{modifiers.InjectMonitoring}, - Filters: []pipeline.FilterFunc{filters.StreamChecker}, - }, - caps, - monitor, - ) - if err != nil { - return nil, err + log: log, + agentInfo: agentInfo, + caps: caps, + reexec: reexec, + uc: uc, + downloadCfg: cfg.Settings.DownloadConfig, } loader := config.NewLoader(log, externalConfigsGlob()) - - var cfgSource source + discover := discoverer(pathConfigFile, cfg.Settings.Path, 
externalConfigsGlob()) if !cfg.Settings.Reload.Enabled { log.Debug("Reloading of configuration is off") - cfgSource = newOnce(log, discover, loader, emit) + localApplication.config = newOnce(log, discover, loader) } else { log.Debugf("Reloading of configuration is on, frequency is set to %s", cfg.Settings.Reload.Period) - cfgSource = newPeriodic(log, cfg.Settings.Reload.Period, discover, loader, emit) + localApplication.config = newPeriodic(log, cfg.Settings.Reload.Period, discover, loader) } - localApplication.source = cfgSource + var err error + localApplication.runtime, err = runtime.NewManager(log, cfg.Settings.GRPC.String(), tracer) + if err != nil { + return nil, fmt.Errorf("failed to initialize runtime manager: %w", err) + } + localApplication.composable, err = composable.New(log, rawConfig) + if err != nil { + return nil, errors.New(err, "failed to initialize composable controller") + } - // create a upgrader to use in local mode - upgrader := upgrade.NewUpgrader( - agentInfo, - cfg.Settings.DownloadConfig, - log, - []context.CancelFunc{localApplication.cancelCtxFn}, - reexec, - acker.NewAcker(), - reporter, - caps) - uc.SetUpgrader(upgrader) + localApplication.coordinator = coordinator.New(log, specs, localApplication.runtime, localApplication.config, localApplication.composable, caps) return localApplication, nil } @@ -170,30 +105,29 @@ func externalConfigsGlob() string { // Routes returns a list of routes handled by agent. func (l *Local) Routes() *sorted.Set { - return l.router.Routes() + return nil } -// Start starts a local agent. -func (l *Local) Start() error { - l.log.Info("Agent is starting") - defer l.log.Info("Agent is stopped") - - if err := l.srv.Start(); err != nil { - return err - } - if err := l.source.Start(); err != nil { - return err - } +// Run runs the local agent. +// +// Blocks until the context is cancelled. +func (l *Local) Run(ctx context.Context) error { + ctx, cancel := context.WithCancel(ctx) + + u := upgrade.NewUpgrader( + l.agentInfo, + l.downloadCfg, + l.log, + []context.CancelFunc{cancel}, + l.reexec, + acker.NewAcker(), + nil, + l.caps) + l.uc.SetUpgrader(u) - return nil -} + err := l.coordinator.Run(ctx) -// Stop stops a local agent. -func (l *Local) Stop() error { - err := l.source.Stop() - l.cancelCtxFn() - l.router.Shutdown() - l.srv.Stop() + l.uc.SetUpgrader(nil) return err } diff --git a/internal/pkg/agent/application/managed_mode.go b/internal/pkg/agent/application/managed_mode.go index d334ae0198c..036ffebdc89 100644 --- a/internal/pkg/agent/application/managed_mode.go +++ b/internal/pkg/agent/application/managed_mode.go @@ -4,14 +4,16 @@ package application +/* import ( "context" "fmt" - + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" + "github.com/elastic/elastic-agent/internal/pkg/artifact" + "github.com/elastic/elastic-agent/pkg/component" + "github.com/elastic/elastic-agent/pkg/component/runtime" "go.elastic.co/apm" - "github.com/elastic/go-sysinfo" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/filters" "github.com/elastic/elastic-agent/internal/pkg/agent/application/gateway" fleetgateway "github.com/elastic/elastic-agent/internal/pkg/agent/application/gateway/fleet" @@ -62,34 +64,37 @@ type stateStore interface { // Managed application, when the application is run in managed mode, most of the configuration are // coming from the Fleet App. 
type Managed struct { - bgContext context.Context - cancelCtxFn context.CancelFunc - log *logger.Logger - Config configuration.FleetAgentConfig + log *logger.Logger + Config configuration.FleetAgentConfig + gateway gateway.FleetGateway + stateStore stateStore + upgrader *upgrade.Upgrader + agentInfo *info.AgentInfo - gateway gateway.FleetGateway - router pipeline.Router - srv *server.Server - stateStore stateStore - upgrader *upgrade.Upgrader + caps capabilities.Capability + reexec reexecManager + uc upgraderControl + downloadCfg *artifact.Config + + runtime coordinator.RuntimeManager + config coordinator.ConfigManager + composable coordinator.VarsManager + + coordinator *coordinator.Coordinator } func newManaged( - ctx context.Context, log *logger.Logger, - storeSaver storage.Store, + specs component.RuntimeSpecs, + caps capabilities.Capability, cfg *configuration.Configuration, + storeSaver storage.Store, rawConfig *config.Config, reexec reexecManager, statusCtrl status.Controller, agentInfo *info.AgentInfo, tracer *apm.Tracer, ) (*Managed, error) { - caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), log, statusCtrl) - if err != nil { - return nil, err - } - client, err := client.NewAuthWithConfig(log, cfg.Fleet.AccessAPIKey, cfg.Fleet.Client) if err != nil { return nil, errors.New(err, @@ -98,69 +103,22 @@ func newManaged( errors.M(errors.MetaKeyURI, cfg.Fleet.Client.Host)) } - sysInfo, err := sysinfo.Host() - if err != nil { - return nil, errors.New(err, - "fail to get system information", - errors.TypeUnexpected) - } - managedApplication := &Managed{ log: log, agentInfo: agentInfo, } - managedApplication.bgContext, managedApplication.cancelCtxFn = context.WithCancel(ctx) - managedApplication.srv, err = server.NewFromConfig(log, cfg.Settings.GRPC, &operation.ApplicationStatusHandler{}, tracer) - if err != nil { - return nil, errors.New(err, "initialize GRPC listener", errors.TypeNetwork) - } - // must start before `Start` is called as Fleet will already try to start applications - // before `Start` is even called. 
- err = managedApplication.srv.Start() + managedApplication.runtime, err = runtime.NewManager(log, cfg.Settings.GRPC.String(), tracer) if err != nil { - return nil, errors.New(err, "starting GRPC listener", errors.TypeNetwork) + return nil, fmt.Errorf("failed to initialize runtime manager: %w", err) } - - logR := logreporter.NewReporter(log) - fleetR, err := fleetreporter.NewReporter(agentInfo, log, cfg.Fleet.Reporting) - if err != nil { - return nil, errors.New(err, "fail to create reporters") - } - - combinedReporter := reporting.NewReporter(managedApplication.bgContext, log, agentInfo, logR, fleetR) - monitor, err := monitoring.NewMonitor(cfg.Settings) - if err != nil { - return nil, errors.New(err, "failed to initialize monitoring") - } - - router, err := router.New(log, stream.Factory(managedApplication.bgContext, agentInfo, cfg.Settings, managedApplication.srv, combinedReporter, monitor, statusCtrl)) - if err != nil { - return nil, errors.New(err, "fail to initialize pipeline router") - } - managedApplication.router = router - - composableCtrl, err := composable.New(log, rawConfig) + managedApplication.composable, err = composable.New(log, rawConfig) if err != nil { return nil, errors.New(err, "failed to initialize composable controller") } - emit, err := emitter.New( - managedApplication.bgContext, - log, - agentInfo, - composableCtrl, - router, - &pipeline.ConfigModifiers{ - Decorators: []pipeline.DecoratorFunc{modifiers.InjectMonitoring}, - Filters: []pipeline.FilterFunc{filters.StreamChecker, modifiers.InjectFleet(rawConfig, sysInfo.Info(), agentInfo)}, - }, - caps, - monitor, - ) - if err != nil { - return nil, err - } + managedApplication.coordinator = coordinator.New(log, specs, managedApplication.runtime, managedApplication.config, managedApplication.composable, caps) + acker, err := fleet.NewAcker(log, agentInfo, client) if err != nil { return nil, err @@ -359,3 +317,4 @@ func (m *Managed) wasUnenrolled() bool { return false } +*/ diff --git a/internal/pkg/agent/application/once.go b/internal/pkg/agent/application/once.go index 19a17c61df9..a87b2f97151 100644 --- a/internal/pkg/agent/application/once.go +++ b/internal/pkg/agent/application/once.go @@ -6,8 +6,8 @@ package application import ( "context" + "fmt" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/config" "github.com/elastic/elastic-agent/pkg/core/logger" @@ -17,14 +17,14 @@ type once struct { log *logger.Logger discover discoverFunc loader *config.Loader - emitter pipeline.EmitterFunc + ch chan *config.Config } -func newOnce(log *logger.Logger, discover discoverFunc, loader *config.Loader, emitter pipeline.EmitterFunc) *once { - return &once{log: log, discover: discover, loader: loader, emitter: emitter} +func newOnce(log *logger.Logger, discover discoverFunc, loader *config.Loader) *once { + return &once{log: log, discover: discover, loader: loader, ch: make(chan *config.Config)} } -func (o *once) Start() error { +func (o *once) Run(ctx context.Context) error { files, err := o.discover() if err != nil { return errors.New(err, "could not discover configuration files", errors.TypeConfig) @@ -34,18 +34,23 @@ func (o *once) Start() error { return ErrNoConfiguration } - return readfiles(context.Background(), files, o.loader, o.emitter) + cfg, err := readfiles(files, o.loader) + if err != nil { + return err + } + o.ch <- cfg + <-ctx.Done() + return 
ctx.Err() } -func (o *once) Stop() error { - return nil +func (o *once) Watch() <-chan *config.Config { + return o.ch } -func readfiles(ctx context.Context, files []string, loader *config.Loader, emitter pipeline.EmitterFunc) error { +func readfiles(files []string, loader *config.Loader) (*config.Config, error) { c, err := loader.Load(files) if err != nil { - return errors.New(err, "could not load or merge configuration", errors.TypeConfig) + return nil, fmt.Errorf("failed to load or merge configuration: %w", err) } - - return emitter(ctx, c) + return c, nil } diff --git a/internal/pkg/agent/application/periodic.go b/internal/pkg/agent/application/periodic.go index 10a3c26c11d..21900530c3e 100644 --- a/internal/pkg/agent/application/periodic.go +++ b/internal/pkg/agent/application/periodic.go @@ -9,7 +9,6 @@ import ( "strings" "time" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/config" "github.com/elastic/elastic-agent/internal/pkg/filewatcher" @@ -19,35 +18,34 @@ import ( type periodic struct { log *logger.Logger period time.Duration - done chan struct{} watcher *filewatcher.Watch loader *config.Loader - emitter pipeline.EmitterFunc discover discoverFunc + ch chan *config.Config } -func (p *periodic) Start() error { - go func() { - if err := p.work(); err != nil { - p.log.Debugf("Failed to read configuration, error: %s", err) +func (p *periodic) Run(ctx context.Context) error { + if err := p.work(); err != nil { + return err + } + + t := time.NewTicker(p.period) + defer t.Stop() + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-t.C: } - WORK: - for { - t := time.NewTimer(p.period) - select { - case <-p.done: - t.Stop() - break WORK - case <-t.C: - } - - if err := p.work(); err != nil { - p.log.Debugf("Failed to read configuration, error: %s", err) - } + if err := p.work(); err != nil { + return err } - }() - return nil + } +} + +func (p *periodic) Watch() <-chan *config.Config { + return p.ch } func (p *periodic) work() error { @@ -92,30 +90,26 @@ func (p *periodic) work() error { p.log.Debugf("Unchanged %d files: %s", len(s.Unchanged), strings.Join(s.Updated, ", ")) } - err := readfiles(context.Background(), files, p.loader, p.emitter) + cfg, err := readfiles(files, p.loader) if err != nil { // assume something when really wrong and invalidate any cache // so we get a full new config on next tick. 
p.watcher.Invalidate() - return errors.New(err, "could not emit configuration") + return err } + p.ch <- cfg + return nil } p.log.Info("No configuration change") return nil } -func (p *periodic) Stop() error { - close(p.done) - return nil -} - func newPeriodic( log *logger.Logger, period time.Duration, discover discoverFunc, loader *config.Loader, - emitter pipeline.EmitterFunc, ) *periodic { w, err := filewatcher.New(log, filewatcher.DefaultComparer) @@ -127,10 +121,9 @@ func newPeriodic( return &periodic{ log: log, period: period, - done: make(chan struct{}), watcher: w, discover: discover, loader: loader, - emitter: emitter, + ch: make(chan *config.Config), } } diff --git a/internal/pkg/agent/application/pipeline/emitter/emitter.go b/internal/pkg/agent/application/pipeline/emitter/emitter.go index ac94d48d8b4..9fd15edb7e3 100644 --- a/internal/pkg/agent/application/pipeline/emitter/emitter.go +++ b/internal/pkg/agent/application/pipeline/emitter/emitter.go @@ -12,7 +12,6 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" "github.com/elastic/elastic-agent/internal/pkg/capabilities" "github.com/elastic/elastic-agent/internal/pkg/composable" "github.com/elastic/elastic-agent/internal/pkg/config" @@ -22,9 +21,7 @@ import ( // New creates a new emitter function. func New(ctx context.Context, log *logger.Logger, agentInfo *info.AgentInfo, controller composable.Controller, router pipeline.Router, modifiers *pipeline.ConfigModifiers, caps capabilities.Capability, reloadables ...reloadable) (pipeline.EmitterFunc, error) { ctrl := NewController(log, agentInfo, controller, router, modifiers, caps, reloadables...) - err := controller.Run(ctx, func(vars []*transpiler.Vars) { - ctrl.Set(ctx, vars) - }) + err := controller.Run(ctx) if err != nil { return nil, errors.New(err, "failed to start composable controller") } diff --git a/internal/pkg/agent/cmd/inspect.go b/internal/pkg/agent/cmd/inspect.go index d7832f48772..f14c864659c 100644 --- a/internal/pkg/agent/cmd/inspect.go +++ b/internal/pkg/agent/cmd/inspect.go @@ -374,14 +374,15 @@ func newWaitForCompose(wrapped composable.Controller) *waitForCompose { } } -func (w *waitForCompose) Run(ctx context.Context, cb composable.VarsCallback) error { - err := w.controller.Run(ctx, func(vars []*transpiler.Vars) { - cb(vars) - w.done <- true - }) +func (w *waitForCompose) Run(ctx context.Context) error { + err := w.controller.Run(ctx) return err } +func (w *waitForCompose) Watch() <-chan []*transpiler.Vars { + return nil +} + func (w *waitForCompose) Wait() { <-w.done } diff --git a/internal/pkg/agent/cmd/run.go b/internal/pkg/agent/cmd/run.go index b584baf2f09..e3d30e054b4 100644 --- a/internal/pkg/agent/cmd/run.go +++ b/internal/pkg/agent/cmd/run.go @@ -65,7 +65,7 @@ func newRunCommandWithArgs(_ []string, streams *cli.IOStreams) *cobra.Command { } } -func run(override cfgOverrider) error { +func run(override cfgOverrider, modifiers ...application.PlatformModifier) error { // Windows: Mark service as stopped. // After this is run, the service is considered by the OS to be stopped. // This must be the first deferred cleanup task (last to execute). 
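For readers following the new config-manager contract, the following is a minimal, self-contained Go sketch (not part of the patch) of the Run/Watch shape that once and periodic now implement and that the coordinator consumes: Run blocks until its context is cancelled and publishes loaded configurations on the channel returned by Watch. Config, onceSource, and newOnceSource below are illustrative stand-ins, not the agent's real types.

package main

import (
	"context"
	"fmt"
	"time"
)

// Config stands in for the agent's *config.Config.
type Config struct{ Source string }

// onceSource loads a configuration once, publishes it, and then blocks until
// cancelled, mirroring the Run/Watch contract of the once and periodic managers.
type onceSource struct {
	ch chan *Config
}

func newOnceSource() *onceSource { return &onceSource{ch: make(chan *Config)} }

func (o *onceSource) Run(ctx context.Context) error {
	cfg := &Config{Source: "files on disk"} // stand-in for discover + loader.Load
	select {
	case o.ch <- cfg:
	case <-ctx.Done():
		return ctx.Err()
	}
	<-ctx.Done()
	return ctx.Err()
}

func (o *onceSource) Watch() <-chan *Config { return o.ch }

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	src := newOnceSource()
	go func() { _ = src.Run(ctx) }()

	// A coordinator-style consumer: react to configuration changes until cancelled.
	for {
		select {
		case cfg := <-src.Watch():
			fmt.Println("received configuration from:", cfg.Source)
		case <-ctx.Done():
			return
		}
	}
}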
@@ -123,7 +123,7 @@ func run(override cfgOverrider) error { // that writes the agentID into fleet.enc (encrypted fleet.yml) before even loading the configuration. err = secret.CreateAgentSecret() if err != nil { - return err + return fmt.Errorf("failed to read/write secrets: %w", err) } agentInfo, err := info.NewAgentInfoWithLog(defaultLogLevel(cfg), createAgentID) @@ -174,7 +174,7 @@ func run(override cfgOverrider) error { } defer control.Stop() - app, err := application.New(logger, rex, statusCtrl, control, agentInfo, tracer) + app, err := application.New(logger, rex, statusCtrl, control, agentInfo, tracer, modifiers...) if err != nil { return err } @@ -190,9 +190,15 @@ func run(override cfgOverrider) error { _ = serverStopFn() }() - if err := app.Start(); err != nil { - return err - } + appDone := make(chan bool) + appErrCh := make(chan error) + ctx, cancel = context.WithCancel(context.Background()) + defer cancel() + go func() { + err := app.Run(ctx) + close(appDone) + appErrCh <- err + }() // listen for signals signals := make(chan os.Signal, 1) @@ -203,6 +209,8 @@ func run(override cfgOverrider) error { select { case <-stop: breakout = true + case <-appDone: + breakout = true case <-rex.ShutdownChan(): reexecing = true breakout = true @@ -222,7 +230,9 @@ func run(override cfgOverrider) error { } } - err = app.Stop() + cancel() + err = <-appErrCh + if !reexecing { logger.Info("Shutting down completed.") return err diff --git a/internal/pkg/agent/configuration/grpc.go b/internal/pkg/agent/configuration/grpc.go new file mode 100644 index 00000000000..24d095e068d --- /dev/null +++ b/internal/pkg/agent/configuration/grpc.go @@ -0,0 +1,22 @@ +package configuration + +import "fmt" + +// GRPCConfig is a configuration of GRPC server. +type GRPCConfig struct { + Address string `config:"address"` + Port uint16 `config:"port"` +} + +// DefaultGRPCConfig creates a default server configuration. +func DefaultGRPCConfig() *GRPCConfig { + return &GRPCConfig{ + Address: "localhost", + Port: 6789, + } +} + +// String returns the composed listen address for the GRPC. +func (cfg *GRPCConfig) String() string { + return fmt.Sprintf("%s:%d", cfg.Address, cfg.Port) +} diff --git a/internal/pkg/agent/configuration/settings.go b/internal/pkg/agent/configuration/settings.go index 7445f02a462..7c2c422a65b 100644 --- a/internal/pkg/agent/configuration/settings.go +++ b/internal/pkg/agent/configuration/settings.go @@ -12,7 +12,6 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/core/retry" "github.com/elastic/elastic-agent/pkg/core/logger" "github.com/elastic/elastic-agent/pkg/core/process" - "github.com/elastic/elastic-agent/pkg/core/server" ) // ExternalInputsPattern is a glob that matches the paths of external configuration files. 
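The new GRPCConfig replaces the old server.Config for the control protocol listener. Below is a small, self-contained Go sketch of how the default composes into the listen address that runtime.NewManager receives; the type mirrors the grpc.go file added above, while the main function is only for illustration.

package main

import "fmt"

// GRPCConfig mirrors internal/pkg/agent/configuration/grpc.go from this patch.
type GRPCConfig struct {
	Address string
	Port    uint16
}

// DefaultGRPCConfig creates a default server configuration.
func DefaultGRPCConfig() *GRPCConfig {
	return &GRPCConfig{Address: "localhost", Port: 6789}
}

// String returns the composed listen address for the GRPC server.
func (cfg *GRPCConfig) String() string {
	return fmt.Sprintf("%s:%d", cfg.Address, cfg.Port)
}

func main() {
	cfg := DefaultGRPCConfig()
	// This composed address is what local and managed mode hand to runtime.NewManager.
	fmt.Println(cfg.String()) // localhost:6789
}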
@@ -22,7 +21,7 @@ var ExternalInputsPattern = filepath.Join("inputs.d", "*.yml") type SettingsConfig struct { DownloadConfig *artifact.Config `yaml:"download" config:"download" json:"download"` ProcessConfig *process.Config `yaml:"process" config:"process" json:"process"` - GRPC *server.Config `yaml:"grpc" config:"grpc" json:"grpc"` + GRPC *GRPCConfig `yaml:"grpc" config:"grpc" json:"grpc"` RetryConfig *retry.Config `yaml:"retry" config:"retry" json:"retry"` MonitoringConfig *monitoringCfg.MonitoringConfig `yaml:"monitoring" config:"monitoring" json:"monitoring"` LoggingConfig *logger.Config `yaml:"logging,omitempty" config:"logging,omitempty" json:"logging,omitempty"` @@ -40,7 +39,7 @@ func DefaultSettingsConfig() *SettingsConfig { DownloadConfig: artifact.DefaultConfig(), LoggingConfig: logger.DefaultLoggingConfig(), MonitoringConfig: monitoringCfg.DefaultConfig(), - GRPC: server.DefaultGRPCConfig(), + GRPC: DefaultGRPCConfig(), Reload: DefaultReloadConfig(), } } diff --git a/internal/pkg/agent/install/uninstall.go b/internal/pkg/agent/install/uninstall.go index 6b4e717fa73..fcf5f8f6c54 100644 --- a/internal/pkg/agent/install/uninstall.go +++ b/internal/pkg/agent/install/uninstall.go @@ -7,14 +7,12 @@ package install import ( "context" "fmt" + "github.com/kardianos/service" "os" "os/exec" "path/filepath" "runtime" "strings" - "sync" - - "github.com/kardianos/service" "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" @@ -233,19 +231,12 @@ func applyDynamics(ctx context.Context, log *logger.Logger, cfg *config.Config) inputs, ok := transpiler.Lookup(ast, "inputs") if ok { varsArray := make([]*transpiler.Vars, 0) - var wg sync.WaitGroup - wg.Add(1) - varsCallback := func(vv []*transpiler.Vars) { - varsArray = vv - wg.Done() - } ctrl, err := composable.New(log, cfg) if err != nil { return nil, err } - _ = ctrl.Run(ctx, varsCallback) - wg.Wait() + _ = ctrl.Run(ctx) renderedInputs, err := transpiler.RenderInputs(inputs, varsArray) if err != nil { diff --git a/internal/pkg/composable/controller.go b/internal/pkg/composable/controller.go index a14e111194f..7ee0dc385ca 100644 --- a/internal/pkg/composable/controller.go +++ b/internal/pkg/composable/controller.go @@ -22,19 +22,21 @@ import ( "github.com/elastic/elastic-agent/pkg/core/logger" ) -// VarsCallback is callback called when the current vars state changes. -type VarsCallback func([]*transpiler.Vars) - // Controller manages the state of the providers current context. type Controller interface { // Run runs the controller. // // Cancelling the context stops the controller. - Run(ctx context.Context, cb VarsCallback) error + Run(ctx context.Context) error + + // Watch returns the channel to watch for variable changes. + Watch() <-chan []*transpiler.Vars } // controller manages the state of the providers current context. type controller struct { + logger *logger.Logger + ch chan []*transpiler.Vars contextProviders map[string]*contextProviderState dynamicProviders map[string]*dynamicProviderState } @@ -87,28 +89,39 @@ func New(log *logger.Logger, c *config.Config) (Controller, error) { } return &controller{ + logger: l, + ch: make(chan []*transpiler.Vars), contextProviders: contextProviders, dynamicProviders: dynamicProviders, }, nil } // Run runs the controller. 
-func (c *controller) Run(ctx context.Context, cb VarsCallback) error { - // large number not to block performing Run on the provided providers - notify := make(chan bool, 5000) +func (c *controller) Run(ctx context.Context) error { + c.logger.Debugf("Starting controller for composable inputs") + defer c.logger.Debugf("Stopped controller for composable inputs") + + notify := make(chan bool) localCtx, cancel := context.WithCancel(ctx) + defer cancel() fetchContextProviders := mapstr.M{} + var wg sync.WaitGroup + wg.Add(len(c.contextProviders) + len(c.dynamicProviders)) + // run all the enabled context providers for name, state := range c.contextProviders { state.Context = localCtx state.signal = notify - err := state.provider.Run(state) - if err != nil { - cancel() - return errors.New(err, fmt.Sprintf("failed to run provider '%s'", name), errors.TypeConfig, errors.M("provider", name)) - } + go func() { + defer wg.Done() + err := state.provider.Run(state) + if err != nil && !errors.Is(err, context.Canceled) { + err = errors.New(err, fmt.Sprintf("failed to run provider '%s'", name), errors.TypeConfig, errors.M("provider", name)) + c.logger.Errorf("%s", err) + } + }() if p, ok := state.provider.(corecomp.FetchContextProvider); ok { _, _ = fetchContextProviders.Put(name, p) } @@ -118,65 +131,68 @@ func (c *controller) Run(ctx context.Context, cb VarsCallback) error { for name, state := range c.dynamicProviders { state.Context = localCtx state.signal = notify - err := state.provider.Run(state) - if err != nil { - cancel() - return errors.New(err, fmt.Sprintf("failed to run provider '%s'", name), errors.TypeConfig, errors.M("provider", name)) - } + go func() { + defer wg.Done() + err := state.provider.Run(state) + if err != nil && !errors.Is(err, context.Canceled) { + err = errors.New(err, fmt.Sprintf("failed to run provider '%s'", name), errors.TypeConfig, errors.M("provider", name)) + c.logger.Errorf("%s", err) + } + }() } - go func() { + c.logger.Debugf("Started controller for composable inputs") + + // performs debounce of notifies; accumulates them into 100 millisecond chunks + t := time.NewTimer(100 * time.Millisecond) + for { + DEBOUNCE: for { - // performs debounce of notifies; accumulates them into 100 millisecond chunks - changed := false - t := time.NewTimer(100 * time.Millisecond) - for { - exitloop := false - select { - case <-ctx.Done(): - cancel() - return - case <-notify: - changed = true - case <-t.C: - exitloop = true - } - if exitloop { - break - } + select { + case <-ctx.Done(): + c.logger.Debugf("Stopping controller for composable inputs") + t.Stop() + cancel() + wg.Wait() + return ctx.Err() + case <-notify: + t.Reset(100 * time.Millisecond) + c.logger.Debugf("Variable state changed for composable inputs; debounce started") + drainChan(notify) + case <-t.C: + break DEBOUNCE } + } - t.Stop() - if !changed { - continue - } + c.logger.Debugf("Computing new variable state for composable inputs") - // build the vars list of mappings - vars := make([]*transpiler.Vars, 1) - mapping := map[string]interface{}{} - for name, state := range c.contextProviders { - mapping[name] = state.Current() - } - // this is ensured not to error, by how the mappings states are verified - vars[0], _ = transpiler.NewVars(mapping, fetchContextProviders) - - // add to the vars list for each dynamic providers mappings - for name, state := range c.dynamicProviders { - for _, mappings := range state.Mappings() { - local, _ := cloneMap(mapping) // will not fail; already been successfully cloned once - 
local[name] = mappings.mapping - // this is ensured not to error, by how the mappings states are verified - v, _ := transpiler.NewVarsWithProcessors(local, name, mappings.processors, fetchContextProviders) - vars = append(vars, v) - } + // build the vars list of mappings + vars := make([]*transpiler.Vars, 1) + mapping := map[string]interface{}{} + for name, state := range c.contextProviders { + mapping[name] = state.Current() + } + // this is ensured not to error, by how the mappings states are verified + vars[0], _ = transpiler.NewVars(mapping, fetchContextProviders) + + // add to the vars list for each dynamic providers mappings + for name, state := range c.dynamicProviders { + for _, mappings := range state.Mappings() { + local, _ := cloneMap(mapping) // will not fail; already been successfully cloned once + local[name] = mappings.mapping + // this is ensured not to error, by how the mappings states are verified + v, _ := transpiler.NewVarsWithProcessors(local, name, mappings.processors, fetchContextProviders) + vars = append(vars, v) } - - // execute the callback - cb(vars) } - }() - return nil + c.ch <- vars + } +} + +// Watch returns the channel for variable changes. +func (c *controller) Watch() <-chan []*transpiler.Vars { + return c.ch } type contextProviderState struct { @@ -351,3 +367,13 @@ func addToSet(set []int, i int) []int { } return append(set, i) } + +func drainChan(ch chan bool) { + for { + select { + case <-ch: + default: + return + } + } +} diff --git a/internal/pkg/composable/controller_test.go b/internal/pkg/composable/controller_test.go index 09780767928..9469639dd79 100644 --- a/internal/pkg/composable/controller_test.go +++ b/internal/pkg/composable/controller_test.go @@ -6,8 +6,9 @@ package composable_test import ( "context" - "sync" + "errors" "testing" + "time" "github.com/elastic/elastic-agent/pkg/core/logger" @@ -80,17 +81,34 @@ func TestController(t *testing.T) { c, err := composable.New(log, cfg) require.NoError(t, err) - var wg sync.WaitGroup ctx, cancel := context.WithCancel(context.Background()) defer cancel() - wg.Add(1) + + timeoutCtx, timeoutCancel := context.WithTimeout(ctx, 1*time.Second) + defer timeoutCancel() + var setVars []*transpiler.Vars - err = c.Run(ctx, func(vars []*transpiler.Vars) { - setVars = vars - wg.Done() - }) + go func() { + defer cancel() + for { + select { + case <-timeoutCtx.Done(): + return + case vars := <-c.Watch(): + setVars = vars + } + } + }() + + errCh := make(chan error) + go func() { + errCh <- c.Run(ctx) + }() + err = <-errCh + if errors.Is(err, context.Canceled) { + err = nil + } require.NoError(t, err) - wg.Wait() assert.Len(t, setVars, 3) diff --git a/internal/pkg/composable/providers/docker/docker.go b/internal/pkg/composable/providers/docker/docker.go index 4bdc6d11cfe..0652201a17f 100644 --- a/internal/pkg/composable/providers/docker/docker.go +++ b/internal/pkg/composable/providers/docker/docker.go @@ -54,54 +54,51 @@ func (c *dynamicProvider) Run(comm composable.DynamicProviderComm) error { c.logger.Infof("Docker provider skipped, unable to connect: %s", err) return nil } + defer watcher.Stop() - go func() { - for { - select { - case <-comm.Done(): - startListener.Stop() - stopListener.Stop() + for { + select { + case <-comm.Done(): + startListener.Stop() + stopListener.Stop() - // Stop all timers before closing the channel - for _, stopper := range stoppers { - stopper.Stop() - } - close(stopTrigger) - return - case event := <-startListener.Events(): - data, err := generateData(event) - if err != nil { 
- c.logger.Errorf("%s", err) - continue - } - if stopper, ok := stoppers[data.container.ID]; ok { - c.logger.Debugf("container %s is restarting, aborting pending stop", data.container.ID) - stopper.Stop() - delete(stoppers, data.container.ID) - return - } - err = comm.AddOrUpdate(data.container.ID, ContainerPriority, data.mapping, data.processors) - if err != nil { - c.logger.Errorf("%s", err) - } - case event := <-stopListener.Events(): - data, err := generateData(event) - if err != nil { - c.logger.Errorf("%s", err) - continue - } - stopper := time.AfterFunc(c.config.CleanupTimeout, func() { - stopTrigger <- data - }) - stoppers[data.container.ID] = stopper - case data := <-stopTrigger: + // Stop all timers before closing the channel + for _, stopper := range stoppers { + stopper.Stop() + } + close(stopTrigger) + return comm.Err() + case event := <-startListener.Events(): + data, err := generateData(event) + if err != nil { + c.logger.Errorf("%s", err) + continue + } + if stopper, ok := stoppers[data.container.ID]; ok { + c.logger.Debugf("container %s is restarting, aborting pending stop", data.container.ID) + stopper.Stop() delete(stoppers, data.container.ID) - comm.Remove(data.container.ID) + continue + } + err = comm.AddOrUpdate(data.container.ID, ContainerPriority, data.mapping, data.processors) + if err != nil { + c.logger.Errorf("%s", err) } + case event := <-stopListener.Events(): + data, err := generateData(event) + if err != nil { + c.logger.Errorf("%s", err) + continue + } + stopper := time.AfterFunc(c.config.CleanupTimeout, func() { + stopTrigger <- data + }) + stoppers[data.container.ID] = stopper + case data := <-stopTrigger: + delete(stoppers, data.container.ID) + comm.Remove(data.container.ID) } - }() - - return nil + } } // DynamicProviderBuilder builds the dynamic provider. diff --git a/internal/pkg/composable/providers/host/host.go b/internal/pkg/composable/providers/host/host.go index 25d53430a2f..a23b92f1d2b 100644 --- a/internal/pkg/composable/providers/host/host.go +++ b/internal/pkg/composable/providers/host/host.go @@ -50,34 +50,30 @@ func (c *contextProvider) Run(comm corecomp.ContextProviderComm) error { } // Update context when any host information changes. - go func() { - for { - t := time.NewTimer(c.CheckInterval) - select { - case <-comm.Done(): - t.Stop() - return - case <-t.C: - } - - updated, err := c.fetcher() - if err != nil { - c.logger.Warnf("Failed fetching latest host information: %s", err) - continue - } - if reflect.DeepEqual(current, updated) { - // nothing to do - continue - } - current = updated - err = comm.Set(updated) - if err != nil { - c.logger.Errorf("Failed updating mapping to latest host information: %s", err) - } + for { + t := time.NewTimer(c.CheckInterval) + select { + case <-comm.Done(): + t.Stop() + return comm.Err() + case <-t.C: } - }() - return nil + updated, err := c.fetcher() + if err != nil { + c.logger.Warnf("Failed fetching latest host information: %s", err) + continue + } + if reflect.DeepEqual(current, updated) { + // nothing to do + continue + } + current = updated + err = comm.Set(updated) + if err != nil { + c.logger.Errorf("Failed updating mapping to latest host information: %s", err) + } + } } // ContextProviderBuilder builds the context provider. 
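The docker and host providers above illustrate the broader v2 provider contract: Run no longer spawns its own goroutine and returns immediately; it blocks, does its periodic work, and returns comm.Err() once comm.Done() fires. Here is a minimal, self-contained sketch of that shape, assuming a simplified comm interface in place of corecomp.ContextProviderComm and a ticker in place of the host provider's per-iteration timer.

package main

import (
	"context"
	"fmt"
	"time"
)

// comm is a simplified stand-in for the provider communicator: it exposes the
// provider's lifetime (Done/Err) and a Set method for publishing mappings.
type comm interface {
	Done() <-chan struct{}
	Err() error
	Set(map[string]interface{}) error
}

// run shows the blocking provider shape: republish on an interval and only
// return once the communicator is done.
func run(c comm, interval time.Duration) error {
	t := time.NewTicker(interval)
	defer t.Stop()
	for {
		select {
		case <-c.Done():
			return c.Err()
		case <-t.C:
		}
		if err := c.Set(map[string]interface{}{"updated_at": time.Now().String()}); err != nil {
			fmt.Println("failed to update mapping:", err)
		}
	}
}

// ctxComm is a trivial comm backed by a context, enough to exercise run.
type ctxComm struct{ ctx context.Context }

func (c ctxComm) Done() <-chan struct{}              { return c.ctx.Done() }
func (c ctxComm) Err() error                         { return c.ctx.Err() }
func (c ctxComm) Set(m map[string]interface{}) error { fmt.Println("set:", m); return nil }

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	_ = run(ctxComm{ctx: ctx}, 10*time.Millisecond)
}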
diff --git a/internal/pkg/composable/providers/host/host_test.go b/internal/pkg/composable/providers/host/host_test.go index 8e117fcbeb4..869f6a82050 100644 --- a/internal/pkg/composable/providers/host/host_test.go +++ b/internal/pkg/composable/providers/host/host_test.go @@ -41,15 +41,28 @@ func TestContextProvider(t *testing.T) { require.Equal(t, 100*time.Millisecond, hostProvider.CheckInterval) ctx, cancel := context.WithCancel(context.Background()) + defer cancel() comm := ctesting.NewContextComm(ctx) - err = provider.Run(comm) + + go func() { + err = provider.Run(comm) + }() + + // wait for it to be called once + var wg sync.WaitGroup + wg.Add(1) + comm.CallOnSet(func() { + wg.Done() + }) + wg.Wait() + comm.CallOnSet(nil) + require.NoError(t, err) starting, err = ctesting.CloneMap(starting) require.NoError(t, err) require.Equal(t, starting, comm.Current()) // wait for it to be called again - var wg sync.WaitGroup wg.Add(1) comm.CallOnSet(func() { wg.Done() diff --git a/internal/pkg/composable/providers/kubernetes/kubernetes.go b/internal/pkg/composable/providers/kubernetes/kubernetes.go index 91367c5252f..e7d005aa8f1 100644 --- a/internal/pkg/composable/providers/kubernetes/kubernetes.go +++ b/internal/pkg/composable/providers/kubernetes/kubernetes.go @@ -54,37 +54,51 @@ func DynamicProviderBuilder(logger *logger.Logger, c *config.Config) (composable // Run runs the kubernetes context provider. func (p *dynamicProvider) Run(comm composable.DynamicProviderComm) error { + eventers := make([]Eventer, 0, 3) if p.config.Resources.Pod.Enabled { - err := p.watchResource(comm, "pod") + eventer, err := p.watchResource(comm, "pod") if err != nil { return err } + if eventer != nil { + eventers = append(eventers, eventer) + } } if p.config.Resources.Node.Enabled { - err := p.watchResource(comm, nodeScope) + eventer, err := p.watchResource(comm, nodeScope) if err != nil { return err } + if eventer != nil { + eventers = append(eventers, eventer) + } } if p.config.Resources.Service.Enabled { - err := p.watchResource(comm, "service") + eventer, err := p.watchResource(comm, "service") if err != nil { return err } + if eventer != nil { + eventers = append(eventers, eventer) + } + } + <-comm.Done() + for _, eventer := range eventers { + eventer.Stop() } - return nil + return comm.Err() } // watchResource initializes the proper watcher according to the given resource (pod, node, service) // and starts watching for such resource's events. func (p *dynamicProvider) watchResource( comm composable.DynamicProviderComm, - resourceType string) error { + resourceType string) (Eventer, error) { client, err := kubernetes.GetKubernetesClient(p.config.KubeConfig, p.config.KubeClientOptions) if err != nil { // info only; return nil (do nothing) p.logger.Debugf("Kubernetes provider for resource %s skipped, unable to connect: %s", resourceType, err) - return nil + return nil, nil } // Ensure that node is set correctly whenever the scope is set to "node". 
Make sure that node is empty @@ -105,7 +119,7 @@ func (p *dynamicProvider) watchResource( p.config.Node, err = kubernetes.DiscoverKubernetesNode(p.logger, nd) if err != nil { p.logger.Debugf("Kubernetes provider skipped, unable to discover node: %w", err) - return nil + return nil, nil } } else { @@ -114,15 +128,15 @@ func (p *dynamicProvider) watchResource( eventer, err := p.newEventer(resourceType, comm, client) if err != nil { - return errors.New(err, "couldn't create kubernetes watcher for resource %s", resourceType) + return nil, errors.New(err, "couldn't create kubernetes watcher for resource %s", resourceType) } err = eventer.Start() if err != nil { - return errors.New(err, "couldn't start kubernetes eventer for resource %s", resourceType) + return nil, errors.New(err, "couldn't start kubernetes eventer for resource %s", resourceType) } - return nil + return eventer, nil } // Eventer allows defining ways in which kubernetes resource events are observed and processed diff --git a/internal/pkg/composable/providers/kubernetesleaderelection/kubernetes_leaderelection.go b/internal/pkg/composable/providers/kubernetesleaderelection/kubernetes_leaderelection.go index 410e13ec77d..4d27192bab5 100644 --- a/internal/pkg/composable/providers/kubernetesleaderelection/kubernetes_leaderelection.go +++ b/internal/pkg/composable/providers/kubernetesleaderelection/kubernetes_leaderelection.go @@ -27,11 +27,9 @@ func init() { } type contextProvider struct { - logger *logger.Logger - config *Config - comm corecomp.ContextProviderComm - leaderElection *leaderelection.LeaderElectionConfig - cancelLeaderElection context.CancelFunc + logger *logger.Logger + config *Config + leaderElection *leaderelection.LeaderElectionConfig } // ContextProviderBuilder builds the provider. @@ -44,7 +42,7 @@ func ContextProviderBuilder(logger *logger.Logger, c *config.Config) (corecomp.C if err != nil { return nil, errors.New(err, "failed to unpack configuration") } - return &contextProvider{logger, &cfg, nil, nil, nil}, nil + return &contextProvider{logger, &cfg, nil}, nil } // Run runs the leaderelection provider. 
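The kubernetes dynamic provider follows the same blocking contract, but it also has watchers to clean up: watchResource now hands back an Eventer, and Run stops every started Eventer once the communicator is done. A rough, self-contained sketch of that start/track/stop sequence follows, with eventer and fakeEventer as illustrative stand-ins for the provider's real Eventer implementations.

package main

import (
	"context"
	"fmt"
)

// eventer stands in for the kubernetes provider's Eventer: something that can
// be started and must be stopped when the provider shuts down.
type eventer interface {
	Start() error
	Stop()
}

type fakeEventer struct{ name string }

func (f *fakeEventer) Start() error { fmt.Println("start", f.name); return nil }
func (f *fakeEventer) Stop()        { fmt.Println("stop", f.name) }

// run mirrors the new shape of the dynamic provider's Run: start a watcher per
// enabled resource, keep track of them, block until done, then stop every
// watcher before returning.
func run(done <-chan struct{}, resources []string) error {
	eventers := make([]eventer, 0, len(resources))
	for _, r := range resources {
		e := &fakeEventer{name: r}
		if err := e.Start(); err != nil {
			return err
		}
		eventers = append(eventers, e)
	}
	<-done
	for _, e := range eventers {
		e.Stop()
	}
	return nil
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // already done, just to exercise the start/stop sequence
	_ = run(ctx.Done(), []string{"pod", "node", "service"})
}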
@@ -91,57 +89,43 @@ func (p *contextProvider) Run(comm corecomp.ContextProviderComm) error { Callbacks: leaderelection.LeaderCallbacks{ OnStartedLeading: func(ctx context.Context) { p.logger.Debugf("leader election lock GAINED, id %v", id) - p.startLeading() + p.startLeading(comm) }, OnStoppedLeading: func() { p.logger.Debugf("leader election lock LOST, id %v", id) - p.stopLeading() + p.stopLeading(comm) }, }, } - ctx, cancel := context.WithCancel(context.TODO()) - p.cancelLeaderElection = cancel - p.comm = comm - p.startLeaderElector(ctx) - return nil -} - -// startLeaderElector starts a Leader Elector in the background with the provided config -func (p *contextProvider) startLeaderElector(ctx context.Context) { le, err := leaderelection.NewLeaderElector(*p.leaderElection) if err != nil { p.logger.Errorf("error while creating Leader Elector: %v", err) } p.logger.Debugf("Starting Leader Elector") - go le.Run(ctx) + le.Run(comm) + p.logger.Debugf("Stopped Leader Elector") + return comm.Err() } -func (p *contextProvider) startLeading() { +func (p *contextProvider) startLeading(comm corecomp.ContextProviderComm) { mapping := map[string]interface{}{ "leader": true, } - err := p.comm.Set(mapping) + err := comm.Set(mapping) if err != nil { p.logger.Errorf("Failed updating leaderelection status to leader TRUE: %s", err) } } -func (p *contextProvider) stopLeading() { +func (p *contextProvider) stopLeading(comm corecomp.ContextProviderComm) { mapping := map[string]interface{}{ "leader": false, } - err := p.comm.Set(mapping) + err := comm.Set(mapping) if err != nil { p.logger.Errorf("Failed updating leaderelection status to leader FALSE: %s", err) } } - -// Stop signals the stop channel to force the leader election loop routine to stop. -func (p *contextProvider) Stop() { - if p.cancelLeaderElection != nil { - p.cancelLeaderElection() - } -} diff --git a/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets.go b/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets.go index 0bc560295ed..debc501d3cc 100644 --- a/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets.go +++ b/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets.go @@ -7,6 +7,7 @@ package kubernetessecrets import ( "context" "strings" + "sync" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" k8sclient "k8s.io/client-go/kubernetes" @@ -30,7 +31,8 @@ type contextProviderK8sSecrets struct { logger *logger.Logger config *Config - client k8sclient.Interface + clientMx sync.Mutex + client k8sclient.Interface } // ContextProviderBuilder builds the context provider. 
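With the leader-election provider, the callbacks no longer reach for state stashed on the provider struct; they receive the communicator and publish the current leadership as a mapping. A small, self-contained sketch of that callback-to-Set flow is below, where setter and printComm are hypothetical stand-ins for the comm passed into Run.

package main

import "fmt"

// setter is the small slice of the provider communicator the callbacks need.
type setter interface {
	Set(map[string]interface{}) error
}

// printComm just prints whatever mapping it is given.
type printComm struct{}

func (printComm) Set(m map[string]interface{}) error { fmt.Println("mapping:", m); return nil }

// publishLeadership mirrors startLeading/stopLeading after this change: the
// callback receives the comm and publishes the current leadership state.
func publishLeadership(comm setter, leader bool) {
	if err := comm.Set(map[string]interface{}{"leader": leader}); err != nil {
		fmt.Println("failed to update leaderelection status:", err)
	}
}

func main() {
	comm := printComm{}
	publishLeadership(comm, true)  // what OnStartedLeading would do
	publishLeadership(comm, false) // what OnStoppedLeading would do
}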
@@ -43,12 +45,18 @@ func ContextProviderBuilder(logger *logger.Logger, c *config.Config) (corecomp.C if err != nil { return nil, errors.New(err, "failed to unpack configuration") } - return &contextProviderK8sSecrets{logger, &cfg, nil}, nil + return &contextProviderK8sSecrets{ + logger: logger, + config: &cfg, + }, nil } func (p *contextProviderK8sSecrets) Fetch(key string) (string, bool) { // key = "kubernetes_secrets.somenamespace.somesecret.value" - if p.client == nil { + p.clientMx.Lock() + client := p.client + p.clientMx.Unlock() + if client == nil { return "", false } tokens := strings.Split(key, ".") @@ -67,7 +75,7 @@ func (p *contextProviderK8sSecrets) Fetch(key string) (string, bool) { secretName := tokens[2] secretVar := tokens[3] - secretIntefrace := p.client.CoreV1().Secrets(ns) + secretIntefrace := client.CoreV1().Secrets(ns) ctx := context.TODO() secret, err := secretIntefrace.Get(ctx, secretName, metav1.GetOptions{}) if err != nil { @@ -89,8 +97,14 @@ func (p *contextProviderK8sSecrets) Run(comm corecomp.ContextProviderComm) error p.logger.Debugf("Kubernetes_secrets provider skipped, unable to connect: %s", err) return nil } + p.clientMx.Lock() p.client = client - return nil + p.clientMx.Unlock() + <-comm.Done() + p.clientMx.Lock() + p.client = nil + p.clientMx.Unlock() + return comm.Err() } func getK8sClient(kubeconfig string, opt kubernetes.KubeClientOptions) (k8sclient.Interface, error) { diff --git a/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets_test.go b/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets_test.go index 4c80800a59b..f0b56bb82a6 100644 --- a/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets_test.go +++ b/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets_test.go @@ -6,7 +6,9 @@ package kubernetessecrets import ( "context" + ctesting "github.com/elastic/elastic-agent/internal/pkg/composable/testing" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -19,7 +21,6 @@ import ( "github.com/elastic/elastic-agent-autodiscover/kubernetes" "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent/internal/pkg/config" - corecomp "github.com/elastic/elastic-agent/internal/pkg/core/composable" ) const ( @@ -52,13 +53,31 @@ func Test_K8sSecretsProvider_Fetch(t *testing.T) { p, err := ContextProviderBuilder(logger, cfg) require.NoError(t, err) - fp, _ := p.(corecomp.FetchContextProvider) + fp, _ := p.(*contextProviderK8sSecrets) getK8sClientFunc = func(kubeconfig string, opt kubernetes.KubeClientOptions) (k8sclient.Interface, error) { return client, nil } require.NoError(t, err) - _ = fp.Run(nil) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + comm := ctesting.NewContextComm(ctx) + + go func() { + _ = fp.Run(comm) + }() + + for { + fp.clientMx.Lock() + client := fp.client + fp.clientMx.Unlock() + if client != nil { + break + } + <-time.After(10 * time.Millisecond) + } + val, found := fp.Fetch("kubernetes_secrets.test_namespace.testing_secret.secret_value") assert.True(t, found) assert.Equal(t, val, pass) @@ -89,13 +108,31 @@ func Test_K8sSecretsProvider_FetchWrongSecret(t *testing.T) { p, err := ContextProviderBuilder(logger, cfg) require.NoError(t, err) - fp, _ := p.(corecomp.FetchContextProvider) + fp, _ := p.(*contextProviderK8sSecrets) getK8sClientFunc = func(kubeconfig string, opt kubernetes.KubeClientOptions) 
(k8sclient.Interface, error) { return client, nil } require.NoError(t, err) - _ = fp.Run(nil) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + comm := ctesting.NewContextComm(ctx) + + go func() { + _ = fp.Run(comm) + }() + + for { + fp.clientMx.Lock() + client := fp.client + fp.clientMx.Unlock() + if client != nil { + break + } + <-time.After(10 * time.Millisecond) + } + val, found := fp.Fetch("kubernetes_secrets.test_namespace.testing_secretHACK.secret_value") assert.False(t, found) assert.EqualValues(t, val, "") diff --git a/internal/pkg/core/composable/providers.go b/internal/pkg/core/composable/providers.go index d87437e2dae..235e17d83fa 100644 --- a/internal/pkg/core/composable/providers.go +++ b/internal/pkg/core/composable/providers.go @@ -6,11 +6,12 @@ package composable import "context" -// FetchContextProvider is the interface that a context provider uses so as to be able to be called -// explicitly on demand by vars framework in order to fetch specific target values like a k8s secret. +// FetchContextProvider is the interface that a context provider uses allow variable values to be determined when the +// configuration is rendered versus it being known in advanced. type FetchContextProvider interface { ContextProvider - // Run runs the inventory provider. + + // Fetch tries to fetch a value for a variable. Fetch(string) (string, bool) } From a21aac2d6b201b32fbbbf450a52334b94807beaa Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Tue, 19 Jul 2022 16:00:10 -0400 Subject: [PATCH 08/19] Work on switching to the v2 runtime. --- .gitignore | 2 + control.proto | 74 +- .../{pipeline => }/actions/action.go | 4 +- .../handlers/handler_action_application.go | 41 +- .../actions/handlers/handler_action_cancel.go | 4 +- .../handlers/handler_action_policy_change.go | 58 +- .../handler_action_policy_change_test.go | 94 +- .../handler_action_policy_reassign.go | 4 +- .../handlers/handler_action_settings.go | 13 +- .../handlers/handler_action_unenroll.go | 42 +- .../handlers/handler_action_upgrade.go | 35 +- .../actions/handlers/handler_default.go | 4 +- .../actions/handlers/handler_unknown.go | 4 +- internal/pkg/agent/application/application.go | 146 ++- .../application/coordinator/coordinator.go | 369 +++++- .../{pipeline => }/dispatcher/dispatcher.go | 33 +- .../dispatcher/dispatcher_test.go | 59 +- .../gateway/fleet/fleet_gateway.go | 205 ++-- .../gateway/fleet/fleet_gateway_test.go | 306 +++-- .../gateway/fleet/noop_status_controller.go | 27 - .../pkg/agent/application/gateway/gateway.go | 15 +- internal/pkg/agent/application/local_mode.go | 158 --- .../pkg/agent/application/managed_mode.go | 406 +++---- .../agent/application/managed_mode_test.go | 8 +- internal/pkg/agent/application/once.go | 14 +- internal/pkg/agent/application/periodic.go | 32 +- .../pipeline/emitter/controller.go | 196 ---- .../application/pipeline/emitter/emitter.go | 36 - .../pipeline/emitter/emitter_test.go | 5 - .../emitter/modifiers/fleet_decorator.go | 76 -- .../emitter/modifiers/monitoring_decorator.go | 105 -- .../modifiers/monitoring_decorator_test.go | 686 ----------- .../agent/application/pipeline/pipeline.go | 69 -- .../application/pipeline/router/router.go | 121 -- .../pipeline/router/router_test.go | 233 ---- .../application/pipeline/stream/factory.go | 96 -- .../pipeline/stream/operator_stream.go | 62 - .../application/upgrade/error_checker.go | 10 +- .../agent/application/upgrade/step_mark.go | 4 +- .../pkg/agent/application/upgrade/upgrade.go | 149 +-- 
internal/pkg/agent/cmd/enroll_cmd.go | 43 +- internal/pkg/agent/cmd/inspect.go | 12 +- internal/pkg/agent/cmd/run.go | 43 +- internal/pkg/agent/cmd/status.go | 36 +- internal/pkg/agent/control/client/client.go | 124 +- .../pkg/agent/control/cproto/control.pb.go | 861 +++++++++----- .../agent/control/cproto/control_grpc.pb.go | 35 +- internal/pkg/agent/control/server/server.go | 355 +++--- internal/pkg/agent/install/uninstall.go | 3 +- .../pkg/agent/storage/store/state_store.go | 15 +- internal/pkg/capabilities/capabilities.go | 19 +- internal/pkg/capabilities/input.go | 20 +- internal/pkg/capabilities/output.go | 20 +- internal/pkg/capabilities/upgrade.go | 46 +- internal/pkg/composable/controller.go | 10 + .../pkg/fleetapi/acker/noop/noop_acker.go | 17 +- pkg/component/fake/main.go | 9 +- pkg/component/load.go | 9 + pkg/component/runtime/manager.go | 151 ++- pkg/component/runtime/manager_test.go | 33 +- pkg/component/runtime/runtime.go | 27 +- pkg/component/runtime/subscription.go | 31 +- pkg/core/server/config.go | 32 - pkg/core/server/config_test.go | 23 - pkg/core/server/server.go | 1018 ----------------- pkg/core/server/server_test.go | 794 ------------- version/version.go | 2 +- 67 files changed, 2281 insertions(+), 5512 deletions(-) rename internal/pkg/agent/application/{pipeline => }/actions/action.go (79%) rename internal/pkg/agent/application/{pipeline => }/actions/handlers/handler_action_application.go (77%) rename internal/pkg/agent/application/{pipeline => }/actions/handlers/handler_action_cancel.go (92%) rename internal/pkg/agent/application/{pipeline => }/actions/handlers/handler_action_policy_change.go (86%) rename internal/pkg/agent/application/{pipeline => }/actions/handlers/handler_action_policy_change_test.go (56%) rename internal/pkg/agent/application/{pipeline => }/actions/handlers/handler_action_policy_reassign.go (91%) rename internal/pkg/agent/application/{pipeline => }/actions/handlers/handler_action_settings.go (87%) rename internal/pkg/agent/application/{pipeline => }/actions/handlers/handler_action_unenroll.go (67%) rename internal/pkg/agent/application/{pipeline => }/actions/handlers/handler_action_upgrade.go (60%) rename internal/pkg/agent/application/{pipeline => }/actions/handlers/handler_default.go (88%) rename internal/pkg/agent/application/{pipeline => }/actions/handlers/handler_unknown.go (88%) rename internal/pkg/agent/application/{pipeline => }/dispatcher/dispatcher.go (76%) rename internal/pkg/agent/application/{pipeline => }/dispatcher/dispatcher_test.go (64%) delete mode 100644 internal/pkg/agent/application/gateway/fleet/noop_status_controller.go delete mode 100644 internal/pkg/agent/application/local_mode.go delete mode 100644 internal/pkg/agent/application/pipeline/emitter/controller.go delete mode 100644 internal/pkg/agent/application/pipeline/emitter/emitter.go delete mode 100644 internal/pkg/agent/application/pipeline/emitter/emitter_test.go delete mode 100644 internal/pkg/agent/application/pipeline/emitter/modifiers/fleet_decorator.go delete mode 100644 internal/pkg/agent/application/pipeline/emitter/modifiers/monitoring_decorator.go delete mode 100644 internal/pkg/agent/application/pipeline/emitter/modifiers/monitoring_decorator_test.go delete mode 100644 internal/pkg/agent/application/pipeline/pipeline.go delete mode 100644 internal/pkg/agent/application/pipeline/router/router.go delete mode 100644 internal/pkg/agent/application/pipeline/router/router_test.go delete mode 100644 internal/pkg/agent/application/pipeline/stream/factory.go 
delete mode 100644 internal/pkg/agent/application/pipeline/stream/operator_stream.go delete mode 100644 pkg/core/server/config.go delete mode 100644 pkg/core/server/config_test.go delete mode 100644 pkg/core/server/server.go delete mode 100644 pkg/core/server/server_test.go diff --git a/.gitignore b/.gitignore index f0b7911dbef..7bfae9cc392 100644 --- a/.gitignore +++ b/.gitignore @@ -21,6 +21,8 @@ go_env.properties mage_output_file.go elastic_agent fleet.yml +fleet.enc +fleet.enc.lock # Editor swap files *.swp diff --git a/control.proto b/control.proto index efd063822de..a1a7a3f8b82 100644 --- a/control.proto +++ b/control.proto @@ -9,16 +9,23 @@ package cproto; option cc_enable_arenas = true; option go_package = "internal/pkg/agent/control/cproto"; -// Status codes for the current state. -enum Status { +// State codes for the current state. +enum State { STARTING = 0; CONFIGURING = 1; HEALTHY = 2; DEGRADED = 3; FAILED = 4; STOPPING = 5; - UPGRADING = 6; - ROLLBACK = 7; + STOPPED = 6; + UPGRADING = 7; + ROLLBACK = 8; +} + +// Unit Type running inside a component. +enum UnitType { + INPUT = 0; + OUTPUT = 1; } // Action status codes for restart and upgrade response. @@ -93,18 +100,43 @@ message UpgradeResponse { string error = 3; } -// Current status of the application in Elastic Agent. -message ApplicationStatus { - // Unique application ID. +message ComponentUnitState { + // Type of unit in the component. + UnitType unit_type = 1; + // ID of the unit in the component. + string unit_id = 2; + // Current state. + State state = 3; + // Current state message. + string message = 4; + // Current state payload. + string payload = 5; +} + +// Version information reported by the component to Elastic Agent. +message ComponentVersionInfo { + // Name of the component. + string name = 1; + // Version of the component. + string version = 2; + // Extra meta information about the version. + map meta = 3; +} + +// Current state of a running component by Elastic Agent. +message ComponentState { + // Unique component ID. string id = 1; - // Application name. + // Component name. string name = 2; - // Current status. - Status status = 3; - // Current status message. + // Current state. + State state = 3; + // Current state message. string message = 4; - // Current status payload. - string payload = 5; + // Current units running in the component. + repeated ComponentUnitState units = 5; + // Current version information for the running component. + ComponentVersionInfo version_info = 6; } // Current metadata for a running process. @@ -126,14 +158,14 @@ message ProcMeta { string error = 15; } -// Status is the current status of Elastic Agent. -message StatusResponse { - // Overall status of Elastic Agent. - Status status = 1; +// StateResponse is the current state of Elastic Agent. +message StateResponse { + // Overall state of Elastic Agent. + State state = 1; // Overall status message of Elastic Agent. string message = 2; - // Status of each application in Elastic Agent. - repeated ApplicationStatus applications = 3; + // Status of each component in Elastic Agent. + repeated ComponentState components = 3; } // ProcMetaResponse is the current running version infomation for all processes. @@ -184,8 +216,8 @@ service ElasticAgentControl { // Fetches the currently running version of the Elastic Agent. rpc Version(Empty) returns (VersionResponse); - // Fetches the currently status of the Elastic Agent. - rpc Status(Empty) returns (StatusResponse); + // Fetches the currently states of the Elastic Agent. 
+ rpc State(Empty) returns (StateResponse); // Restart restarts the current running Elastic Agent. rpc Restart(Empty) returns (RestartResponse); diff --git a/internal/pkg/agent/application/pipeline/actions/action.go b/internal/pkg/agent/application/actions/action.go similarity index 79% rename from internal/pkg/agent/application/pipeline/actions/action.go rename to internal/pkg/agent/application/actions/action.go index 794ee5ca3df..120316e1dfb 100644 --- a/internal/pkg/agent/application/pipeline/actions/action.go +++ b/internal/pkg/agent/application/actions/action.go @@ -7,14 +7,14 @@ package actions import ( "context" - "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "github.com/elastic/elastic-agent/internal/pkg/fleetapi/client" ) // Handler handles action coming from fleet. type Handler interface { - Handle(ctx context.Context, a fleetapi.Action, acker store.FleetAcker) error + Handle(ctx context.Context, a fleetapi.Action, acker acker.Acker) error } // ClientSetter sets the client for communication. diff --git a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_application.go b/internal/pkg/agent/application/actions/handlers/handler_action_application.go similarity index 77% rename from internal/pkg/agent/application/pipeline/actions/handlers/handler_action_application.go rename to internal/pkg/agent/application/actions/handlers/handler_action_application.go index 8d8ce830421..b72d17a6847 100644 --- a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_application.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_application.go @@ -7,13 +7,15 @@ package handlers import ( "context" "fmt" + "github.com/elastic/elastic-agent-client/v7/pkg/client" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" + "github.com/elastic/elastic-agent/pkg/component" "time" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" "github.com/elastic/elastic-agent/pkg/core/logger" - "github.com/elastic/elastic-agent/pkg/core/server" ) const ( @@ -25,27 +27,28 @@ var errActionTimeoutInvalid = errors.New("action timeout is invalid") // AppAction is a handler for application actions. type AppAction struct { - log *logger.Logger - srv *server.Server + log *logger.Logger + coord *coordinator.Coordinator } // NewAppAction creates a new AppAction handler. -func NewAppAction(log *logger.Logger, srv *server.Server) *AppAction { +func NewAppAction(log *logger.Logger, coord *coordinator.Coordinator) *AppAction { return &AppAction{ - log: log, - srv: srv, + log: log, + coord: coord, } } // Handle handles application action. 
-func (h *AppAction) Handle(ctx context.Context, a fleetapi.Action, acker store.FleetAcker) error { +func (h *AppAction) Handle(ctx context.Context, a fleetapi.Action, acker acker.Acker) error { h.log.Debugf("handlerAppAction: action '%+v' received", a) action, ok := a.(*fleetapi.ActionApp) if !ok { return fmt.Errorf("invalid type, expected ActionApp and received %T", a) } - appState, ok := h.srv.FindByInputType(action.InputType) + state := h.coord.State() + unit, ok := findUnitFromInputType(state, action.InputType) if !ok { // If the matching action is not found ack the action with the error for action result document action.StartedAt = time.Now().UTC().Format(time.RFC3339Nano) @@ -71,8 +74,10 @@ func (h *AppAction) Handle(ctx context.Context, a fleetapi.Action, acker store.F var res map[string]interface{} if err == nil { - h.log.Debugf("handlerAppAction: action '%v' started with timeout: %v", action.InputType, timeout) - res, err = appState.PerformAction(action.InputType, params, timeout) + h.log.Debugf("handlerAppAction: action '%v' started with timeout: %v", action.ActionType, timeout) + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + res, err = h.coord.PerformAction(ctx, unit, action.ActionType, params) } end := time.Now().UTC() @@ -143,3 +148,17 @@ func readMapString(m map[string]interface{}, key string, def string) string { } return def } + +func findUnitFromInputType(state coordinator.State, inputType string) (component.Unit, bool) { + for _, comp := range state.Components { + for _, unit := range comp.Component.Units { + if unit.Type == client.UnitTypeInput { + it, ok := unit.Config["type"] + if ok && it == inputType { + return unit, true + } + } + } + } + return component.Unit{}, false +} diff --git a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_cancel.go b/internal/pkg/agent/application/actions/handlers/handler_action_cancel.go similarity index 92% rename from internal/pkg/agent/application/pipeline/actions/handlers/handler_action_cancel.go rename to internal/pkg/agent/application/actions/handlers/handler_action_cancel.go index a2208c7294d..7944cdd8fd2 100644 --- a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_cancel.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_cancel.go @@ -7,8 +7,8 @@ package handlers import ( "context" "fmt" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" - "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -32,7 +32,7 @@ func NewCancel(log *logger.Logger, cancel queueCanceler) *Cancel { } // Handle will cancel any actions in the queue that match target_id. 
-func (h *Cancel) Handle(ctx context.Context, a fleetapi.Action, acker store.FleetAcker) error { +func (h *Cancel) Handle(ctx context.Context, a fleetapi.Action, acker acker.Acker) error { action, ok := a.(*fleetapi.ActionCancel) if !ok { return fmt.Errorf("invalid type, expected ActionCancel and received %T", a) diff --git a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_policy_change.go b/internal/pkg/agent/application/actions/handlers/handler_action_policy_change.go similarity index 86% rename from internal/pkg/agent/application/pipeline/actions/handlers/handler_action_policy_change.go rename to internal/pkg/agent/application/actions/handlers/handler_action_policy_change.go index 3775d12b352..572b820c6ce 100644 --- a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_policy_change.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_policy_change.go @@ -8,6 +8,9 @@ import ( "bytes" "context" "fmt" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/actions" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "io" "io/ioutil" "sort" @@ -16,12 +19,9 @@ import ( "gopkg.in/yaml.v2" "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/actions" "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/agent/storage" - "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" "github.com/elastic/elastic-agent/internal/pkg/config" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" "github.com/elastic/elastic-agent/internal/pkg/fleetapi/client" @@ -36,28 +36,28 @@ const ( // PolicyChange is a handler for POLICY_CHANGE action. type PolicyChange struct { log *logger.Logger - emitter pipeline.EmitterFunc agentInfo *info.AgentInfo config *configuration.Configuration store storage.Store + ch chan coordinator.ConfigChange setters []actions.ClientSetter } // NewPolicyChange creates a new PolicyChange handler. func NewPolicyChange( log *logger.Logger, - emitter pipeline.EmitterFunc, agentInfo *info.AgentInfo, config *configuration.Configuration, store storage.Store, + ch chan coordinator.ConfigChange, setters ...actions.ClientSetter, ) *PolicyChange { return &PolicyChange{ log: log, - emitter: emitter, agentInfo: agentInfo, config: config, store: store, + ch: ch, setters: setters, } } @@ -72,7 +72,7 @@ func (h *PolicyChange) AddSetter(cs actions.ClientSetter) { } // Handle handles policy change action. 
-func (h *PolicyChange) Handle(ctx context.Context, a fleetapi.Action, acker store.FleetAcker) error { +func (h *PolicyChange) Handle(ctx context.Context, a fleetapi.Action, acker acker.Acker) error { h.log.Debugf("handlerPolicyChange: action '%+v' received", a) action, ok := a.(*fleetapi.ActionPolicyChange) if !ok { @@ -89,11 +89,19 @@ func (h *PolicyChange) Handle(ctx context.Context, a fleetapi.Action, acker stor if err != nil { return err } - if err := h.emitter(ctx, c); err != nil { - return err + + h.ch <- &policyChange{ + ctx: ctx, + cfg: c, + action: a, + acker: acker, } + return nil +} - return acker.Ack(ctx, action) +// Watch returns the channel for configuration change notifications. +func (h *PolicyChange) Watch() <-chan coordinator.ConfigChange { + return h.ch } func (h *PolicyChange) handleFleetServerHosts(ctx context.Context, c *config.Config) (err error) { @@ -210,3 +218,33 @@ func fleetToReader(agentInfo *info.AgentInfo, cfg *configuration.Configuration) } return bytes.NewReader(data), nil } + +type policyChange struct { + ctx context.Context + cfg *config.Config + action fleetapi.Action + acker acker.Acker + commit bool +} + +func (l *policyChange) Config() *config.Config { + return l.cfg +} + +func (l *policyChange) Ack() error { + if l.action == nil { + return nil + } + err := l.acker.Ack(l.ctx, l.action) + if err != nil { + return err + } + if l.commit { + return l.acker.Commit(l.ctx) + } + return nil +} + +func (l *policyChange) Fail(_ error) { + // do nothing +} diff --git a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_policy_change_test.go b/internal/pkg/agent/application/actions/handlers/handler_action_policy_change_test.go similarity index 56% rename from internal/pkg/agent/application/pipeline/actions/handlers/handler_action_policy_change_test.go rename to internal/pkg/agent/application/actions/handlers/handler_action_policy_change_test.go index d887e755154..657611ca797 100644 --- a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_policy_change_test.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_policy_change_test.go @@ -6,6 +6,7 @@ package handlers import ( "context" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" "sync" "testing" @@ -13,7 +14,6 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" "github.com/elastic/elastic-agent/internal/pkg/agent/storage" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -23,24 +23,14 @@ import ( "github.com/elastic/elastic-agent/pkg/core/logger" ) -type mockEmitter struct { - err error - policy *config.Config -} - -func (m *mockEmitter) Emitter(_ context.Context, policy *config.Config) error { - m.policy = policy - return m.err -} - func TestPolicyChange(t *testing.T) { log, _ := logger.New("", false) - ack := noopacker.NewAcker() + ack := noopacker.New() agentInfo, _ := info.NewAgentInfo(true) nullStore := &storage.NullStore{} t.Run("Receive a config change and successfully emits a raw configuration", func(t *testing.T) { - emitter := &mockEmitter{} + ch := make(chan coordinator.ConfigChange, 1) conf := map[string]interface{}{"hello": "world"} action := &fleetapi.ActionPolicyChange{ @@ -50,41 +40,13 @@ func TestPolicyChange(t *testing.T) { } cfg := configuration.DefaultConfiguration() - handler := &PolicyChange{ - log: log, - emitter: emitter.Emitter, - agentInfo: agentInfo, - config: 
cfg, - store: nullStore, - } + handler := NewPolicyChange(log, agentInfo, cfg, nullStore, ch) err := handler.Handle(context.Background(), action, ack) require.NoError(t, err) - require.Equal(t, config.MustNewConfigFrom(conf), emitter.policy) - }) - - t.Run("Receive a config and fail to emits a raw configuration", func(t *testing.T) { - mockErr := errors.New("error returned") - emitter := &mockEmitter{err: mockErr} - - conf := map[string]interface{}{"hello": "world"} - action := &fleetapi.ActionPolicyChange{ - ActionID: "abc123", - ActionType: "POLICY_CHANGE", - Policy: conf, - } - - cfg := configuration.DefaultConfiguration() - handler := &PolicyChange{ - log: log, - emitter: emitter.Emitter, - agentInfo: agentInfo, - config: cfg, - store: nullStore, - } - err := handler.Handle(context.Background(), action, ack) - require.Error(t, err) + change := <-ch + require.Equal(t, config.MustNewConfigFrom(conf), change.Config()) }) } @@ -93,41 +55,10 @@ func TestPolicyAcked(t *testing.T) { agentInfo, _ := info.NewAgentInfo(true) nullStore := &storage.NullStore{} - t.Run("Config change should not ACK on error", func(t *testing.T) { - tacker := &testAcker{} - - mockErr := errors.New("error returned") - emitter := &mockEmitter{err: mockErr} - - config := map[string]interface{}{"hello": "world"} - actionID := "abc123" - action := &fleetapi.ActionPolicyChange{ - ActionID: actionID, - ActionType: "POLICY_CHANGE", - Policy: config, - } - - cfg := configuration.DefaultConfiguration() - handler := &PolicyChange{ - log: log, - emitter: emitter.Emitter, - agentInfo: agentInfo, - config: cfg, - store: nullStore, - } - - err := handler.Handle(context.Background(), action, tacker) - require.Error(t, err) - - actions := tacker.Items() - assert.EqualValues(t, 0, len(actions)) - }) - t.Run("Config change should ACK", func(t *testing.T) { + ch := make(chan coordinator.ConfigChange, 1) tacker := &testAcker{} - emitter := &mockEmitter{} - config := map[string]interface{}{"hello": "world"} actionID := "abc123" action := &fleetapi.ActionPolicyChange{ @@ -137,17 +68,14 @@ func TestPolicyAcked(t *testing.T) { } cfg := configuration.DefaultConfiguration() - handler := &PolicyChange{ - log: log, - emitter: emitter.Emitter, - agentInfo: agentInfo, - config: cfg, - store: nullStore, - } + handler := NewPolicyChange(log, agentInfo, cfg, nullStore, ch) err := handler.Handle(context.Background(), action, tacker) require.NoError(t, err) + change := <-ch + require.NoError(t, change.Ack()) + actions := tacker.Items() assert.EqualValues(t, 1, len(actions)) assert.Equal(t, actionID, actions[0]) diff --git a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_policy_reassign.go b/internal/pkg/agent/application/actions/handlers/handler_action_policy_reassign.go similarity index 91% rename from internal/pkg/agent/application/pipeline/actions/handlers/handler_action_policy_reassign.go rename to internal/pkg/agent/application/actions/handlers/handler_action_policy_reassign.go index 962447b8a35..6f367700819 100644 --- a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_policy_reassign.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_policy_reassign.go @@ -6,8 +6,8 @@ package handlers import ( "context" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" - "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ 
-25,7 +25,7 @@ func NewPolicyReassign(log *logger.Logger) *PolicyReassign { } // Handle handles POLICY_REASSIGN action. -func (h *PolicyReassign) Handle(ctx context.Context, a fleetapi.Action, acker store.FleetAcker) error { +func (h *PolicyReassign) Handle(ctx context.Context, a fleetapi.Action, acker acker.Acker) error { h.log.Debugf("handlerPolicyReassign: action '%+v' received", a) if err := acker.Ack(ctx, a); err != nil { diff --git a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_settings.go b/internal/pkg/agent/application/actions/handlers/handler_action_settings.go similarity index 87% rename from internal/pkg/agent/application/pipeline/actions/handlers/handler_action_settings.go rename to internal/pkg/agent/application/actions/handlers/handler_action_settings.go index 5418a0f3eb6..bd05205817e 100644 --- a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_settings.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_settings.go @@ -7,11 +7,12 @@ package handlers import ( "context" "fmt" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/application/reexec" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -23,25 +24,25 @@ type reexecManager interface { // Settings handles settings change coming from fleet and updates log level. type Settings struct { log *logger.Logger - reexec reexecManager agentInfo *info.AgentInfo + coord *coordinator.Coordinator } // NewSettings creates a new Settings handler. func NewSettings( log *logger.Logger, - reexec reexecManager, agentInfo *info.AgentInfo, + coord *coordinator.Coordinator, ) *Settings { return &Settings{ log: log, - reexec: reexec, agentInfo: agentInfo, + coord: coord, } } // Handle handles SETTINGS action. 
-func (h *Settings) Handle(ctx context.Context, a fleetapi.Action, acker store.FleetAcker) error { +func (h *Settings) Handle(ctx context.Context, a fleetapi.Action, acker acker.Acker) error { h.log.Debugf("handlerUpgrade: action '%+v' received", a) action, ok := a.(*fleetapi.ActionSettings) if !ok { @@ -62,7 +63,7 @@ func (h *Settings) Handle(ctx context.Context, a fleetapi.Action, acker store.Fl h.log.Errorf("failed to commit acker after acknowledging action with id '%s'", action.ActionID) } - h.reexec.ReExec(nil) + h.coord.ReExec(nil) return nil } diff --git a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_unenroll.go b/internal/pkg/agent/application/actions/handlers/handler_action_unenroll.go similarity index 67% rename from internal/pkg/agent/application/pipeline/actions/handlers/handler_action_unenroll.go rename to internal/pkg/agent/application/actions/handlers/handler_action_unenroll.go index 71fe0f30644..0753dea39e1 100644 --- a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_unenroll.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_unenroll.go @@ -7,10 +7,10 @@ package handlers import ( "context" "fmt" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" + "github.com/elastic/elastic-agent/internal/pkg/config" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -27,8 +27,7 @@ type stateStore interface { // For it to be operational again it needs to be either enrolled or reconfigured. type Unenroll struct { log *logger.Logger - emitter pipeline.EmitterFunc - dispatcher pipeline.Router + ch chan coordinator.ConfigChange closers []context.CancelFunc stateStore stateStore } @@ -36,43 +35,40 @@ type Unenroll struct { // NewUnenroll creates a new Unenroll handler. func NewUnenroll( log *logger.Logger, - emitter pipeline.EmitterFunc, - dispatcher pipeline.Router, + ch chan coordinator.ConfigChange, closers []context.CancelFunc, stateStore stateStore, ) *Unenroll { return &Unenroll{ log: log, - emitter: emitter, - dispatcher: dispatcher, + ch: ch, closers: closers, stateStore: stateStore, } } // Handle handles UNENROLL action. -func (h *Unenroll) Handle(ctx context.Context, a fleetapi.Action, acker store.FleetAcker) error { +func (h *Unenroll) Handle(ctx context.Context, a fleetapi.Action, acker acker.Acker) error { h.log.Debugf("handlerUnenroll: action '%+v' received", a) action, ok := a.(*fleetapi.ActionUnenroll) if !ok { return fmt.Errorf("invalid type, expected ActionUnenroll and received %T", a) } - // Providing empty map will close all pipelines - noPrograms := make(map[pipeline.RoutingKey][]program.Program) - _ = h.dispatcher.Route(ctx, a.ID(), noPrograms) + if action.IsDetected { + // not from Fleet; so we set it to nil so policyChange doesn't ack it + a = nil + } - if !action.IsDetected { - // ACK only events received from fleet. - if err := acker.Ack(ctx, action); err != nil { - return err - } + h.ch <- &policyChange{ + ctx: ctx, + cfg: config.New(), + action: a, + acker: acker, + commit: true, + } - // commit all acks before quitting. 
- if err := acker.Commit(ctx); err != nil { - return err - } - } else if h.stateStore != nil { + if h.stateStore != nil { // backup action for future start to avoid starting fleet gateway loop h.stateStore.Add(a) h.stateStore.Save() diff --git a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_upgrade.go b/internal/pkg/agent/application/actions/handlers/handler_action_upgrade.go similarity index 60% rename from internal/pkg/agent/application/pipeline/actions/handlers/handler_action_upgrade.go rename to internal/pkg/agent/application/actions/handlers/handler_action_upgrade.go index cfc7ea83749..01462b47b5c 100644 --- a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_upgrade.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_upgrade.go @@ -7,9 +7,9 @@ package handlers import ( "context" "fmt" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade" - "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -18,42 +18,25 @@ import ( // After running Upgrade agent should download its own version specified by action // from repository specified by fleet. type Upgrade struct { - log *logger.Logger - upgrader *upgrade.Upgrader + log *logger.Logger + coord *coordinator.Coordinator } // NewUpgrade creates a new Upgrade handler. -func NewUpgrade(log *logger.Logger, upgrader *upgrade.Upgrader) *Upgrade { +func NewUpgrade(log *logger.Logger, coord *coordinator.Coordinator) *Upgrade { return &Upgrade{ - log: log, - upgrader: upgrader, + log: log, + coord: coord, } } // Handle handles UPGRADE action. 
-func (h *Upgrade) Handle(ctx context.Context, a fleetapi.Action, acker store.FleetAcker) error { +func (h *Upgrade) Handle(ctx context.Context, a fleetapi.Action, _ acker.Acker) error { h.log.Debugf("handlerUpgrade: action '%+v' received", a) action, ok := a.(*fleetapi.ActionUpgrade) if !ok { return fmt.Errorf("invalid type, expected ActionUpgrade and received %T", a) } - _, err := h.upgrader.Upgrade(ctx, &upgradeAction{action}, true) - return err -} - -type upgradeAction struct { - *fleetapi.ActionUpgrade -} - -func (a *upgradeAction) Version() string { - return a.ActionUpgrade.Version -} - -func (a *upgradeAction) SourceURI() string { - return a.ActionUpgrade.SourceURI -} - -func (a *upgradeAction) FleetAction() *fleetapi.ActionUpgrade { - return a.ActionUpgrade + return h.coord.Upgrade(ctx, action.Version, action.SourceURI, action) } diff --git a/internal/pkg/agent/application/pipeline/actions/handlers/handler_default.go b/internal/pkg/agent/application/actions/handlers/handler_default.go similarity index 88% rename from internal/pkg/agent/application/pipeline/actions/handlers/handler_default.go rename to internal/pkg/agent/application/actions/handlers/handler_default.go index 873c3fd7c5a..2afd8b77a67 100644 --- a/internal/pkg/agent/application/pipeline/actions/handlers/handler_default.go +++ b/internal/pkg/agent/application/actions/handlers/handler_default.go @@ -6,8 +6,8 @@ package handlers import ( "context" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" - "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -25,7 +25,7 @@ func NewDefault(log *logger.Logger) *Default { } // Handle is a default handler, no action is taken. -func (h *Default) Handle(_ context.Context, a fleetapi.Action, acker store.FleetAcker) error { +func (h *Default) Handle(_ context.Context, a fleetapi.Action, acker acker.Acker) error { h.log.Errorf("HandlerDefault: action '%+v' received", a) return nil } diff --git a/internal/pkg/agent/application/pipeline/actions/handlers/handler_unknown.go b/internal/pkg/agent/application/actions/handlers/handler_unknown.go similarity index 88% rename from internal/pkg/agent/application/pipeline/actions/handlers/handler_unknown.go rename to internal/pkg/agent/application/actions/handlers/handler_unknown.go index 58e0640fe4d..5a62b60c42f 100644 --- a/internal/pkg/agent/application/pipeline/actions/handlers/handler_unknown.go +++ b/internal/pkg/agent/application/actions/handlers/handler_unknown.go @@ -6,8 +6,8 @@ package handlers import ( "context" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" - "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -25,7 +25,7 @@ func NewUnknown(log *logger.Logger) *Unknown { } // Handle handles unknown actions, no action is taken. 
-func (h *Unknown) Handle(_ context.Context, a fleetapi.Action, acker store.FleetAcker) error { +func (h *Unknown) Handle(_ context.Context, a fleetapi.Action, acker acker.Acker) error { h.log.Errorf("HandlerUnknown: action '%+v' received", a) return nil } diff --git a/internal/pkg/agent/application/application.go b/internal/pkg/agent/application/application.go index 17ff9d4d83e..b5254896fc1 100644 --- a/internal/pkg/agent/application/application.go +++ b/internal/pkg/agent/application/application.go @@ -5,76 +5,64 @@ package application import ( - "context" "fmt" - "github.com/elastic/elastic-agent/internal/pkg/capabilities" - "github.com/elastic/elastic-agent/pkg/component" - "github.com/elastic/go-sysinfo" - "go.elastic.co/apm" + "path/filepath" goruntime "runtime" "strconv" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/storage" - "github.com/elastic/elastic-agent/internal/pkg/core/status" - "github.com/elastic/elastic-agent/internal/pkg/sorted" + "github.com/elastic/go-sysinfo" + "go.elastic.co/apm" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/reexec" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade" "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" + "github.com/elastic/elastic-agent/internal/pkg/agent/errors" + "github.com/elastic/elastic-agent/internal/pkg/agent/storage" + "github.com/elastic/elastic-agent/internal/pkg/capabilities" + "github.com/elastic/elastic-agent/internal/pkg/composable" "github.com/elastic/elastic-agent/internal/pkg/config" + "github.com/elastic/elastic-agent/internal/pkg/dir" + "github.com/elastic/elastic-agent/pkg/component" + "github.com/elastic/elastic-agent/pkg/component/runtime" "github.com/elastic/elastic-agent/pkg/core/logger" ) -// PlatformModifier can modify the platform details before the runtime specifications are loaded. -type PlatformModifier func(detail component.PlatformDetail) component.PlatformDetail +type discoverFunc func() ([]string, error) -// Application is the application interface implemented by the different running mode. -type Application interface { - Run(ctx context.Context) error - AgentInfo() *info.AgentInfo - Routes() *sorted.Set -} - -type reexecManager interface { - ReExec(callback reexec.ShutdownCallbackFn, argOverrides ...string) -} +// ErrNoConfiguration is returned when no configuration are found. +var ErrNoConfiguration = errors.New("no configuration found", errors.TypeConfig) -type upgraderControl interface { - SetUpgrader(upgrader *upgrade.Upgrader) -} +// PlatformModifier can modify the platform details before the runtime specifications are loaded. +type PlatformModifier func(detail component.PlatformDetail) component.PlatformDetail // New creates a new Agent and bootstrap the required subsystem. 
func New( log *logger.Logger, - reexec reexecManager, - statusCtrl status.Controller, - uc upgraderControl, agentInfo *info.AgentInfo, + reexec coordinator.ReExecManager, tracer *apm.Tracer, modifiers ...PlatformModifier, -) (Application, error) { - log.Info("Gathering system information") +) (*coordinator.Coordinator, error) { platform, err := getPlatformDetail(modifiers...) if err != nil { return nil, fmt.Errorf("failed to gather system information: %w", err) } + log.Info("Gathered system information") - log.Info("Detecting available inputs and outputs") specs, err := component.LoadRuntimeSpecs(paths.Components(), platform) if err != nil { return nil, fmt.Errorf("failed to detect inputs and outputs: %w", err) } + log.With("inputs", specs.Inputs()).Info("Detected available inputs and outputs") - log.Info("Determine allowed capabilities") - caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), log, statusCtrl) + caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), log) if err != nil { return nil, fmt.Errorf("failed to determine capabilities: %w", err) } + log.Info("Determined allowed capabilities") - log.Info("Parsing configuration and determining execution mode") pathConfigFile := paths.ConfigFile() rawConfig, err := config.LoadFile(pathConfigFile) if err != nil { @@ -88,27 +76,58 @@ func New( return nil, fmt.Errorf("failed to load configuration: %w", err) } + upgrader := upgrade.NewUpgrader(log, cfg.Settings.DownloadConfig) + + var configMgr coordinator.ConfigManager + var managed *managedConfigManager if configuration.IsStandalone(cfg.Fleet) { - log.Info("Agent is managed locally") - return newLocal(log, specs, caps, cfg, paths.ConfigFile(), rawConfig, reexec, statusCtrl, uc, agentInfo, tracer) + log.Info("Parsed configuration and determined agent is managed locally") + + loader := config.NewLoader(log, externalConfigsGlob()) + discover := discoverer(pathConfigFile, cfg.Settings.Path, externalConfigsGlob()) + if !cfg.Settings.Reload.Enabled { + log.Debug("Reloading of configuration is off") + configMgr = newOnce(log, discover, loader) + } else { + log.Debugf("Reloading of configuration is on, frequency is set to %s", cfg.Settings.Reload.Period) + configMgr = newPeriodic(log, cfg.Settings.Reload.Period, discover, loader) + } + } else if configuration.IsFleetServerBootstrap(cfg.Fleet) { + log.Info("Parsed configuration and determined agent is in Fleet Server bootstrap mode") + // //return newFleetServerBootstrap(ctx, log, pathConfigFile, rawConfig, statusCtrl, agentInfo, tracer) + // return nil, errors.New("TODO: fleet-server bootstrap mode") + } else { + log.Info("Parsed configuration and determined agent is managed by Fleet") + + var store storage.Store + store, cfg, err = mergeFleetConfig(rawConfig) + if err != nil { + return nil, err + } + + managed, err = newManagedConfigManager(log, agentInfo, cfg, store) + if err != nil { + return nil, err + } + configMgr = managed + } + + runtime, err := runtime.NewManager(log, cfg.Settings.GRPC.String(), tracer) + if err != nil { + return nil, fmt.Errorf("failed to initialize runtime manager: %w", err) } - - // not in standalone; both modes require reading the fleet.yml configuration file - //var store storage.Store - //store, cfg, err = mergeFleetConfig(rawConfig) - //if err != nil { - // return nil, err - //} - - if configuration.IsFleetServerBootstrap(cfg.Fleet) { - log.Info("Agent is in Fleet Server bootstrap mode") - //return newFleetServerBootstrap(ctx, log, pathConfigFile, rawConfig, statusCtrl, agentInfo, tracer) - return 
nil, errors.New("TODO: fleet-server bootstrap mode") + composable, err := composable.New(log, rawConfig) + if err != nil { + return nil, errors.New(err, "failed to initialize composable controller") } - log.Info("Agent is managed by Fleet") - //return newManaged(ctx, log, store, cfg, rawConfig, reexec, statusCtrl, agentInfo, tracer) - return nil, errors.New("TODO: fleet mode") + coord := coordinator.New(log, specs, reexec, upgrader, runtime, configMgr, composable, caps) + if managed != nil { + // the coordinator requires the config manager as well as in managed-mode the config manager requires the + // coordinator, so it must be set here once the coordinator is created + managed.coord = coord + } + return coord, nil } func getPlatformDetail(modifiers ...PlatformModifier) (component.PlatformDetail, error) { @@ -176,3 +195,28 @@ func mergeFleetConfig(rawConfig *config.Config) (storage.Store, *configuration.C return store, cfg, nil } + +func externalConfigsGlob() string { + return filepath.Join(paths.Config(), configuration.ExternalInputsPattern) +} + +func discoverer(patterns ...string) discoverFunc { + var p []string + for _, newP := range patterns { + if len(newP) == 0 { + continue + } + + p = append(p, newP) + } + + if len(p) == 0 { + return func() ([]string, error) { + return []string{}, ErrNoConfiguration + } + } + + return func() ([]string, error) { + return dir.DiscoverFiles(p...) + } +} diff --git a/internal/pkg/agent/application/coordinator/coordinator.go b/internal/pkg/agent/application/coordinator/coordinator.go index 49cb5a6eabc..49141ab3524 100644 --- a/internal/pkg/agent/application/coordinator/coordinator.go +++ b/internal/pkg/agent/application/coordinator/coordinator.go @@ -4,19 +4,51 @@ import ( "context" "errors" "fmt" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi" + + "github.com/elastic/elastic-agent-client/v7/pkg/client" "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/reexec" + agentclient "github.com/elastic/elastic-agent/internal/pkg/agent/control/client" "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" "github.com/elastic/elastic-agent/internal/pkg/capabilities" "github.com/elastic/elastic-agent/internal/pkg/config" "github.com/elastic/elastic-agent/pkg/component" + "github.com/elastic/elastic-agent/pkg/component/runtime" "github.com/elastic/elastic-agent/pkg/core/logger" "go.elastic.co/apm" ) -// Runner provides interface to run a manager. +var ( + // ErrNotUpgradable error is returned when upgrade cannot be performed. + ErrNotUpgradable = errors.New( + "cannot be upgraded; must be installed with install sub-command and " + + "running under control of the systems supervisor") +) + +// ReExecManager provides an interface to perform re-execution of the entire agent. +type ReExecManager interface { + ReExec(callback reexec.ShutdownCallbackFn, argOverrides ...string) +} + +// UpgradeManager provides an interface to perform the upgrade action for the agent. +type UpgradeManager interface { + // Upgradeable returns true if can be upgraded. + Upgradeable() bool + + // Upgrade upgrades running agent. + Upgrade(ctx context.Context, version string, sourceURI string, action *fleetapi.ActionUpgrade) (_ reexec.ShutdownCallbackFn, err error) +} + +// Runner provides interface to run a manager and receive running errors. type Runner interface { // Run runs the manager. 
Run(context.Context) error + + // Errors returns the channel to listen to errors on. + // + // A manager should send a nil error to clear its previous error when it should no longer report as an error. + Errors() <-chan error } // RuntimeManager provides an interface to run and update the runtime. @@ -25,6 +57,33 @@ type RuntimeManager interface { // Update updates the current components model. Update([]component.Component) error + + // State returns the current components model state. + State() []runtime.ComponentComponentState + + // PerformAction executes an action on a unit. + PerformAction(ctx context.Context, unit component.Unit, name string, params map[string]interface{}) (map[string]interface{}, error) + + // SubscribeAll provides an interface to watch for changes in all components. + SubscribeAll(context.Context) *runtime.SubscriptionAll +} + +// ConfigChange provides an interface for receiving a new configuration. +// +// Ack must be called if the configuration change was accepted and Fail should be called if it fails to be accepted. +type ConfigChange interface { + // Config returns the configuration for this change. + Config() *config.Config + + // Ack marks the configuration change as accepted. + Ack() error + + // Fail marks the configuration change as failed. + Fail(err error) +} + +// ErrorReporter provides an interface for any manager that is handled by the coordinator to report errors. +type ErrorReporter interface { } // ConfigManager provides an interface to run and watch for configuration changes. @@ -32,7 +91,7 @@ type ConfigManager interface { Runner // Watch returns the chanel to watch for configuration changes. - Watch() <-chan *config.Config + Watch() <-chan ConfigChange } // VarsManager provides an interface to run and watch for variable changes. @@ -47,15 +106,36 @@ type VarsManager interface { // passing it into the components runtime manager. type ComponentsModifier func(comps []component.Component) ([]component.Component, error) -// Coordinator manages the intersection between configuration change and updated variables. +// State provides the current state of the coordinator along with all the current states of components and units. +type State struct { + State agentclient.State + Message string + Components []runtime.ComponentComponentState +} + +// StateFetcher provides an interface to fetch the current state of the coordinator. +type StateFetcher interface { + // State returns the current state of the coordinator. + State() State +} + +// Coordinator manages the entire state of the Elastic Agent. +// +// All configuration changes, update variables, and upgrade actions are managed and controlled by the coordinator. type Coordinator struct { logger *logger.Logger specs component.RuntimeSpecs - runtime RuntimeManager - config ConfigManager - vars VarsManager + reexecMgr ReExecManager + upgradeMgr UpgradeManager + + runtimeMgr RuntimeManager + runtimeMgrErr error + configMgr ConfigManager + configMgrErr error + varsMgr VarsManager + varsMgrErr error caps capabilities.Capability modifiers []ComponentsModifier @@ -64,26 +144,160 @@ type Coordinator struct { } // New creates a new coordinator. 
-func New(logger *logger.Logger, specs component.RuntimeSpecs, runtime RuntimeManager, config ConfigManager, vars VarsManager, caps capabilities.Capability, modifiers ...ComponentsModifier) *Coordinator { +func New(logger *logger.Logger, specs component.RuntimeSpecs, reexecMgr ReExecManager, upgradeMgr UpgradeManager, runtimeMgr RuntimeManager, configMgr ConfigManager, varsMgr VarsManager, caps capabilities.Capability, modifiers ...ComponentsModifier) *Coordinator { return &Coordinator{ - logger: logger, - specs: specs, - runtime: runtime, - config: config, - vars: vars, - caps: caps, - modifiers: modifiers, + logger: logger, + specs: specs, + reexecMgr: reexecMgr, + upgradeMgr: upgradeMgr, + runtimeMgr: runtimeMgr, + configMgr: configMgr, + varsMgr: varsMgr, + caps: caps, + modifiers: modifiers, + state: coordinatorState{ + state: agentclient.Starting, + }, + } +} + +// State returns the current state for the coordinator. +func (c *Coordinator) State() (s State) { + s.State = c.state.state + s.Message = c.state.message + s.Components = c.runtimeMgr.State() + if c.state.overrideState != nil { + // state has been overridden due to an action that is occurring + s.State = c.state.overrideState.state + s.Message = c.state.overrideState.message + } else if s.State == agentclient.Healthy { + // if any of the managers are reporting an error then something is wrong + // or + // coordinator overall is reported is healthy; in the case any component or unit is not healthy then we report + // as degraded because we are not fully healthy + if c.runtimeMgrErr != nil { + s.State = agentclient.Failed + s.Message = c.runtimeMgrErr.Error() + } else if c.configMgrErr != nil { + s.State = agentclient.Failed + s.Message = c.configMgrErr.Error() + } else if c.varsMgrErr != nil { + s.State = agentclient.Failed + s.Message = c.varsMgrErr.Error() + } else if hasState(s.Components, client.UnitStateFailed) { + s.State = agentclient.Degraded + s.Message = "1 or more components/units in a failed state" + } else if hasState(s.Components, client.UnitStateDegraded) { + s.State = agentclient.Degraded + s.Message = "1 or more components/units in a degraded state" + } + } + return s +} + +// ReExec performs the re-execution. +func (c *Coordinator) ReExec(callback reexec.ShutdownCallbackFn, argOverrides ...string) { + // override the overall state to stopping until the re-execution is complete + c.state.overrideState = &coordinatorOverrideState{ + state: agentclient.Stopping, + message: "Re-executing", + } + c.reexecMgr.ReExec(callback, argOverrides...) +} + +// Upgrade runs the upgrade process. 
+func (c *Coordinator) Upgrade(ctx context.Context, version string, sourceURI string, action *fleetapi.ActionUpgrade) error { + // early check outside of upgrader before overridding the state + if c.upgradeMgr.Upgradeable() { + return ErrNotUpgradable } + + // early check capabilities to ensure this upgrade actions is allowed + if c.caps != nil { + if _, err := c.caps.Apply(map[string]interface{}{ + "version": version, + "sourceURI": sourceURI, + }); errors.Is(err, capabilities.ErrBlocked) { + return ErrNotUpgradable + } + } + + // override the overall state to upgrading until the re-execution is complete + c.state.overrideState = &coordinatorOverrideState{ + state: agentclient.Upgrading, + message: fmt.Sprintf("Upgrading to version %s", version), + } + cb, err := c.upgradeMgr.Upgrade(ctx, version, sourceURI, action) + if err != nil { + c.state.overrideState = nil + return err + } + c.ReExec(cb) + return nil +} + +// PerformAction executes an action on a unit. +func (c *Coordinator) PerformAction(ctx context.Context, unit component.Unit, name string, params map[string]interface{}) (map[string]interface{}, error) { + return c.runtimeMgr.PerformAction(ctx, unit, name, params) } // Run runs the coordinator. // // The RuntimeManager, ConfigManager and VarsManager that is passed into NewCoordinator are also ran and lifecycle controlled by the Run. +// +// In the case that either of the above managers fail, they will all be restarted unless the context was explicitly cancelled or timed out. func (c *Coordinator) Run(ctx context.Context) error { + // log all changes in the state of the runtime + go func() { + state := make(map[string]coordinatorComponentLogState) + + sub := c.runtimeMgr.SubscribeAll(ctx) + for { + select { + case <-ctx.Done(): + return + case s := <-sub.Ch(): + logState := newCoordinatorComponentLogState(&s) + _, ok := state[s.Component.ID] + if !ok { + c.logger.With("component", logState).Info("New component created") + } else { + c.logger.With("component", logState).Info("Existing component state changed") + } + state[s.Component.ID] = logState + if s.State.State == client.UnitStateStopped { + delete(state, s.Component.ID) + } + } + } + }() + + for { + c.state.state = agentclient.Starting + c.state.message = "Waiting for initial configuration and composable variables" + err := c.runner(ctx) + if err != nil { + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + c.state.state = agentclient.Stopped + c.state.message = "Requested to be stopped" + // do not restart + return err + } + } + c.state.state = agentclient.Failed + c.state.message = fmt.Sprintf("Coordinator failed and will be restarted: %s", err) + c.logger.Errorf("coordinator failed and will be restarted: %s", err) + } +} + +// runner performs the actual work of running all the managers +// +// if one of the managers fails the others are also stopped and then the whole runner returns +func (c *Coordinator) runner(ctx context.Context) error { ctx, cancel := context.WithCancel(ctx) defer cancel() - runtimeWatcher := c.runtime + runtimeWatcher := c.runtimeMgr runtimeRun := make(chan bool) runtimeErrCh := make(chan error) go func(manager Runner) { @@ -92,7 +306,7 @@ func (c *Coordinator) Run(ctx context.Context) error { runtimeErrCh <- err }(runtimeWatcher) - configWatcher := c.config + configWatcher := c.configMgr configRun := make(chan bool) configErrCh := make(chan error) go func(manager Runner) { @@ -101,7 +315,7 @@ func (c *Coordinator) Run(ctx context.Context) error { configErrCh <- err 
}(configWatcher) - varsWatcher := c.vars + varsWatcher := c.varsMgr varsRun := make(chan bool) varsErrCh := make(chan error) go func(manager Runner) { @@ -114,14 +328,17 @@ func (c *Coordinator) Run(ctx context.Context) error { select { case <-ctx.Done(): runtimeErr := <-runtimeErrCh + c.runtimeMgrErr = runtimeErr + configErr := <-configErrCh + c.configMgrErr = configErr + varsErr := <-varsErrCh + c.varsMgrErr = varsErr if runtimeErr != nil && !errors.Is(runtimeErr, context.Canceled) { return runtimeErr } - configErr := <-configErrCh if configErr != nil && !errors.Is(configErr, context.Canceled) { return configErr } - varsErr := <-varsErrCh if varsErr != nil && !errors.Is(varsErr, context.Canceled) { return varsErr } @@ -138,15 +355,33 @@ func (c *Coordinator) Run(ctx context.Context) error { if ctx.Err() == nil { cancel() } - case cfg := <-configWatcher.Watch(): + case runtimeErr := <-c.runtimeMgr.Errors(): + c.runtimeMgrErr = runtimeErr + case configErr := <-c.configMgr.Errors(): + c.configMgrErr = configErr + case varsErr := <-c.varsMgr.Errors(): + c.varsMgrErr = varsErr + case change := <-configWatcher.Watch(): if ctx.Err() == nil { - if err := c.processConfig(ctx, cfg); err != nil { + if err := c.processConfig(ctx, change.Config()); err != nil { + c.state.state = agentclient.Failed + c.state.message = err.Error() c.logger.Errorf("%s", err) + change.Fail(err) + } else { + if err := change.Ack(); err != nil { + err = fmt.Errorf("failed to ack configuration change: %w", err) + c.state.state = agentclient.Failed + c.state.message = err.Error() + c.logger.Errorf("%s", err) + } } } case vars := <-varsWatcher.Watch(): if ctx.Err() == nil { if err := c.processVars(ctx, vars); err != nil { + c.state.state = agentclient.Failed + c.state.message = err.Error() c.logger.Errorf("%s", err) } } @@ -250,12 +485,102 @@ func (c *Coordinator) process(ctx context.Context) (err error) { c.logger.Info("Updating running component model") c.logger.With("components", comps).Debug("Updating running component model") - return c.runtime.Update(comps) + err = c.runtimeMgr.Update(comps) + if err != nil { + return err + } + c.state.state = agentclient.Healthy + c.state.message = "Running" + return nil } type coordinatorState struct { + state agentclient.State + message string + overrideState *coordinatorOverrideState + config *config.Config ast *transpiler.AST vars []*transpiler.Vars components []component.Component } + +type coordinatorOverrideState struct { + state agentclient.State + message string +} + +type coordinatorComponentLogState struct { + ID string `json:"id"` + State string `json:"state"` + Message string `json:"message"` + Inputs []coordinatorComponentUnitLogState `json:"inputs"` + Output coordinatorComponentUnitLogState `json:"output,omitempty"` +} + +type coordinatorComponentUnitLogState struct { + ID string `json:"id"` + State string `json:"state"` + Message string `json:"message"` +} + +func newCoordinatorComponentLogState(state *runtime.ComponentComponentState) coordinatorComponentLogState { + var output coordinatorComponentUnitLogState + inputs := make([]coordinatorComponentUnitLogState, 0, len(state.State.Units)) + for key, unit := range state.State.Units { + if key.UnitType == client.UnitTypeInput { + inputs = append(inputs, coordinatorComponentUnitLogState{ + ID: key.UnitID, + State: newCoordinatorComponentStateStr(unit.State), + Message: unit.Message, + }) + } else { + output = coordinatorComponentUnitLogState{ + ID: key.UnitID, + State: newCoordinatorComponentStateStr(unit.State), + 
Message: unit.Message, + } + } + } + return coordinatorComponentLogState{ + ID: state.Component.ID, + State: newCoordinatorComponentStateStr(state.State.State), + Message: state.State.Message, + Inputs: inputs, + Output: output, + } +} + +func newCoordinatorComponentStateStr(state client.UnitState) string { + switch state { + case client.UnitStateStarting: + return "Starting" + case client.UnitStateConfiguring: + return "Configuring" + case client.UnitStateDegraded: + return "Degraded" + case client.UnitStateHealthy: + return "Healthy" + case client.UnitStateFailed: + return "Failed" + case client.UnitStateStopping: + return "Stopping" + case client.UnitStateStopped: + return "Stopped" + } + return "Unknown" +} + +func hasState(components []runtime.ComponentComponentState, state client.UnitState) bool { + for _, comp := range components { + if comp.State.State == state { + return true + } + for _, unit := range comp.State.Units { + if unit.State == state { + return true + } + } + } + return false +} diff --git a/internal/pkg/agent/application/pipeline/dispatcher/dispatcher.go b/internal/pkg/agent/application/dispatcher/dispatcher.go similarity index 76% rename from internal/pkg/agent/application/pipeline/dispatcher/dispatcher.go rename to internal/pkg/agent/application/dispatcher/dispatcher.go index 6f036b57b21..efe70a2846c 100644 --- a/internal/pkg/agent/application/pipeline/dispatcher/dispatcher.go +++ b/internal/pkg/agent/application/dispatcher/dispatcher.go @@ -7,30 +7,34 @@ package dispatcher import ( "context" "fmt" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/actions" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "reflect" "strings" "go.elastic.co/apm" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/actions" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" "github.com/elastic/elastic-agent/pkg/core/logger" ) type actionHandlers map[string]actions.Handler +// Dispatcher processes actions coming from fleet api. +type Dispatcher interface { + Dispatch(context.Context, acker.Acker, ...fleetapi.Action) error +} + // ActionDispatcher processes actions coming from fleet using registered set of handlers. type ActionDispatcher struct { - ctx context.Context log *logger.Logger handlers actionHandlers def actions.Handler } // New creates a new action dispatcher. -func New(ctx context.Context, log *logger.Logger, def actions.Handler) (*ActionDispatcher, error) { +func New(log *logger.Logger, def actions.Handler) (*ActionDispatcher, error) { var err error if log == nil { log, err = logger.New("action_dispatcher", false) @@ -44,7 +48,6 @@ func New(ctx context.Context, log *logger.Logger, def actions.Handler) (*ActionD } return &ActionDispatcher{ - ctx: ctx, log: log, handlers: make(actionHandlers), def: def, @@ -76,21 +79,13 @@ func (ad *ActionDispatcher) key(a fleetapi.Action) string { } // Dispatch dispatches an action using pre-registered set of handlers. -// ctx is used here ONLY to carry the span, for cancelation use the cancel -// function of the ActionDispatcher.ctx. 
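+// The caller's ctx now carries both the APM span and cancellation; the dispatcher no longer holds its own context.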
-func (ad *ActionDispatcher) Dispatch(ctx context.Context, acker store.FleetAcker, actions ...fleetapi.Action) (err error) { +func (ad *ActionDispatcher) Dispatch(ctx context.Context, acker acker.Acker, actions ...fleetapi.Action) (err error) { span, ctx := apm.StartSpan(ctx, "dispatch", "app.internal") defer func() { apm.CaptureError(ctx, err).Send() span.End() }() - // Creating a child context that carries both the ad.ctx cancelation and - // the span from ctx. - ctx, cancel := context.WithCancel(ad.ctx) - defer cancel() - ctx = apm.ContextWithSpan(ctx, span) - if len(actions) == 0 { ad.log.Debug("No action to dispatch") return nil @@ -103,11 +98,11 @@ func (ad *ActionDispatcher) Dispatch(ctx context.Context, acker store.FleetAcker ) for _, action := range actions { - if err := ad.ctx.Err(); err != nil { + if err := ctx.Err(); err != nil { return err } - if err := ad.dispatchAction(action, acker); err != nil { + if err := ad.dispatchAction(ctx, action, acker); err != nil { ad.log.Debugf("Failed to dispatch action '%+v', error: %+v", action, err) return err } @@ -117,13 +112,13 @@ func (ad *ActionDispatcher) Dispatch(ctx context.Context, acker store.FleetAcker return acker.Commit(ctx) } -func (ad *ActionDispatcher) dispatchAction(a fleetapi.Action, acker store.FleetAcker) error { +func (ad *ActionDispatcher) dispatchAction(ctx context.Context, a fleetapi.Action, acker acker.Acker) error { handler, found := ad.handlers[(ad.key(a))] if !found { - return ad.def.Handle(ad.ctx, a, acker) + return ad.def.Handle(ctx, a, acker) } - return handler.Handle(ad.ctx, a, acker) + return handler.Handle(ctx, a, acker) } func detectTypes(actions []fleetapi.Action) []string { diff --git a/internal/pkg/agent/application/pipeline/dispatcher/dispatcher_test.go b/internal/pkg/agent/application/dispatcher/dispatcher_test.go similarity index 64% rename from internal/pkg/agent/application/pipeline/dispatcher/dispatcher_test.go rename to internal/pkg/agent/application/dispatcher/dispatcher_test.go index 3c65dd4a2e7..21d1cf1fd83 100644 --- a/internal/pkg/agent/application/pipeline/dispatcher/dispatcher_test.go +++ b/internal/pkg/agent/application/dispatcher/dispatcher_test.go @@ -9,22 +9,18 @@ import ( "testing" "time" - "go.elastic.co/apm" - "go.elastic.co/apm/apmtest" - + "github.com/elastic/elastic-agent/internal/pkg/fleetapi" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/noop" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - - "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi" - noopacker "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/noop" ) type mockHandler struct { mock.Mock } -func (h *mockHandler) Handle(ctx context.Context, a fleetapi.Action, acker store.FleetAcker) error { +func (h *mockHandler) Handle(ctx context.Context, a fleetapi.Action, acker acker.Acker) error { args := h.Called(ctx, a, acker) return args.Error(0) } @@ -61,52 +57,13 @@ func (m *mockAction) Expiration() (time.Time, error) { return args.Get(0).(time.Time), args.Error(1) } -type mockAcker struct { - mock.Mock -} - -func (m *mockAcker) Ack(ctx context.Context, action fleetapi.Action) error { - args := m.Called(ctx, action) - return args.Error(0) -} - -func (m *mockAcker) Commit(ctx context.Context) error { - args := m.Called(ctx) - return args.Error(0) -} - func 
TestActionDispatcher(t *testing.T) { - ack := noopacker.NewAcker() - - t.Run("Merges ActionDispatcher ctx cancel and Dispatch ctx value", func(t *testing.T) { - action1 := &mockAction{} - def := &mockHandler{} - def.On("Handle", mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() - span := apmtest.NewRecordingTracer(). - StartTransaction("ignore", "ignore"). - StartSpan("ignore", "ignore", nil) - ctx1, cancel := context.WithCancel(context.Background()) - ack := &mockAcker{} - ack.On("Commit", mock.Anything).Run(func(args mock.Arguments) { - ctx, _ := args.Get(0).(context.Context) - require.NoError(t, ctx.Err()) - got := apm.SpanFromContext(ctx) - require.Equal(t, span.TraceContext().Span, got.ParentID()) - cancel() // cancel function from ctx1 - require.Equal(t, ctx.Err(), context.Canceled) - }).Return(nil) - d, err := New(ctx1, nil, def) - require.NoError(t, err) - ctx2 := apm.ContextWithSpan(context.Background(), span) - err = d.Dispatch(ctx2, ack, action1) - require.NoError(t, err) - ack.AssertExpectations(t) - }) + ack := noop.New() t.Run("Success to dispatch multiples events", func(t *testing.T) { ctx := context.Background() def := &mockHandler{} - d, err := New(ctx, nil, def) + d, err := New(nil, def) require.NoError(t, err) success1 := &mockHandler{} @@ -136,7 +93,7 @@ func TestActionDispatcher(t *testing.T) { def := &mockHandler{} def.On("Handle", mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() ctx := context.Background() - d, err := New(ctx, nil, def) + d, err := New(nil, def) require.NoError(t, err) action := &mockUnknownAction{} @@ -151,7 +108,7 @@ func TestActionDispatcher(t *testing.T) { success2 := &mockHandler{} def := &mockHandler{} - d, err := New(context.Background(), nil, def) + d, err := New(nil, def) require.NoError(t, err) err = d.Register(&mockAction{}, success1) diff --git a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go index 4ff4c34ad42..c7de26f9609 100644 --- a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go +++ b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go @@ -8,19 +8,18 @@ import ( "context" stderr "errors" "fmt" - "sync" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" + dispatcher2 "github.com/elastic/elastic-agent/internal/pkg/agent/application/dispatcher" + agentclient "github.com/elastic/elastic-agent/internal/pkg/agent/control/client" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "time" - "github.com/elastic/elastic-agent/internal/pkg/core/state" "github.com/elastic/elastic-agent/internal/pkg/fleetapi/client" "github.com/elastic/elastic-agent/internal/pkg/agent/application/gateway" "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" "github.com/elastic/elastic-agent/internal/pkg/core/backoff" - "github.com/elastic/elastic-agent/internal/pkg/core/status" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" "github.com/elastic/elastic-agent/internal/pkg/scheduler" "github.com/elastic/elastic-agent/pkg/core/logger" @@ -54,10 +53,6 @@ type agentInfo interface { AgentID() string } -type fleetReporter interface { - Events() 
([]fleetapi.SerializableEvent, func()) -} - type stateStore interface { Add(fleetapi.Action) AckToken() string @@ -75,109 +70,102 @@ type actionQueue interface { } type fleetGateway struct { - bgContext context.Context - log *logger.Logger - dispatcher pipeline.Dispatcher - client client.Sender - scheduler scheduler.Scheduler - backoff backoff.Backoff - settings *fleetGatewaySettings - agentInfo agentInfo - reporter fleetReporter - done chan struct{} - wg sync.WaitGroup - acker store.FleetAcker - unauthCounter int - statusController status.Controller - statusReporter status.Reporter - stateStore stateStore - queue actionQueue + log *logger.Logger + dispatcher dispatcher2.Dispatcher + client client.Sender + scheduler scheduler.Scheduler + settings *fleetGatewaySettings + agentInfo agentInfo + acker acker.Acker + unauthCounter int + stateFetcher coordinator.StateFetcher + stateStore stateStore + queue actionQueue + errCh chan error } // New creates a new fleet gateway func New( - ctx context.Context, log *logger.Logger, agentInfo agentInfo, client client.Sender, - d pipeline.Dispatcher, - r fleetReporter, - acker store.FleetAcker, - statusController status.Controller, + d dispatcher2.Dispatcher, + acker acker.Acker, + stateFetcher coordinator.StateFetcher, stateStore stateStore, queue actionQueue, ) (gateway.FleetGateway, error) { scheduler := scheduler.NewPeriodicJitter(defaultGatewaySettings.Duration, defaultGatewaySettings.Jitter) return newFleetGatewayWithScheduler( - ctx, log, defaultGatewaySettings, agentInfo, client, d, scheduler, - r, acker, - statusController, + stateFetcher, stateStore, queue, ) } func newFleetGatewayWithScheduler( - ctx context.Context, log *logger.Logger, settings *fleetGatewaySettings, agentInfo agentInfo, client client.Sender, - d pipeline.Dispatcher, + d dispatcher2.Dispatcher, scheduler scheduler.Scheduler, - r fleetReporter, - acker store.FleetAcker, - statusController status.Controller, + acker acker.Acker, + stateFetcher coordinator.StateFetcher, stateStore stateStore, queue actionQueue, ) (gateway.FleetGateway, error) { - - // Backoff implementation doesn't support the use of a context [cancellation] - // as the shutdown mechanism. - // So we keep a done channel that will be closed when the current context is shutdown. - done := make(chan struct{}) - return &fleetGateway{ - bgContext: ctx, - log: log, - dispatcher: d, - client: client, - settings: settings, - agentInfo: agentInfo, - scheduler: scheduler, - backoff: backoff.NewEqualJitterBackoff( - done, - settings.Backoff.Init, - settings.Backoff.Max, - ), - done: done, - reporter: r, - acker: acker, - statusReporter: statusController.RegisterComponent("gateway"), - statusController: statusController, - stateStore: stateStore, - queue: queue, + log: log, + dispatcher: d, + client: client, + settings: settings, + agentInfo: agentInfo, + scheduler: scheduler, + acker: acker, + stateFetcher: stateFetcher, + stateStore: stateStore, + queue: queue, + errCh: make(chan error), }, nil } -func (f *fleetGateway) worker() { +func (f *fleetGateway) Run(ctx context.Context) error { + // Backoff implementation doesn't support the use of a context [cancellation] as the shutdown mechanism. + // So we keep a done channel that will be closed when the current context is shutdown. 
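+	// A small goroutine below closes the done channel once ctx is cancelled, which is what unblocks the backoff wait on shutdown.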
+ done := make(chan struct{}) + backoff := backoff.NewEqualJitterBackoff( + done, + f.settings.Backoff.Init, + f.settings.Backoff.Max, + ) + go func() { + <-ctx.Done() + close(done) + }() + + f.log.Info("Fleet gateway started") for { select { + case <-ctx.Done(): + f.scheduler.Stop() + f.log.Info("Fleet gateway stopped") + return ctx.Err() case ts := <-f.scheduler.WaitTick(): f.log.Debug("FleetGateway calling Checkin API") // Execute the checkin call and for any errors returned by the fleet-server API // the function will retry to communicate with fleet-server with an exponential delay and some // jitter to help better distribute the load from a fleet of agents. - resp, err := f.doExecute() + resp, err := f.doExecute(ctx, backoff) if err != nil { continue } @@ -194,35 +182,36 @@ func (f *fleetGateway) worker() { actions = append(actions, queued...) - var errMsg string // Persist state + hadErr := false f.stateStore.SetQueue(f.queue.Actions()) if err := f.stateStore.Save(); err != nil { - errMsg = fmt.Sprintf("failed to persist action_queue, error: %s", err) - f.log.Error(errMsg) - f.statusReporter.Update(state.Failed, errMsg, nil) + err = fmt.Errorf("failed to persist action_queue, error: %w", err) + f.log.Error(err) + f.errCh <- err + hadErr = true } if err := f.dispatcher.Dispatch(context.Background(), f.acker, actions...); err != nil { - errMsg = fmt.Sprintf("failed to dispatch actions, error: %s", err) - f.log.Error(errMsg) - f.statusReporter.Update(state.Failed, errMsg, nil) + err = fmt.Errorf("failed to dispatch actions, error: %w", err) + f.log.Error(err) + f.errCh <- err + hadErr = true } f.log.Debugf("FleetGateway is sleeping, next update in %s", f.settings.Duration) - if errMsg != "" { - f.statusReporter.Update(state.Failed, errMsg, nil) - } else { - f.statusReporter.Update(state.Healthy, "", nil) + if !hadErr { + f.errCh <- nil } - - case <-f.bgContext.Done(): - f.stop() - return } } } +// Errors returns the channel to watch for reported errors. +func (f *fleetGateway) Errors() <-chan error { + return f.errCh +} + // queueScheduledActions will add any action in actions with a valid start time to the queue and return the rest. // start time to current time comparisons are purposefully not made in case of cancel actions. func (f *fleetGateway) queueScheduledActions(input fleetapi.Actions) []fleetapi.Action { @@ -277,17 +266,17 @@ func (f *fleetGateway) gatherQueuedActions(ts time.Time) (queued, expired []flee return queued, expired } -func (f *fleetGateway) doExecute() (*fleetapi.CheckinResponse, error) { - f.backoff.Reset() +func (f *fleetGateway) doExecute(ctx context.Context, bo backoff.Backoff) (*fleetapi.CheckinResponse, error) { + bo.Reset() // Guard if the context is stopped by a out of bound call, // this mean we are rebooting to change the log level or the system is shutting us down. - for f.bgContext.Err() == nil { + for ctx.Err() == nil { f.log.Debugf("Checking started") - resp, err := f.execute(f.bgContext) + resp, err := f.execute(ctx) if err != nil { f.log.Errorf("Could not communicate with fleet-server Checking API will retry, error: %s", err) - if !f.backoff.Wait() { + if !bo.Wait() { // Something bad has happened and we log it and we should update our current state. 
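 				// bo.Wait() returns false once the done channel is closed, i.e. the gateway context was cancelled.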
err := errors.New( "execute retry loop was stopped", @@ -296,7 +285,7 @@ func (f *fleetGateway) doExecute() (*fleetapi.CheckinResponse, error) { ) f.log.Error(err) - f.statusReporter.Update(state.Failed, err.Error(), nil) + f.errCh <- err return nil, err } continue @@ -307,13 +296,10 @@ func (f *fleetGateway) doExecute() (*fleetapi.CheckinResponse, error) { // This mean that the next loop was cancelled because of the context, we should return the error // but we should not log it, because we are in the process of shutting down. - return nil, f.bgContext.Err() + return nil, ctx.Err() } func (f *fleetGateway) execute(ctx context.Context) (*fleetapi.CheckinResponse, error) { - // get events - ee, ack := f.reporter.Events() - ecsMeta, err := info.Metadata() if err != nil { f.log.Error(errors.New("failed to load metadata", err)) @@ -325,13 +311,15 @@ func (f *fleetGateway) execute(ctx context.Context) (*fleetapi.CheckinResponse, f.log.Debugf("using previously saved ack token: %v", ackToken) } + // get current state + state := f.stateFetcher.State() + // checkin cmd := fleetapi.NewCheckinCmd(f.agentInfo, f.client) req := &fleetapi.CheckinRequest{ AckToken: ackToken, - Events: ee, Metadata: ecsMeta, - Status: f.statusController.StatusString(), + Status: agentStateToString(state.State), } resp, err := cmd.Execute(ctx, req) @@ -362,8 +350,6 @@ func (f *fleetGateway) execute(ctx context.Context) (*fleetapi.CheckinResponse, } } - // ack events so they are dropped from queue - ack() return resp, nil } @@ -376,25 +362,16 @@ func isUnauth(err error) bool { return errors.Is(err, client.ErrInvalidAPIKey) } -func (f *fleetGateway) Start() error { - f.wg.Add(1) - go func(wg *sync.WaitGroup) { - defer f.log.Info("Fleet gateway is stopped") - defer wg.Done() - - f.worker() - }(&f.wg) - return nil -} - -func (f *fleetGateway) stop() { - f.log.Info("Fleet gateway is stopping") - defer f.scheduler.Stop() - f.statusReporter.Unregister() - close(f.done) - f.wg.Wait() -} - func (f *fleetGateway) SetClient(c client.Sender) { f.client = c } + +func agentStateToString(state agentclient.State) string { + switch state { + case agentclient.Healthy: + return "online" + case agentclient.Failed: + return "error" + } + return "degraded" +} diff --git a/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go b/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go index a9b9380519f..deb871192bc 100644 --- a/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go +++ b/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go @@ -8,12 +8,13 @@ package fleet import ( "bytes" "context" - "encoding/json" "fmt" "io" "io/ioutil" "net/http" "net/url" + "os" + "path/filepath" "sync" "testing" "time" @@ -22,15 +23,13 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" "github.com/elastic/elastic-agent/internal/pkg/agent/application/gateway" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/storage" "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" - noopacker "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/noop" - repo "github.com/elastic/elastic-agent/internal/pkg/reporter" - fleetreporter 
"github.com/elastic/elastic-agent/internal/pkg/reporter/fleet" - fleetreporterConfig "github.com/elastic/elastic-agent/internal/pkg/reporter/fleet/config" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/noop" "github.com/elastic/elastic-agent/internal/pkg/scheduler" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -45,9 +44,9 @@ type testingClient struct { func (t *testingClient) Send( _ context.Context, - method string, - path string, - params url.Values, + _ string, + _ string, + _ url.Values, headers http.Header, body io.Reader, ) (*http.Response, error) { @@ -80,7 +79,7 @@ type testingDispatcher struct { received chan struct{} } -func (t *testingDispatcher) Dispatch(_ context.Context, acker store.FleetAcker, actions ...fleetapi.Action) error { +func (t *testingDispatcher) Dispatch(_ context.Context, acker acker.Acker, actions ...fleetapi.Action) error { t.Lock() defer t.Unlock() defer func() { t.received <- struct{}{} }() @@ -135,7 +134,7 @@ func (m *mockQueue) Actions() []fleetapi.Action { return args.Get(0).([]fleetapi.Action) } -type withGatewayFunc func(*testing.T, gateway.FleetGateway, *testingClient, *testingDispatcher, *scheduler.Stepper, repo.Backend) +type withGatewayFunc func(*testing.T, gateway.FleetGateway, *testingClient, *testingDispatcher, *scheduler.Stepper) func withGateway(agentInfo agentInfo, settings *fleetGatewaySettings, fn withGatewayFunc) func(t *testing.T) { return func(t *testing.T) { @@ -144,37 +143,29 @@ func withGateway(agentInfo agentInfo, settings *fleetGatewaySettings, fn withGat dispatcher := newTestingDispatcher() log, _ := logger.New("fleet_gateway", false) - rep := getReporter(agentInfo, log, t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - diskStore := storage.NewDiskStore(paths.AgentStateStoreFile()) - stateStore, err := store.NewStateStore(log, diskStore) - require.NoError(t, err) + stateStore := newStateStore(t, log) queue := &mockQueue{} queue.On("DequeueActions").Return([]fleetapi.Action{}) queue.On("Actions").Return([]fleetapi.Action{}) gateway, err := newFleetGatewayWithScheduler( - ctx, log, settings, agentInfo, client, dispatcher, scheduler, - rep, - noopacker.NewAcker(), - &noopController{}, + noop.New(), + &emptyStateFetcher{}, stateStore, queue, ) require.NoError(t, err) - fn(t, gateway, client, dispatcher, scheduler, rep) + fn(t, gateway, client, dispatcher, scheduler) } } @@ -212,8 +203,10 @@ func TestFleetGateway(t *testing.T) { client *testingClient, dispatcher *testingDispatcher, scheduler *scheduler.Stepper, - rep repo.Backend, ) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + waitFn := ackSeq( client.Answer(func(headers http.Header, body io.Reader) (*http.Response, error) { resp := wrapStrToResp(http.StatusOK, `{ "actions": [] }`) @@ -224,12 +217,16 @@ func TestFleetGateway(t *testing.T) { return nil }), ) - err := gateway.Start() - require.NoError(t, err) + + errCh := runFleetGateway(ctx, gateway) // Synchronize scheduler and acking of calls from the worker go routine. 
scheduler.Next() waitFn() + + cancel() + err := <-errCh + require.NoError(t, err) })) t.Run("Successfully connects and receives a series of actions", withGateway(agentInfo, settings, func( @@ -238,8 +235,10 @@ func TestFleetGateway(t *testing.T) { client *testingClient, dispatcher *testingDispatcher, scheduler *scheduler.Stepper, - rep repo.Backend, ) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + waitFn := ackSeq( client.Answer(func(headers http.Header, body io.Reader) (*http.Response, error) { // TODO: assert no events @@ -269,11 +268,15 @@ func TestFleetGateway(t *testing.T) { return nil }), ) - err := gateway.Start() - require.NoError(t, err) + + errCh := runFleetGateway(ctx, gateway) scheduler.Next() waitFn() + + cancel() + err := <-errCh + require.NoError(t, err) })) // Test the normal time based execution. @@ -286,30 +289,24 @@ func TestFleetGateway(t *testing.T) { defer cancel() log, _ := logger.New("tst", false) - - diskStore := storage.NewDiskStore(paths.AgentStateStoreFile()) - stateStore, err := store.NewStateStore(log, diskStore) - require.NoError(t, err) + stateStore := newStateStore(t, log) queue := &mockQueue{} queue.On("DequeueActions").Return([]fleetapi.Action{}) queue.On("Actions").Return([]fleetapi.Action{}) gateway, err := newFleetGatewayWithScheduler( - ctx, log, settings, agentInfo, client, dispatcher, scheduler, - getReporter(agentInfo, log, t), - noopacker.NewAcker(), - &noopController{}, + noop.New(), + &emptyStateFetcher{}, stateStore, queue, ) - require.NoError(t, err) waitFn := ackSeq( @@ -323,8 +320,7 @@ func TestFleetGateway(t *testing.T) { }), ) - err = gateway.Start() - require.NoError(t, err) + errCh := runFleetGateway(ctx, gateway) var count int for { @@ -334,6 +330,10 @@ func TestFleetGateway(t *testing.T) { return } } + + cancel() + err = <-errCh + require.NoError(t, err) }) t.Run("queue action from checkin", func(t *testing.T) { @@ -345,10 +345,7 @@ func TestFleetGateway(t *testing.T) { defer cancel() log, _ := logger.New("tst", false) - - diskStore := storage.NewDiskStore(paths.AgentStateStoreFile()) - stateStore, err := store.NewStateStore(log, diskStore) - require.NoError(t, err) + stateStore := newStateStore(t, log) ts := time.Now().UTC().Round(time.Second) queue := &mockQueue{} @@ -357,20 +354,17 @@ func TestFleetGateway(t *testing.T) { queue.On("Actions").Return([]fleetapi.Action{}) gateway, err := newFleetGatewayWithScheduler( - ctx, log, settings, agentInfo, client, dispatcher, scheduler, - getReporter(agentInfo, log, t), - noopacker.NewAcker(), - &noopController{}, + noop.New(), + &emptyStateFetcher{}, stateStore, queue, ) - require.NoError(t, err) waitFn := ackSeq( @@ -395,12 +389,15 @@ func TestFleetGateway(t *testing.T) { }), ) - err = gateway.Start() - require.NoError(t, err) + errCh := runFleetGateway(ctx, gateway) scheduler.Next() waitFn() queue.AssertExpectations(t) + + cancel() + err = <-errCh + require.NoError(t, err) }) t.Run("run action from queue", func(t *testing.T) { @@ -412,10 +409,7 @@ func TestFleetGateway(t *testing.T) { defer cancel() log, _ := logger.New("tst", false) - - diskStore := storage.NewDiskStore(paths.AgentStateStoreFile()) - stateStore, err := store.NewStateStore(log, diskStore) - require.NoError(t, err) + stateStore := newStateStore(t, log) ts := time.Now().UTC().Round(time.Second) queue := &mockQueue{} @@ -423,20 +417,17 @@ func TestFleetGateway(t *testing.T) { queue.On("Actions").Return([]fleetapi.Action{}) gateway, err := newFleetGatewayWithScheduler( - ctx, log, settings, 
agentInfo, client, dispatcher, scheduler, - getReporter(agentInfo, log, t), - noopacker.NewAcker(), - &noopController{}, + noop.New(), + &emptyStateFetcher{}, stateStore, queue, ) - require.NoError(t, err) waitFn := ackSeq( @@ -450,12 +441,15 @@ func TestFleetGateway(t *testing.T) { }), ) - err = gateway.Start() - require.NoError(t, err) + errCh := runFleetGateway(ctx, gateway) scheduler.Next() waitFn() queue.AssertExpectations(t) + + cancel() + err = <-errCh + require.NoError(t, err) }) t.Run("discard expired action from queue", func(t *testing.T) { @@ -467,10 +461,7 @@ func TestFleetGateway(t *testing.T) { defer cancel() log, _ := logger.New("tst", false) - - diskStore := storage.NewDiskStore(paths.AgentStateStoreFile()) - stateStore, err := store.NewStateStore(log, diskStore) - require.NoError(t, err) + stateStore := newStateStore(t, log) ts := time.Now().UTC().Round(time.Second) queue := &mockQueue{} @@ -478,20 +469,17 @@ func TestFleetGateway(t *testing.T) { queue.On("Actions").Return([]fleetapi.Action{}) gateway, err := newFleetGatewayWithScheduler( - ctx, log, settings, agentInfo, client, dispatcher, scheduler, - getReporter(agentInfo, log, t), - noopacker.NewAcker(), - &noopController{}, + noop.New(), + &emptyStateFetcher{}, stateStore, queue, ) - require.NoError(t, err) waitFn := ackSeq( @@ -505,12 +493,15 @@ func TestFleetGateway(t *testing.T) { }), ) - err = gateway.Start() - require.NoError(t, err) + errCh := runFleetGateway(ctx, gateway) scheduler.Next() waitFn() queue.AssertExpectations(t) + + cancel() + err = <-errCh + require.NoError(t, err) }) t.Run("cancel action from checkin", func(t *testing.T) { @@ -522,10 +513,7 @@ func TestFleetGateway(t *testing.T) { defer cancel() log, _ := logger.New("tst", false) - - diskStore := storage.NewDiskStore(paths.AgentStateStoreFile()) - stateStore, err := store.NewStateStore(log, diskStore) - require.NoError(t, err) + stateStore := newStateStore(t, log) ts := time.Now().UTC().Round(time.Second) queue := &mockQueue{} @@ -535,20 +523,17 @@ func TestFleetGateway(t *testing.T) { // queue.Cancel does not need to be mocked here as it is ran in the cancel action dispatcher. 
gateway, err := newFleetGatewayWithScheduler( - ctx, log, settings, agentInfo, client, dispatcher, scheduler, - getReporter(agentInfo, log, t), - noopacker.NewAcker(), - &noopController{}, + noop.New(), + &emptyStateFetcher{}, stateStore, queue, ) - require.NoError(t, err) waitFn := ackSeq( @@ -578,52 +563,16 @@ func TestFleetGateway(t *testing.T) { }), ) - err = gateway.Start() - require.NoError(t, err) + errCh := runFleetGateway(ctx, gateway) scheduler.Next() waitFn() queue.AssertExpectations(t) - }) - t.Run("send event and receive no action", withGateway(agentInfo, settings, func( - t *testing.T, - gateway gateway.FleetGateway, - client *testingClient, - dispatcher *testingDispatcher, - scheduler *scheduler.Stepper, - rep repo.Backend, - ) { - _ = rep.Report(context.Background(), &testStateEvent{}) - waitFn := ackSeq( - client.Answer(func(headers http.Header, body io.Reader) (*http.Response, error) { - cr := &request{} - content, err := ioutil.ReadAll(body) - if err != nil { - t.Fatal(err) - } - err = json.Unmarshal(content, &cr) - if err != nil { - t.Fatal(err) - } - - require.Equal(t, 1, len(cr.Events)) - - resp := wrapStrToResp(http.StatusOK, `{ "actions": [] }`) - return resp, nil - }), - dispatcher.Answer(func(actions ...fleetapi.Action) error { - require.Equal(t, 0, len(actions)) - return nil - }), - ) - err := gateway.Start() + cancel() + err = <-errCh require.NoError(t, err) - - // Synchronize scheduler and acking of calls from the worker go routine. - scheduler.Next() - waitFn() - })) + }) t.Run("Test the wait loop is interruptible", func(t *testing.T) { // 20mins is the double of the base timeout values for golang test suites. @@ -634,18 +583,15 @@ func TestFleetGateway(t *testing.T) { dispatcher := newTestingDispatcher() ctx, cancel := context.WithCancel(context.Background()) - log, _ := logger.New("tst", false) - diskStore := storage.NewDiskStore(paths.AgentStateStoreFile()) - stateStore, err := store.NewStateStore(log, diskStore) - require.NoError(t, err) + log, _ := logger.New("tst", false) + stateStore := newStateStore(t, log) queue := &mockQueue{} queue.On("DequeueActions").Return([]fleetapi.Action{}) queue.On("Actions").Return([]fleetapi.Action{}) gateway, err := newFleetGatewayWithScheduler( - ctx, log, &fleetGatewaySettings{ Duration: d, @@ -655,13 +601,11 @@ func TestFleetGateway(t *testing.T) { client, dispatcher, scheduler, - getReporter(agentInfo, log, t), - noopacker.NewAcker(), - &noopController{}, + noop.New(), + &emptyStateFetcher{}, stateStore, queue, ) - require.NoError(t, err) ch1 := dispatcher.Answer(func(actions ...fleetapi.Action) error { return nil }) @@ -670,8 +614,7 @@ func TestFleetGateway(t *testing.T) { return resp, nil }) - err = gateway.Start() - require.NoError(t, err) + errCh := runFleetGateway(ctx, gateway) // Silently dispatch action. go func() { @@ -694,6 +637,8 @@ func TestFleetGateway(t *testing.T) { // 2. WaitTick() will block for 20 minutes. // 3. Stop will should unblock the wait. 
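 		// With the Run/Errors model, cancelling the context below is what unblocks the wait.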
cancel() + err = <-errCh + require.NoError(t, err) }) } @@ -712,16 +657,16 @@ func TestRetriesOnFailures(t *testing.T) { client *testingClient, dispatcher *testingDispatcher, scheduler *scheduler.Stepper, - rep repo.Backend, ) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + fail := func(_ http.Header, _ io.Reader) (*http.Response, error) { return wrapStrToResp(http.StatusInternalServerError, "something is bad"), nil } clientWaitFn := client.Answer(fail) - err := gateway.Start() - require.NoError(t, err) - _ = rep.Report(context.Background(), &testStateEvent{}) + errCh := runFleetGateway(ctx, gateway) // Initial tick is done out of bound so we can block on channels. scheduler.Next() @@ -734,18 +679,6 @@ func TestRetriesOnFailures(t *testing.T) { // API recover waitFn := ackSeq( client.Answer(func(_ http.Header, body io.Reader) (*http.Response, error) { - cr := &request{} - content, err := ioutil.ReadAll(body) - if err != nil { - t.Fatal(err) - } - err = json.Unmarshal(content, &cr) - if err != nil { - t.Fatal(err) - } - - require.Equal(t, 1, len(cr.Events)) - resp := wrapStrToResp(http.StatusOK, `{ "actions": [] }`) return resp, nil }), @@ -757,6 +690,10 @@ func TestRetriesOnFailures(t *testing.T) { ) waitFn() + + cancel() + err := <-errCh + require.NoError(t, err) })) t.Run("The retry loop is interruptible", @@ -769,16 +706,16 @@ func TestRetriesOnFailures(t *testing.T) { client *testingClient, dispatcher *testingDispatcher, scheduler *scheduler.Stepper, - rep repo.Backend, ) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + fail := func(_ http.Header, _ io.Reader) (*http.Response, error) { return wrapStrToResp(http.StatusInternalServerError, "something is bad"), nil } waitChan := client.Answer(fail) - err := gateway.Start() - require.NoError(t, err) - _ = rep.Report(context.Background(), &testStateEvent{}) + errCh := runFleetGateway(ctx, gateway) // Initial tick is done out of bound so we can block on channels. scheduler.Next() @@ -787,32 +724,59 @@ func TestRetriesOnFailures(t *testing.T) { // delay. <-waitChan - // non-obvious but withGateway on return will stop the gateway before returning and we should - // exit the retry loop. The init value of the backoff is set to exceed the test default timeout. 
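+			// Cancelling the test context now stops the retry loop; the backoff init value is set to exceed the test default timeout.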
+ cancel() + err := <-errCh + require.NoError(t, err) })) } -func getReporter(info agentInfo, log *logger.Logger, t *testing.T) *fleetreporter.Reporter { - fleetR, err := fleetreporter.NewReporter(info, log, fleetreporterConfig.DefaultConfig()) - if err != nil { - t.Fatal(errors.Wrap(err, "fail to create reporters")) - } +type testAgentInfo struct{} + +func (testAgentInfo) AgentID() string { return "agent-secret" } + +type emptyStateFetcher struct{} - return fleetR +func (e *emptyStateFetcher) State() coordinator.State { + return coordinator.State{} } -type testAgentInfo struct{} +func runFleetGateway(ctx context.Context, g gateway.FleetGateway) <-chan error { + done := make(chan bool) + errCh := make(chan error, 1) + go func() { + err := g.Run(ctx) + close(done) + if err != nil && !errors.Is(err, context.Canceled) { + errCh <- err + } else { + errCh <- nil + } + }() + go func() { + for { + select { + case <-done: + return + case <-g.Errors(): + // ignore errors here + } + } + }() + return errCh +} -func (testAgentInfo) AgentID() string { return "agent-secret" } +func newStateStore(t *testing.T, log *logger.Logger) *store.StateStore { + dir, err := ioutil.TempDir("", "fleet-gateway-unit-test") + require.NoError(t, err) -type testStateEvent struct{} + filename := filepath.Join(dir, "state.enc") + diskStore := storage.NewDiskStore(filename) + stateStore, err := store.NewStateStore(log, diskStore) + require.NoError(t, err) -func (testStateEvent) Type() string { return repo.EventTypeState } -func (testStateEvent) SubType() string { return repo.EventSubTypeInProgress } -func (testStateEvent) Time() time.Time { return time.Unix(0, 1) } -func (testStateEvent) Message() string { return "hello" } -func (testStateEvent) Payload() map[string]interface{} { return map[string]interface{}{"key": 1} } + t.Cleanup(func() { + os.RemoveAll(dir) + }) -type request struct { - Events []interface{} `json:"events"` + return stateStore } diff --git a/internal/pkg/agent/application/gateway/fleet/noop_status_controller.go b/internal/pkg/agent/application/gateway/fleet/noop_status_controller.go deleted file mode 100644 index d5097655a63..00000000000 --- a/internal/pkg/agent/application/gateway/fleet/noop_status_controller.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package fleet - -import ( - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/internal/pkg/core/status" -) - -type noopController struct{} - -func (*noopController) RegisterComponent(_ string) status.Reporter { return &noopReporter{} } -func (*noopController) RegisterComponentWithPersistance(_ string, _ bool) status.Reporter { - return &noopReporter{} -} -func (*noopController) RegisterApp(_ string, _ string) status.Reporter { return &noopReporter{} } -func (*noopController) Status() status.AgentStatus { return status.AgentStatus{Status: status.Healthy} } -func (*noopController) StatusCode() status.AgentStatusCode { return status.Healthy } -func (*noopController) UpdateStateID(_ string) {} -func (*noopController) StatusString() string { return "online" } - -type noopReporter struct{} - -func (*noopReporter) Update(_ state.Status, _ string, _ map[string]interface{}) {} -func (*noopReporter) Unregister() {} diff --git a/internal/pkg/agent/application/gateway/gateway.go b/internal/pkg/agent/application/gateway/gateway.go index 47591a4a04e..d43dd32a0c2 100644 --- a/internal/pkg/agent/application/gateway/gateway.go +++ b/internal/pkg/agent/application/gateway/gateway.go @@ -4,16 +4,23 @@ package gateway -import "github.com/elastic/elastic-agent/internal/pkg/fleetapi/client" +import ( + "context" + + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/client" +) // FleetGateway is a gateway between the Agent and the Fleet API, it's take cares of all the // bidirectional communication requirements. The gateway aggregates events and will periodically // call the API to send the events and will receive actions to be executed locally. // The only supported action for now is a "ActionPolicyChange". type FleetGateway interface { - // Start starts the gateway. - Start() error + // Run runs the gateway. + Run(ctx context.Context) error + + // Errors returns the channel to watch for reported errors. + Errors() <-chan error - // Set the client for the gateway. + // SetClient sets the client for the gateway. SetClient(client.Sender) } diff --git a/internal/pkg/agent/application/local_mode.go b/internal/pkg/agent/application/local_mode.go deleted file mode 100644 index 599174b4fa8..00000000000 --- a/internal/pkg/agent/application/local_mode.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package application - -import ( - "context" - "fmt" - "go.elastic.co/apm" - "path/filepath" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade" - "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/artifact" - "github.com/elastic/elastic-agent/internal/pkg/capabilities" - "github.com/elastic/elastic-agent/internal/pkg/composable" - "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/internal/pkg/core/status" - "github.com/elastic/elastic-agent/internal/pkg/dir" - acker "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/noop" - "github.com/elastic/elastic-agent/internal/pkg/sorted" - "github.com/elastic/elastic-agent/pkg/component" - "github.com/elastic/elastic-agent/pkg/component/runtime" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -type discoverFunc func() ([]string, error) - -// ErrNoConfiguration is returned when no configuration are found. -var ErrNoConfiguration = errors.New("no configuration found", errors.TypeConfig) - -// Local represents a standalone agents, that will read his configuration directly from disk. -// Some part of the configuration can be reloaded. -type Local struct { - log *logger.Logger - agentInfo *info.AgentInfo - caps capabilities.Capability - reexec reexecManager - uc upgraderControl - downloadCfg *artifact.Config - - runtime coordinator.RuntimeManager - config coordinator.ConfigManager - composable coordinator.VarsManager - - coordinator *coordinator.Coordinator -} - -// newLocal return an agent managed by local configuration. 
-func newLocal( - log *logger.Logger, - specs component.RuntimeSpecs, - caps capabilities.Capability, - cfg *configuration.Configuration, - pathConfigFile string, - rawConfig *config.Config, - reexec reexecManager, - statusCtrl status.Controller, - uc upgraderControl, - agentInfo *info.AgentInfo, - tracer *apm.Tracer, -) (*Local, error) { - localApplication := &Local{ - log: log, - agentInfo: agentInfo, - caps: caps, - reexec: reexec, - uc: uc, - downloadCfg: cfg.Settings.DownloadConfig, - } - - loader := config.NewLoader(log, externalConfigsGlob()) - discover := discoverer(pathConfigFile, cfg.Settings.Path, externalConfigsGlob()) - if !cfg.Settings.Reload.Enabled { - log.Debug("Reloading of configuration is off") - localApplication.config = newOnce(log, discover, loader) - } else { - log.Debugf("Reloading of configuration is on, frequency is set to %s", cfg.Settings.Reload.Period) - localApplication.config = newPeriodic(log, cfg.Settings.Reload.Period, discover, loader) - } - - var err error - localApplication.runtime, err = runtime.NewManager(log, cfg.Settings.GRPC.String(), tracer) - if err != nil { - return nil, fmt.Errorf("failed to initialize runtime manager: %w", err) - } - localApplication.composable, err = composable.New(log, rawConfig) - if err != nil { - return nil, errors.New(err, "failed to initialize composable controller") - } - - localApplication.coordinator = coordinator.New(log, specs, localApplication.runtime, localApplication.config, localApplication.composable, caps) - - return localApplication, nil -} - -func externalConfigsGlob() string { - return filepath.Join(paths.Config(), configuration.ExternalInputsPattern) -} - -// Routes returns a list of routes handled by agent. -func (l *Local) Routes() *sorted.Set { - return nil -} - -// Run runs the local agent. -// -// Blocks until the context is cancelled. -func (l *Local) Run(ctx context.Context) error { - ctx, cancel := context.WithCancel(ctx) - - u := upgrade.NewUpgrader( - l.agentInfo, - l.downloadCfg, - l.log, - []context.CancelFunc{cancel}, - l.reexec, - acker.NewAcker(), - nil, - l.caps) - l.uc.SetUpgrader(u) - - err := l.coordinator.Run(ctx) - - l.uc.SetUpgrader(nil) - return err -} - -// AgentInfo retrieves agent information. -func (l *Local) AgentInfo() *info.AgentInfo { - return l.agentInfo -} - -func discoverer(patterns ...string) discoverFunc { - var p []string - for _, newP := range patterns { - if len(newP) == 0 { - continue - } - - p = append(p, newP) - } - - if len(p) == 0 { - return func() ([]string, error) { - return []string{}, ErrNoConfiguration - } - } - - return func() ([]string, error) { - return dir.DiscoverFiles(p...) 
- } -} diff --git a/internal/pkg/agent/application/managed_mode.go b/internal/pkg/agent/application/managed_mode.go index 036ffebdc89..b68d010ada4 100644 --- a/internal/pkg/agent/application/managed_mode.go +++ b/internal/pkg/agent/application/managed_mode.go @@ -4,97 +4,49 @@ package application -/* import ( "context" "fmt" + handlers2 "github.com/elastic/elastic-agent/internal/pkg/agent/application/actions/handlers" "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" - "github.com/elastic/elastic-agent/internal/pkg/artifact" - "github.com/elastic/elastic-agent/pkg/component" - "github.com/elastic/elastic-agent/pkg/component/runtime" - "go.elastic.co/apm" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/filters" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/gateway" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/dispatcher" fleetgateway "github.com/elastic/elastic-agent/internal/pkg/agent/application/gateway/fleet" - localgateway "github.com/elastic/elastic-agent/internal/pkg/agent/application/gateway/fleetserver" "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/actions/handlers" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/dispatcher" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/emitter" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/emitter/modifiers" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/router" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/stream" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade" "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/operation" "github.com/elastic/elastic-agent/internal/pkg/agent/storage" "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" - "github.com/elastic/elastic-agent/internal/pkg/capabilities" - "github.com/elastic/elastic-agent/internal/pkg/composable" - "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/internal/pkg/core/monitoring" - "github.com/elastic/elastic-agent/internal/pkg/core/status" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/fleet" "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/lazy" "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/retrier" "github.com/elastic/elastic-agent/internal/pkg/fleetapi/client" "github.com/elastic/elastic-agent/internal/pkg/queue" - reporting "github.com/elastic/elastic-agent/internal/pkg/reporter" - fleetreporter "github.com/elastic/elastic-agent/internal/pkg/reporter/fleet" - logreporter "github.com/elastic/elastic-agent/internal/pkg/reporter/log" - 
"github.com/elastic/elastic-agent/internal/pkg/sorted" + "github.com/elastic/elastic-agent/internal/pkg/remote" "github.com/elastic/elastic-agent/pkg/core/logger" - "github.com/elastic/elastic-agent/pkg/core/server" ) -type stateStore interface { - Add(fleetapi.Action) - AckToken() string - SetAckToken(ackToken string) - Save() error - Actions() []fleetapi.Action - Queue() []fleetapi.Action -} - -// Managed application, when the application is run in managed mode, most of the configuration are -// coming from the Fleet App. -type Managed struct { - log *logger.Logger - Config configuration.FleetAgentConfig - gateway gateway.FleetGateway - stateStore stateStore - upgrader *upgrade.Upgrader - +type managedConfigManager struct { + log *logger.Logger agentInfo *info.AgentInfo - caps capabilities.Capability - reexec reexecManager - uc upgraderControl - downloadCfg *artifact.Config - - runtime coordinator.RuntimeManager - config coordinator.ConfigManager - composable coordinator.VarsManager - - coordinator *coordinator.Coordinator + cfg *configuration.Configuration + client *remote.Client + store storage.Store + stateStore *store.StateStore + actionQueue *queue.ActionQueue + coord *coordinator.Coordinator + + ch chan coordinator.ConfigChange + errCh chan error } -func newManaged( +func newManagedConfigManager( log *logger.Logger, - specs component.RuntimeSpecs, - caps capabilities.Capability, + agentInfo *info.AgentInfo, cfg *configuration.Configuration, storeSaver storage.Store, - rawConfig *config.Config, - reexec reexecManager, - statusCtrl status.Controller, - agentInfo *info.AgentInfo, - tracer *apm.Tracer, -) (*Managed, error) { +) (*managedConfigManager, error) { client, err := client.NewAuthWithConfig(log, cfg.Fleet.AccessAPIKey, cfg.Fleet.Client) if err != nil { return nil, errors.New(err, @@ -103,68 +55,179 @@ func newManaged( errors.M(errors.MetaKeyURI, cfg.Fleet.Client.Host)) } - managedApplication := &Managed{ - log: log, - agentInfo: agentInfo, + // Create the state store that will persist the last good policy change on disk. + stateStore, err := store.NewStateStoreWithMigration(log, paths.AgentActionStoreFile(), paths.AgentStateStoreFile()) + if err != nil { + return nil, errors.New(err, fmt.Sprintf("fail to read action store '%s'", paths.AgentActionStoreFile())) } - managedApplication.runtime, err = runtime.NewManager(log, cfg.Settings.GRPC.String(), tracer) + actionQueue, err := queue.NewActionQueue(stateStore.Queue()) if err != nil { - return nil, fmt.Errorf("failed to initialize runtime manager: %w", err) + return nil, fmt.Errorf("unable to initialize action queue: %w", err) } - managedApplication.composable, err = composable.New(log, rawConfig) - if err != nil { - return nil, errors.New(err, "failed to initialize composable controller") + + return &managedConfigManager{ + log: log, + agentInfo: agentInfo, + cfg: cfg, + client: client, + store: storeSaver, + stateStore: stateStore, + actionQueue: actionQueue, + ch: make(chan coordinator.ConfigChange), + errCh: make(chan error), + }, nil +} + +func (m *managedConfigManager) Run(ctx context.Context) error { + // Check setup correctly in application (the actionDispatcher and coord must be set manually) + if m.coord == nil { + return errors.New("coord must be set before calling Run") } - managedApplication.coordinator = coordinator.New(log, specs, managedApplication.runtime, managedApplication.config, managedApplication.composable, caps) + // Un-enrolled so we will not do anything. 
+ if m.wasUnenrolled() { + m.log.Warnf("Elastic Agent was previously unenrolled. To reactivate please reconfigure or enroll again.") + return nil + } - acker, err := fleet.NewAcker(log, agentInfo, client) - if err != nil { - return nil, err + // Reload ID because of win7 sync issue + if err := m.agentInfo.ReloadID(); err != nil { + return err } - // Create ack retrier that is used by lazyAcker to enqueue/retry failed acks - retrier := retrier.New(acker, log) - // Run acking retrier. The lazy acker sends failed actions acks to retrier. - go retrier.Run(ctx) + // Create context that is cancelled on unenroll. + gatewayCtx, gatewayCancel := context.WithCancel(ctx) + defer gatewayCancel() - batchedAcker := lazy.NewAcker(acker, log, lazy.WithRetrier(retrier)) + // Create the actionDispatcher. + actionDispatcher, err := newManagedActionDispatcher(m, gatewayCancel) + if err != nil { + return err + } - // Create the state store that will persist the last good policy change on disk. - stateStore, err := store.NewStateStoreWithMigration(log, paths.AgentActionStoreFile(), paths.AgentStateStoreFile()) + // Create ackers to enqueue/retry failed acks + ack, err := fleet.NewAcker(m.log, m.agentInfo, m.client) if err != nil { - return nil, errors.New(err, fmt.Sprintf("fail to read action store '%s'", paths.AgentActionStoreFile())) + return fmt.Errorf("failed to create acker: %w", err) } - managedApplication.stateStore = stateStore - actionAcker := store.NewStateStoreActionAcker(batchedAcker, stateStore) + retrier := retrier.New(ack, m.log) + batchedAcker := lazy.NewAcker(ack, m.log, lazy.WithRetrier(retrier)) + actionAcker := store.NewStateStoreActionAcker(batchedAcker, m.stateStore) + + // Run the retrier. + retrierRun := make(chan bool) + retrierCtx, retrierCancel := context.WithCancel(ctx) + defer func() { + retrierCancel() + <-retrierRun + }() + go func() { + retrier.Run(retrierCtx) + close(retrierRun) + }() - actionQueue, err := queue.NewActionQueue(stateStore.Queue()) + actions := m.stateStore.Actions() + //stateRestored := false + if len(actions) > 0 && !m.wasUnenrolled() { + // TODO(ph) We will need an improvement on fleet, if there is an error while dispatching a + // persisted action on disk we should be able to ask Fleet to get the latest configuration. + // But at the moment this is not possible because the policy change was acked. + if err := store.ReplayActions(ctx, m.log, actionDispatcher, actionAcker, actions...); err != nil { + m.log.Errorf("could not recover state, error %+v, skipping...", err) + } + //stateRestored = true + } + + gateway, err := fleetgateway.New( + m.log, + m.agentInfo, + m.client, + actionDispatcher, + actionAcker, + m.coord, + m.stateStore, + m.actionQueue, + ) if err != nil { - return nil, fmt.Errorf("unable to initialize action queue: %w", err) + return err } - actionDispatcher, err := dispatcher.New(managedApplication.bgContext, log, handlers.NewDefault(log)) + // Proxy errors from the gateway to our own channel. + go func() { + for { + select { + case <-ctx.Done(): + return + case err := <-gateway.Errors(): + m.errCh <- err + } + } + }() + + // Run the gateway. 
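+	// gatewayRun lets the deferred cancel wait for the gateway goroutine to exit before Run returns.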
+ gatewayRun := make(chan bool) + gatewayErrCh := make(chan error) + defer func() { + gatewayCancel() + <-gatewayRun + }() + go func() { + err := gateway.Run(gatewayCtx) + close(gatewayRun) + gatewayErrCh <- err + }() + + /* + gateway, err = localgateway.New(ctx, m.log, m.cfg.Fleet, rawConfig, gateway, emit, !stateRestored) + if err != nil { + return nil, err + } + // add the acker and gateway to setters, so the they can be updated + // when the hosts for Fleet Server are updated by the policy. + if cfg.Fleet.Server == nil { + // setters only set when not running a local Fleet Server + policyChanger.AddSetter(gateway) + policyChanger.AddSetter(acker) + } + + managedApplication.gateway = gateway + */ + + <-ctx.Done() + return <-gatewayErrCh +} + +func (m *managedConfigManager) Errors() <-chan error { + return m.errCh +} + +func (m *managedConfigManager) Watch() <-chan coordinator.ConfigChange { + return m.ch +} + +func (m *managedConfigManager) wasUnenrolled() bool { + actions := m.stateStore.Actions() + for _, a := range actions { + if a.Type() == "UNENROLL" { + return true + } + } + return false +} + +func newManagedActionDispatcher(m *managedConfigManager, canceller context.CancelFunc) (*dispatcher.ActionDispatcher, error) { + actionDispatcher, err := dispatcher.New(m.log, handlers2.NewDefault(m.log)) if err != nil { return nil, err } - managedApplication.upgrader = upgrade.NewUpgrader( - agentInfo, - cfg.Settings.DownloadConfig, - log, - []context.CancelFunc{managedApplication.cancelCtxFn}, - reexec, - acker, - combinedReporter, - caps) - - policyChanger := handlers.NewPolicyChange( - log, - emit, - agentInfo, - cfg, - storeSaver, + policyChanger := handlers2.NewPolicyChange( + m.log, + m.agentInfo, + m.cfg, + m.store, + m.ch, ) actionDispatcher.MustRegister( @@ -174,147 +237,50 @@ func newManaged( actionDispatcher.MustRegister( &fleetapi.ActionPolicyReassign{}, - handlers.NewPolicyReassign(log), + handlers2.NewPolicyReassign(m.log), ) actionDispatcher.MustRegister( &fleetapi.ActionUnenroll{}, - handlers.NewUnenroll( - log, - emit, - router, - []context.CancelFunc{managedApplication.cancelCtxFn}, - stateStore, + handlers2.NewUnenroll( + m.log, + m.ch, + []context.CancelFunc{canceller}, + m.stateStore, ), ) actionDispatcher.MustRegister( &fleetapi.ActionUpgrade{}, - handlers.NewUpgrade(log, managedApplication.upgrader), + handlers2.NewUpgrade(m.log, m.coord), ) actionDispatcher.MustRegister( &fleetapi.ActionSettings{}, - handlers.NewSettings( - log, - reexec, - agentInfo, + handlers2.NewSettings( + m.log, + m.agentInfo, + m.coord, ), ) actionDispatcher.MustRegister( &fleetapi.ActionCancel{}, - handlers.NewCancel( - log, - actionQueue, + handlers2.NewCancel( + m.log, + m.actionQueue, ), ) actionDispatcher.MustRegister( &fleetapi.ActionApp{}, - handlers.NewAppAction(log, managedApplication.srv), + handlers2.NewAppAction(m.log, m.coord), ) actionDispatcher.MustRegister( &fleetapi.ActionUnknown{}, - handlers.NewUnknown(log), + handlers2.NewUnknown(m.log), ) - actions := stateStore.Actions() - stateRestored := false - if len(actions) > 0 && !managedApplication.wasUnenrolled() { - // TODO(ph) We will need an improvement on fleet, if there is an error while dispatching a - // persisted action on disk we should be able to ask Fleet to get the latest configuration. - // But at the moment this is not possible because the policy change was acked. 
- if err := store.ReplayActions(ctx, log, actionDispatcher, actionAcker, actions...); err != nil { - log.Errorf("could not recover state, error %+v, skipping...", err) - } - stateRestored = true - } - - gateway, err := fleetgateway.New( - managedApplication.bgContext, - log, - agentInfo, - client, - actionDispatcher, - fleetR, - actionAcker, - statusCtrl, - stateStore, - actionQueue, - ) - if err != nil { - return nil, err - } - gateway, err = localgateway.New(managedApplication.bgContext, log, cfg.Fleet, rawConfig, gateway, emit, !stateRestored) - if err != nil { - return nil, err - } - // add the acker and gateway to setters, so the they can be updated - // when the hosts for Fleet Server are updated by the policy. - if cfg.Fleet.Server == nil { - // setters only set when not running a local Fleet Server - policyChanger.AddSetter(gateway) - policyChanger.AddSetter(acker) - } - - managedApplication.gateway = gateway - return managedApplication, nil -} - -// Routes returns a list of routes handled by agent. -func (m *Managed) Routes() *sorted.Set { - return m.router.Routes() -} - -// Start starts a managed elastic-agent. -func (m *Managed) Start() error { - m.log.Info("Agent is starting") - if m.wasUnenrolled() { - m.log.Warnf("agent was previously unenrolled. To reactivate please reconfigure or enroll again.") - return nil - } - - // reload ID because of win7 sync issue - if err := m.agentInfo.ReloadID(); err != nil { - return err - } - - err := m.upgrader.Ack(m.bgContext) - if err != nil { - m.log.Warnf("failed to ack update %v", err) - } - - err = m.gateway.Start() - if err != nil { - return err - } - return nil -} - -// Stop stops a managed elastic-agent. -func (m *Managed) Stop() error { - defer m.log.Info("Agent is stopped") - m.cancelCtxFn() - m.router.Shutdown() - m.srv.Stop() - return nil -} - -// AgentInfo retrieves elastic-agent information. 
-func (m *Managed) AgentInfo() *info.AgentInfo { - return m.agentInfo -} - -func (m *Managed) wasUnenrolled() bool { - actions := m.stateStore.Actions() - for _, a := range actions { - if a.Type() == "UNENROLL" { - return true - } - } - - return false + return actionDispatcher, nil } -*/ diff --git a/internal/pkg/agent/application/managed_mode_test.go b/internal/pkg/agent/application/managed_mode_test.go index 847211dc079..2ebf26cc1aa 100644 --- a/internal/pkg/agent/application/managed_mode_test.go +++ b/internal/pkg/agent/application/managed_mode_test.go @@ -7,6 +7,8 @@ package application import ( "context" "encoding/json" + handlers2 "github.com/elastic/elastic-agent/internal/pkg/agent/application/actions/handlers" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/dispatcher" "testing" "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" @@ -17,8 +19,6 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/actions/handlers" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/dispatcher" "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/emitter" "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/emitter/modifiers" "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/router" @@ -50,13 +50,13 @@ func TestManagedModeRouting(t *testing.T) { emit, err := emitter.New(ctx, log, agentInfo, composableCtrl, router, &pipeline.ConfigModifiers{Decorators: []pipeline.DecoratorFunc{modifiers.InjectMonitoring}}, nil) require.NoError(t, err) - actionDispatcher, err := dispatcher.New(ctx, log, handlers.NewDefault(log)) + actionDispatcher, err := dispatcher.New(ctx, log, handlers2.NewDefault(log)) require.NoError(t, err) cfg := configuration.DefaultConfiguration() actionDispatcher.MustRegister( &fleetapi.ActionPolicyChange{}, - handlers.NewPolicyChange( + handlers2.NewPolicyChange( log, emit, agentInfo, diff --git a/internal/pkg/agent/application/once.go b/internal/pkg/agent/application/once.go index a87b2f97151..2ef6940cff9 100644 --- a/internal/pkg/agent/application/once.go +++ b/internal/pkg/agent/application/once.go @@ -7,6 +7,7 @@ package application import ( "context" "fmt" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/config" @@ -17,11 +18,12 @@ type once struct { log *logger.Logger discover discoverFunc loader *config.Loader - ch chan *config.Config + ch chan coordinator.ConfigChange + errCh chan error } func newOnce(log *logger.Logger, discover discoverFunc, loader *config.Loader) *once { - return &once{log: log, discover: discover, loader: loader, ch: make(chan *config.Config)} + return &once{log: log, discover: discover, loader: loader, ch: make(chan coordinator.ConfigChange), errCh: make(chan error)} } func (o *once) Run(ctx context.Context) error { @@ -38,12 +40,16 @@ func (o *once) Run(ctx context.Context) error { if err != nil { return err } - o.ch <- cfg + o.ch <- &localConfigChange{cfg} <-ctx.Done() return ctx.Err() } -func (o *once) Watch() <-chan *config.Config { +func (o *once) Errors() <-chan error { + return o.errCh +} + +func (o *once) Watch() 
<-chan coordinator.ConfigChange { return o.ch } diff --git a/internal/pkg/agent/application/periodic.go b/internal/pkg/agent/application/periodic.go index 21900530c3e..fcc2967c163 100644 --- a/internal/pkg/agent/application/periodic.go +++ b/internal/pkg/agent/application/periodic.go @@ -6,6 +6,7 @@ package application import ( "context" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" "strings" "time" @@ -21,7 +22,8 @@ type periodic struct { watcher *filewatcher.Watch loader *config.Loader discover discoverFunc - ch chan *config.Config + ch chan coordinator.ConfigChange + errCh chan error } func (p *periodic) Run(ctx context.Context) error { @@ -44,7 +46,11 @@ func (p *periodic) Run(ctx context.Context) error { } } -func (p *periodic) Watch() <-chan *config.Config { +func (p *periodic) Errors() <-chan error { + return p.errCh +} + +func (p *periodic) Watch() <-chan coordinator.ConfigChange { return p.ch } @@ -97,7 +103,7 @@ func (p *periodic) work() error { p.watcher.Invalidate() return err } - p.ch <- cfg + p.ch <- &localConfigChange{cfg} return nil } @@ -124,6 +130,24 @@ func newPeriodic( watcher: w, discover: discover, loader: loader, - ch: make(chan *config.Config), + ch: make(chan coordinator.ConfigChange), + errCh: make(chan error), } } + +type localConfigChange struct { + cfg *config.Config +} + +func (l *localConfigChange) Config() *config.Config { + return l.cfg +} + +func (l *localConfigChange) Ack() error { + // do nothing + return nil +} + +func (l *localConfigChange) Fail(_ error) { + // do nothing +} diff --git a/internal/pkg/agent/application/pipeline/emitter/controller.go b/internal/pkg/agent/application/pipeline/emitter/controller.go deleted file mode 100644 index 7f83961586c..00000000000 --- a/internal/pkg/agent/application/pipeline/emitter/controller.go +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package emitter - -import ( - "context" - "sync" - - "go.elastic.co/apm" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" - "github.com/elastic/elastic-agent/internal/pkg/capabilities" - "github.com/elastic/elastic-agent/internal/pkg/composable" - "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -type reloadable interface { - Reload(cfg *config.Config) error -} - -// Controller is an emitter controller handling config updates. -type Controller struct { - logger *logger.Logger - agentInfo *info.AgentInfo - controller composable.Controller - router pipeline.Router - modifiers *pipeline.ConfigModifiers - reloadables []reloadable - caps capabilities.Capability - - // state - lock sync.RWMutex - updateLock sync.Mutex - config *config.Config - ast *transpiler.AST - vars []*transpiler.Vars -} - -// NewController creates a new emitter controller. 
-func NewController( - log *logger.Logger, - agentInfo *info.AgentInfo, - controller composable.Controller, - router pipeline.Router, - modifiers *pipeline.ConfigModifiers, - caps capabilities.Capability, - reloadables ...reloadable, -) *Controller { - init, _ := transpiler.NewVars(map[string]interface{}{}, nil) - - return &Controller{ - logger: log, - agentInfo: agentInfo, - controller: controller, - router: router, - modifiers: modifiers, - reloadables: reloadables, - vars: []*transpiler.Vars{init}, - caps: caps, - } -} - -// Update applies config change and performes all steps necessary to apply it. -func (e *Controller) Update(ctx context.Context, c *config.Config) (err error) { - span, ctx := apm.StartSpan(ctx, "update", "app.internal") - defer func() { - apm.CaptureError(ctx, err).Send() - span.End() - }() - - if err := info.InjectAgentConfig(c); err != nil { - return err - } - - // perform and verify ast translation - m, err := c.ToMapStr() - if err != nil { - return errors.New(err, "could not create the AST from the configuration", errors.TypeConfig) - } - - rawAst, err := transpiler.NewAST(m) - if err != nil { - return errors.New(err, "could not create the AST from the configuration", errors.TypeConfig) - } - - if e.caps != nil { - var ok bool - updatedAst, err := e.caps.Apply(rawAst) - if err != nil { - return errors.New(err, "failed to apply capabilities") - } - - rawAst, ok = updatedAst.(*transpiler.AST) - if !ok { - return errors.New("failed to transform object returned from capabilities to AST", errors.TypeConfig) - } - } - - for _, filter := range e.modifiers.Filters { - if err := filter(e.logger, rawAst); err != nil { - return errors.New(err, "failed to filter configuration", errors.TypeConfig) - } - } - - e.lock.Lock() - e.config = c - e.ast = rawAst - e.lock.Unlock() - - return e.update(ctx) -} - -// Set sets the transpiler vars for dynamic inputs resolution. 
-func (e *Controller) Set(ctx context.Context, vars []*transpiler.Vars) { - if err := e.set(ctx, vars); err != nil { - e.logger.Errorf("Failed to render configuration with latest context from composable controller: %s", err) - } -} - -func (e *Controller) set(ctx context.Context, vars []*transpiler.Vars) (err error) { - span, ctx := apm.StartSpan(ctx, "set", "app.internal") - defer func() { - apm.CaptureError(ctx, err).Send() - span.End() - }() - e.lock.Lock() - ast := e.ast - e.vars = vars - e.lock.Unlock() - - if ast != nil { - return e.update(ctx) - } - return nil -} - -func (e *Controller) update(ctx context.Context) (err error) { - span, ctx := apm.StartSpan(ctx, "update", "app.internal") - defer func() { - apm.CaptureError(ctx, err).Send() - span.End() - }() - // locking whole update because it can be called concurrently via Set and Update method - e.updateLock.Lock() - defer e.updateLock.Unlock() - - e.lock.RLock() - cfg := e.config - rawAst := e.ast - varsArray := e.vars - e.lock.RUnlock() - - ast := rawAst.Clone() - inputs, ok := transpiler.Lookup(ast, "inputs") - if ok { - renderedInputs, err := transpiler.RenderInputs(inputs, varsArray) - if err != nil { - return err - } - err = transpiler.Insert(ast, renderedInputs, "inputs") - if err != nil { - return errors.New(err, "inserting rendered inputs failed") - } - } - - e.logger.Debug("Converting single configuration into specific programs configuration") - - programsToRun, err := program.Programs(e.agentInfo, ast) - if err != nil { - return err - } - - for _, decorator := range e.modifiers.Decorators { - for outputType, ptr := range programsToRun { - programsToRun[outputType], err = decorator(e.agentInfo, outputType, ast, ptr) - if err != nil { - return err - } - } - } - - for _, r := range e.reloadables { - if err := r.Reload(cfg); err != nil { - return err - } - } - - return e.router.Route(ctx, ast.HashStr(), programsToRun) -} diff --git a/internal/pkg/agent/application/pipeline/emitter/emitter.go b/internal/pkg/agent/application/pipeline/emitter/emitter.go deleted file mode 100644 index 9fd15edb7e3..00000000000 --- a/internal/pkg/agent/application/pipeline/emitter/emitter.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package emitter - -import ( - "context" - - "go.elastic.co/apm" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/capabilities" - "github.com/elastic/elastic-agent/internal/pkg/composable" - "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -// New creates a new emitter function. -func New(ctx context.Context, log *logger.Logger, agentInfo *info.AgentInfo, controller composable.Controller, router pipeline.Router, modifiers *pipeline.ConfigModifiers, caps capabilities.Capability, reloadables ...reloadable) (pipeline.EmitterFunc, error) { - ctrl := NewController(log, agentInfo, controller, router, modifiers, caps, reloadables...) 
- err := controller.Run(ctx) - if err != nil { - return nil, errors.New(err, "failed to start composable controller") - } - return func(ctx context.Context, c *config.Config) (err error) { - span, ctx := apm.StartSpan(ctx, "update", "app.internal") - defer func() { - apm.CaptureError(ctx, err).Send() - span.End() - }() - return ctrl.Update(ctx, c) - }, nil -} diff --git a/internal/pkg/agent/application/pipeline/emitter/emitter_test.go b/internal/pkg/agent/application/pipeline/emitter/emitter_test.go deleted file mode 100644 index a38b1bb1ded..00000000000 --- a/internal/pkg/agent/application/pipeline/emitter/emitter_test.go +++ /dev/null @@ -1,5 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package emitter diff --git a/internal/pkg/agent/application/pipeline/emitter/modifiers/fleet_decorator.go b/internal/pkg/agent/application/pipeline/emitter/modifiers/fleet_decorator.go deleted file mode 100644 index e1555393b84..00000000000 --- a/internal/pkg/agent/application/pipeline/emitter/modifiers/fleet_decorator.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package modifiers - -import ( - "github.com/elastic/go-sysinfo/types" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" - "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -// InjectFleet injects fleet metadata into a configuration. -func InjectFleet(cfg *config.Config, hostInfo types.HostInfo, agentInfo *info.AgentInfo) func(*logger.Logger, *transpiler.AST) error { - return func(logger *logger.Logger, rootAst *transpiler.AST) error { - config, err := cfg.ToMapStr() - if err != nil { - return err - } - ast, err := transpiler.NewAST(config) - if err != nil { - return err - } - fleet, ok := transpiler.Lookup(ast, "fleet") - if !ok { - // no fleet from configuration; skip - return nil - } - - // copy top-level agent.* into fleet.agent.* (this gets sent to Applications in this structure) - if agent, ok := transpiler.Lookup(ast, "agent"); ok { - if err := transpiler.Insert(ast, agent, "fleet"); err != nil { - return errors.New(err, "inserting agent info failed") - } - } - - // ensure that the agent.logging.level is present - if _, found := transpiler.Lookup(ast, "agent.logging.level"); !found { - transpiler.Insert(ast, transpiler.NewKey("level", transpiler.NewStrVal(agentInfo.LogLevel())), "agent.logging") - } - - // fleet.host to Agent can be the host to connect to Fleet Server, but to Applications it should - // be the fleet.host.id. 
move fleet.host to fleet.hosts if fleet.hosts doesn't exist - if _, ok := transpiler.Lookup(ast, "fleet.hosts"); !ok { - if host, ok := transpiler.Lookup(ast, "fleet.host"); ok { - if key, ok := host.(*transpiler.Key); ok { - if value, ok := key.Value().(*transpiler.StrVal); ok { - hosts := transpiler.NewList([]transpiler.Node{transpiler.NewStrVal(value.String())}) - if err := transpiler.Insert(ast, hosts, "fleet.hosts"); err != nil { - return errors.New(err, "inserting fleet hosts failed") - } - } - } - } - } - - // inject host.* into fleet.host.* (this gets sent to Applications in this structure) - host := transpiler.NewKey("host", transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("id", transpiler.NewStrVal(hostInfo.UniqueID)), - })) - if err := transpiler.Insert(ast, host, "fleet"); err != nil { - return errors.New(err, "inserting list of hosts failed") - } - - // inject fleet.* from local AST to the rootAST so its present when sending to Applications. - err = transpiler.Insert(rootAst, fleet.Value().(transpiler.Node), "fleet") - if err != nil { - return errors.New(err, "inserting fleet info failed") - } - return nil - } -} diff --git a/internal/pkg/agent/application/pipeline/emitter/modifiers/monitoring_decorator.go b/internal/pkg/agent/application/pipeline/emitter/modifiers/monitoring_decorator.go deleted file mode 100644 index d9377aa9e61..00000000000 --- a/internal/pkg/agent/application/pipeline/emitter/modifiers/monitoring_decorator.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package modifiers - -import ( - "crypto/md5" - "fmt" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" -) - -const ( - // MonitoringName is a name used for artificial program generated when monitoring is needed. - MonitoringName = "FLEET_MONITORING" - programsKey = "programs" - monitoringChecksumKey = "monitoring_checksum" - monitoringKey = "agent.monitoring" - monitoringUseOutputKey = "agent.monitoring.use_output" - monitoringOutputFormatKey = "outputs.%s" - outputKey = "output" - - enabledKey = "agent.monitoring.enabled" - logsKey = "agent.monitoring.logs" - metricsKey = "agent.monitoring.metrics" - outputsKey = "outputs" - elasticsearchKey = "elasticsearch" - typeKey = "type" - defaultOutputName = "default" -) - -// InjectMonitoring injects a monitoring configuration into a group of programs if needed. 
-func InjectMonitoring(agentInfo *info.AgentInfo, outputGroup string, rootAst *transpiler.AST, programsToRun []program.Program) ([]program.Program, error) { - var err error - monitoringProgram := program.Program{ - Spec: program.Spec{ - Name: MonitoringName, - Cmd: MonitoringName, - }, - } - - // if monitoring is not specified use default one where everything is enabled - if _, found := transpiler.Lookup(rootAst, monitoringKey); !found { - monitoringNode := transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("enabled", transpiler.NewBoolVal(true)), - transpiler.NewKey("logs", transpiler.NewBoolVal(true)), - transpiler.NewKey("metrics", transpiler.NewBoolVal(true)), - transpiler.NewKey("use_output", transpiler.NewStrVal("default")), - transpiler.NewKey("namespace", transpiler.NewStrVal("default")), - }) - - transpiler.Insert(rootAst, transpiler.NewKey("monitoring", monitoringNode), "settings") - } - - // get monitoring output name to be used - monitoringOutputName, found := transpiler.LookupString(rootAst, monitoringUseOutputKey) - if !found { - monitoringOutputName = defaultOutputName - } - - typeValue, found := transpiler.LookupString(rootAst, fmt.Sprintf("%s.%s.type", outputsKey, monitoringOutputName)) - if !found { - typeValue = elasticsearchKey - } - - ast := rootAst.Clone() - if err := getMonitoringRule(monitoringOutputName, typeValue).Apply(agentInfo, ast); err != nil { - return programsToRun, err - } - - config, err := ast.Map() - if err != nil { - return programsToRun, err - } - - programList := make([]string, 0, len(programsToRun)) - cfgHash := md5.New() - for _, p := range programsToRun { - programList = append(programList, p.Spec.CommandName()) - cfgHash.Write(p.Config.Hash()) - } - // making program list and their hashes part of the config - // so it will get regenerated with every change - config[programsKey] = programList - config[monitoringChecksumKey] = fmt.Sprintf("%x", cfgHash.Sum(nil)) - - monitoringProgram.Config, err = transpiler.NewAST(config) - if err != nil { - return programsToRun, err - } - - return append(programsToRun, monitoringProgram), nil -} - -func getMonitoringRule(outputName string, t string) *transpiler.RuleList { - monitoringOutputSelector := fmt.Sprintf(monitoringOutputFormatKey, outputName) - return transpiler.NewRuleList( - transpiler.Copy(monitoringOutputSelector, outputKey), - transpiler.Rename(fmt.Sprintf("%s.%s", outputsKey, outputName), t), - transpiler.Filter(monitoringKey, programsKey, outputKey), - ) -} diff --git a/internal/pkg/agent/application/pipeline/emitter/modifiers/monitoring_decorator_test.go b/internal/pkg/agent/application/pipeline/emitter/modifiers/monitoring_decorator_test.go deleted file mode 100644 index 735e27cd725..00000000000 --- a/internal/pkg/agent/application/pipeline/emitter/modifiers/monitoring_decorator_test.go +++ /dev/null @@ -1,686 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package modifiers - -import ( - "fmt" - "testing" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" - "github.com/elastic/elastic-agent/internal/pkg/testutils" -) - -func TestMonitoringInjection(t *testing.T) { - tests := []struct { - name string - inputConfig map[string]interface{} - uname string - }{ - { - name: "testMonitoringInjection", - inputConfig: inputConfigMap, - uname: "monitoring-uname", - }, - { - name: "testMonitoringInjectionDefaults", - inputConfig: inputConfigMapDefaults, - uname: "xxx", - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - testMonitoringInjection(t, tc.inputConfig, tc.uname) - }) - } -} - -func testMonitoringInjection(t *testing.T, inputConfig map[string]interface{}, testUname string) { - testutils.InitStorage(t) - - agentInfo, err := info.NewAgentInfo(true) - if err != nil { - t.Fatal(err) - } - ast, err := transpiler.NewAST(inputConfig) - if err != nil { - t.Fatal(err) - } - - programsToRun, err := program.Programs(agentInfo, ast) - if err != nil { - t.Fatal(err) - } - - if len(programsToRun) != 1 { - t.Fatal(fmt.Errorf("programsToRun expected to have %d entries", 1)) - } - -GROUPLOOP: - for group, ptr := range programsToRun { - programsCount := len(ptr) - newPtr, err := InjectMonitoring(agentInfo, group, ast, ptr) - if err != nil { - t.Error(err) - continue GROUPLOOP - } - - if programsCount+1 != len(newPtr) { - t.Errorf("incorrect programs to run count, expected: %d, got %d", programsCount+1, len(newPtr)) - continue GROUPLOOP - } - - for _, p := range newPtr { - if p.Spec.Name != MonitoringName { - continue - } - - cm, err := p.Config.Map() - if err != nil { - t.Error(err) - continue GROUPLOOP - } - - outputCfg, found := cm[outputKey] - if !found { - t.Errorf("output not found for '%s'", group) - continue GROUPLOOP - } - - outputMap, ok := outputCfg.(map[string]interface{}) - if !ok { - t.Errorf("output is not a map for '%s'", group) - continue GROUPLOOP - } - - esCfg, found := outputMap["elasticsearch"] - if !found { - t.Errorf("elasticsearch output not found for '%s'", group) - continue GROUPLOOP - } - - esMap, ok := esCfg.(map[string]interface{}) - if !ok { - t.Errorf("output.elasticsearch is not a map for '%s'", group) - continue GROUPLOOP - } - - if uname, found := esMap["username"]; !found { - t.Errorf("output.elasticsearch.username output not found for '%s'", group) - continue GROUPLOOP - } else if uname != testUname { - t.Errorf("output.elasticsearch.username has incorrect value expected '%s', got '%s for %s", "monitoring-uname", uname, group) - continue GROUPLOOP - } - } - } -} - -func TestMonitoringToLogstashInjection(t *testing.T) { - testutils.InitStorage(t) - - agentInfo, err := info.NewAgentInfo(true) - if err != nil { - t.Fatal(err) - } - ast, err := transpiler.NewAST(inputConfigLS) - if err != nil { - t.Fatal(err) - } - - programsToRun, err := program.Programs(agentInfo, ast) - if err != nil { - t.Fatal(err) - } - - if len(programsToRun) != 1 { - t.Fatal(fmt.Errorf("programsToRun expected to have %d entries", 1)) - } - -GROUPLOOP: - for group, ptr := range programsToRun { - programsCount := len(ptr) - newPtr, err := InjectMonitoring(agentInfo, group, ast, ptr) - if err != nil { - t.Error(err) - continue GROUPLOOP - } - - if programsCount+1 != len(newPtr) { - t.Errorf("incorrect programs to run count, expected: %d, got %d", 
programsCount+1, len(newPtr)) - continue GROUPLOOP - } - - for _, p := range newPtr { - if p.Spec.Name != MonitoringName { - continue - } - - cm, err := p.Config.Map() - if err != nil { - t.Error(err) - continue GROUPLOOP - } - - outputCfg, found := cm[outputKey] - if !found { - t.Errorf("output not found for '%s'", group) - continue GROUPLOOP - } - - outputMap, ok := outputCfg.(map[string]interface{}) - if !ok { - t.Errorf("output is not a map for '%s'", group) - continue GROUPLOOP - } - - esCfg, found := outputMap["logstash"] - if !found { - t.Errorf("logstash output not found for '%s' %v", group, outputMap) - continue GROUPLOOP - } - - esMap, ok := esCfg.(map[string]interface{}) - if !ok { - t.Errorf("output.logstash is not a map for '%s'", group) - continue GROUPLOOP - } - - if uname, found := esMap["hosts"]; !found { - t.Errorf("output.logstash.hosts output not found for '%s'", group) - continue GROUPLOOP - } else if uname != "192.168.1.2" { - t.Errorf("output.logstash.hosts has incorrect value expected '%s', got '%s for %s", "monitoring-uname", uname, group) - continue GROUPLOOP - } - } - } -} - -func TestMonitoringInjectionDisabled(t *testing.T) { - testutils.InitStorage(t) - - agentInfo, err := info.NewAgentInfo(true) - if err != nil { - t.Fatal(err) - } - ast, err := transpiler.NewAST(inputConfigMapDisabled) - if err != nil { - t.Fatal(err) - } - - programsToRun, err := program.Programs(agentInfo, ast) - if err != nil { - t.Fatal(err) - } - - if len(programsToRun) != 2 { - t.Fatal(fmt.Errorf("programsToRun expected to have %d entries", 2)) - } - -GROUPLOOP: - for group, ptr := range programsToRun { - programsCount := len(ptr) - newPtr, err := InjectMonitoring(agentInfo, group, ast, ptr) - if err != nil { - t.Error(err) - continue GROUPLOOP - } - - if programsCount+1 != len(newPtr) { - t.Errorf("incorrect programs to run count, expected: %d, got %d", programsCount+1, len(newPtr)) - continue GROUPLOOP - } - - for _, p := range newPtr { - if p.Spec.Name != MonitoringName { - continue - } - - cm, err := p.Config.Map() - if err != nil { - t.Error(err) - continue GROUPLOOP - } - - // is enabled set - agentObj, found := cm["agent"] - if !found { - t.Errorf("settings not found for '%s(%s)': %v", group, p.Spec.Name, cm) - continue GROUPLOOP - } - - agentMap, ok := agentObj.(map[string]interface{}) - if !ok { - t.Errorf("settings not a map for '%s(%s)': %v", group, p.Spec.Name, cm) - continue GROUPLOOP - } - - monitoringObj, found := agentMap["monitoring"] - if !found { - t.Errorf("agent.monitoring not found for '%s(%s)': %v", group, p.Spec.Name, cm) - continue GROUPLOOP - } - - monitoringMap, ok := monitoringObj.(map[string]interface{}) - if !ok { - t.Errorf("agent.monitoring not a map for '%s(%s)': %v", group, p.Spec.Name, cm) - continue GROUPLOOP - } - - enabledVal, found := monitoringMap["enabled"] - if !found { - t.Errorf("monitoring.enabled not found for '%s(%s)': %v", group, p.Spec.Name, cm) - continue GROUPLOOP - } - - monitoringEnabled, ok := enabledVal.(bool) - if !ok { - t.Errorf("agent.monitoring.enabled is not a bool for '%s'", group) - continue GROUPLOOP - } - - if monitoringEnabled { - t.Errorf("agent.monitoring.enabled is enabled, should be disabled for '%s'", group) - continue GROUPLOOP - } - } - } -} - -func TestChangeInMonitoringWithChangeInInput(t *testing.T) { - testutils.InitStorage(t) - - agentInfo, err := info.NewAgentInfo(true) - if err != nil { - t.Fatal(err) - } - - astBefore, err := transpiler.NewAST(inputChange1) - if err != nil { - t.Fatal(err) - } - - 
programsToRunBefore, err := program.Programs(agentInfo, astBefore) - if err != nil { - t.Fatal(err) - } - - if len(programsToRunBefore) != 1 { - t.Fatal(fmt.Errorf("programsToRun expected to have %d entries", 1)) - } - - astAfter, err := transpiler.NewAST(inputChange2) - if err != nil { - t.Fatal(err) - } - - programsToRunAfter, err := program.Programs(agentInfo, astAfter) - if err != nil { - t.Fatal(err) - } - - if len(programsToRunAfter) != 1 { - t.Fatal(fmt.Errorf("programsToRun expected to have %d entries", 1)) - } - - // inject to both - var hashConfigBefore, hashConfigAfter string -GROUPLOOPBEFORE: - for group, ptr := range programsToRunBefore { - programsCount := len(ptr) - newPtr, err := InjectMonitoring(agentInfo, group, astBefore, ptr) - if err != nil { - t.Error(err) - continue GROUPLOOPBEFORE - } - - if programsCount+1 != len(newPtr) { - t.Errorf("incorrect programs to run count, expected: %d, got %d", programsCount+1, len(newPtr)) - continue GROUPLOOPBEFORE - } - - for _, p := range newPtr { - if p.Spec.Name != MonitoringName { - continue - } - - hashConfigBefore = p.Config.HashStr() - } - } - -GROUPLOOPAFTER: - for group, ptr := range programsToRunAfter { - programsCount := len(ptr) - newPtr, err := InjectMonitoring(agentInfo, group, astAfter, ptr) - if err != nil { - t.Error(err) - continue GROUPLOOPAFTER - } - - if programsCount+1 != len(newPtr) { - t.Errorf("incorrect programs to run count, expected: %d, got %d", programsCount+1, len(newPtr)) - continue GROUPLOOPAFTER - } - - for _, p := range newPtr { - if p.Spec.Name != MonitoringName { - continue - } - - hashConfigAfter = p.Config.HashStr() - } - } - - if hashConfigAfter == "" || hashConfigBefore == "" { - t.Fatal("hash configs uninitialized") - } - - if hashConfigAfter == hashConfigBefore { - t.Fatal("hash config equal, expected to be different") - } -} - -var inputConfigMap = map[string]interface{}{ - "agent.monitoring": map[string]interface{}{ - "enabled": true, - "logs": true, - "metrics": true, - "use_output": "monitoring", - }, - "outputs": map[string]interface{}{ - "default": map[string]interface{}{ - "index_name": "general", - "pass": "xxx", - "type": "elasticsearch", - "url": "xxxxx", - "username": "xxx", - }, - "infosec1": map[string]interface{}{ - "pass": "xxx", - "spool": map[string]interface{}{ - "file": "${path.data}/spool.dat", - }, - "type": "elasticsearch", - "url": "xxxxx", - "username": "xxx", - }, - "monitoring": map[string]interface{}{ - "type": "elasticsearch", - "index_name": "general", - "pass": "xxx", - "url": "xxxxx", - "username": "monitoring-uname", - }, - }, - "inputs": []map[string]interface{}{ - { - "type": "log", - "use_output": "infosec1", - "streams": []map[string]interface{}{ - {"paths": "/xxxx"}, - }, - "processors": []interface{}{ - map[string]interface{}{ - "dissect": map[string]interface{}{ - "tokenizer": "---", - }, - }, - }, - }, - { - "type": "system/metrics", - "use_output": "infosec1", - "streams": []map[string]interface{}{ - { - "id": "system/metrics-system.core", - "enabled": true, - "dataset": "system.core", - "period": "10s", - "metrics": []string{"percentages"}, - }, - }, - }, - }, -} - -var inputConfigMapDefaults = map[string]interface{}{ - "outputs": map[string]interface{}{ - "default": map[string]interface{}{ - "index_name": "general", - "pass": "xxx", - "type": "elasticsearch", - "url": "xxxxx", - "username": "xxx", - }, - "infosec1": map[string]interface{}{ - "pass": "xxx", - "spool": map[string]interface{}{ - "file": "${path.data}/spool.dat", - }, - "type": 
"elasticsearch", - "url": "xxxxx", - "username": "xxx", - }, - "monitoring": map[string]interface{}{ - "type": "elasticsearch", - "index_name": "general", - "pass": "xxx", - "url": "xxxxx", - "username": "monitoring-uname", - }, - }, - - "inputs": []map[string]interface{}{ - { - "type": "log", - "use_output": "infosec1", - "streams": []map[string]interface{}{ - {"paths": "/xxxx"}, - }, - "processors": []interface{}{ - map[string]interface{}{ - "dissect": map[string]interface{}{ - "tokenizer": "---", - }, - }, - }, - }, - { - "type": "system/metrics", - "use_output": "infosec1", - "streams": []map[string]interface{}{ - { - "id": "system/metrics-system.core", - "enabled": true, - "dataset": "system.core", - "period": "10s", - "metrics": []string{"percentages"}, - }, - }, - }, - }, -} - -var inputConfigMapDisabled = map[string]interface{}{ - "agent.monitoring": map[string]interface{}{ - "enabled": false, - }, - "outputs": map[string]interface{}{ - "default": map[string]interface{}{ - "index_name": "general", - "pass": "xxx", - "type": "elasticsearch", - "url": "xxxxx", - "username": "xxx", - }, - "infosec1": map[string]interface{}{ - "pass": "xxx", - "spool": map[string]interface{}{ - "file": "${path.data}/spool.dat", - }, - "type": "elasticsearch", - "url": "xxxxx", - "username": "xxx", - }, - "monitoring": map[string]interface{}{ - "type": "elasticsearch", - "index_name": "general", - "pass": "xxx", - "url": "xxxxx", - "username": "monitoring-uname", - }, - }, - - "inputs": []map[string]interface{}{ - { - "type": "log", - "streams": []map[string]interface{}{ - {"paths": "/xxxx"}, - }, - "processors": []interface{}{ - map[string]interface{}{ - "dissect": map[string]interface{}{ - "tokenizer": "---", - }, - }, - }, - }, - { - "type": "system/metrics", - "use_output": "infosec1", - "streams": []map[string]interface{}{ - { - "id": "system/metrics-system.core", - "enabled": true, - "dataset": "system.core", - "period": "10s", - "metrics": []string{"percentages"}, - }, - }, - }, - }, -} - -var inputChange1 = map[string]interface{}{ - "agent.monitoring": map[string]interface{}{ - "enabled": true, - "logs": true, - "metrics": true, - "use_output": "monitoring", - }, - "outputs": map[string]interface{}{ - "default": map[string]interface{}{ - "index_name": "general", - "pass": "xxx", - "type": "elasticsearch", - "url": "xxxxx", - "username": "xxx", - }, - "monitoring": map[string]interface{}{ - "type": "elasticsearch", - "index_name": "general", - "pass": "xxx", - "url": "xxxxx", - "username": "monitoring-uname", - }, - }, - "inputs": []map[string]interface{}{ - { - "type": "log", - "streams": []map[string]interface{}{ - {"paths": "/xxxx"}, - }, - "processors": []interface{}{ - map[string]interface{}{ - "dissect": map[string]interface{}{ - "tokenizer": "---", - }, - }, - }, - }, - }, -} - -var inputChange2 = map[string]interface{}{ - "agent.monitoring": map[string]interface{}{ - "enabled": true, - "logs": true, - "metrics": true, - "use_output": "monitoring", - }, - "outputs": map[string]interface{}{ - "default": map[string]interface{}{ - "index_name": "general", - "pass": "xxx", - "type": "elasticsearch", - "url": "xxxxx", - "username": "xxx", - }, - "monitoring": map[string]interface{}{ - "type": "elasticsearch", - "index_name": "general", - "pass": "xxx", - "url": "xxxxx", - "username": "monitoring-uname", - }, - }, - "inputs": []map[string]interface{}{ - { - "type": "log", - "streams": []map[string]interface{}{ - {"paths": "/xxxx"}, - {"paths": "/yyyy"}, - }, - "processors": []interface{}{ - 
map[string]interface{}{ - "dissect": map[string]interface{}{ - "tokenizer": "---", - }, - }, - }, - }, - }, -} - -var inputConfigLS = map[string]interface{}{ - "agent.monitoring": map[string]interface{}{ - "enabled": true, - "logs": true, - "metrics": true, - "use_output": "monitoring", - }, - "outputs": map[string]interface{}{ - "default": map[string]interface{}{ - "index_name": "general", - "pass": "xxx", - "type": "elasticsearch", - "url": "xxxxx", - "username": "xxx", - }, - "monitoring": map[string]interface{}{ - "type": "logstash", - "hosts": "192.168.1.2", - "ssl.certificate_authorities": []string{"/etc/pki.key"}, - }, - }, - "inputs": []map[string]interface{}{ - { - "type": "log", - "streams": []map[string]interface{}{ - {"paths": "/xxxx"}, - }, - "processors": []interface{}{ - map[string]interface{}{ - "dissect": map[string]interface{}{ - "tokenizer": "---", - }, - }, - }, - }, - }, -} diff --git a/internal/pkg/agent/application/pipeline/pipeline.go b/internal/pkg/agent/application/pipeline/pipeline.go deleted file mode 100644 index 764d920cff9..00000000000 --- a/internal/pkg/agent/application/pipeline/pipeline.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package pipeline - -import ( - "context" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/configrequest" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" - "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" - "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi" - "github.com/elastic/elastic-agent/internal/pkg/sorted" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -// ConfigHandler is capable of handling configrequest. -type ConfigHandler interface { - HandleConfig(context.Context, configrequest.Request) error - Close() error - Shutdown() -} - -// DefaultRK default routing keys until we implement the routing key / config matrix. -var DefaultRK = "default" - -// RoutingKey is used for routing as pipeline id. -type RoutingKey = string - -// Router is an interface routing programs to the corresponding stream. -type Router interface { - Routes() *sorted.Set - Route(ctx context.Context, id string, grpProg map[RoutingKey][]program.Program) error - Shutdown() -} - -// StreamFunc creates a stream out of routing key. -type StreamFunc func(*logger.Logger, RoutingKey) (Stream, error) - -// Stream is capable of executing configrequest change. -type Stream interface { - Execute(context.Context, configrequest.Request) error - Close() error - Shutdown() -} - -// EmitterFunc emits configuration for processing. -type EmitterFunc func(context.Context, *config.Config) error - -// DecoratorFunc is a func for decorating a retrieved configuration before processing. -type DecoratorFunc = func(*info.AgentInfo, string, *transpiler.AST, []program.Program) ([]program.Program, error) - -// FilterFunc is a func for filtering a retrieved configuration before processing. 
-type FilterFunc = func(*logger.Logger, *transpiler.AST) error - -// ConfigModifiers is a collections of filters and decorators applied while processing configuration. -type ConfigModifiers struct { - Filters []FilterFunc - Decorators []DecoratorFunc -} - -// Dispatcher processes actions coming from fleet api. -type Dispatcher interface { - Dispatch(context.Context, store.FleetAcker, ...fleetapi.Action) error -} diff --git a/internal/pkg/agent/application/pipeline/router/router.go b/internal/pkg/agent/application/pipeline/router/router.go deleted file mode 100644 index e1f1d63c8b5..00000000000 --- a/internal/pkg/agent/application/pipeline/router/router.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package router - -import ( - "context" - "fmt" - "strings" - "time" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/configrequest" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/sorted" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -type router struct { - log *logger.Logger - routes *sorted.Set - streamFactory pipeline.StreamFunc -} - -// New creates a new router. -func New(log *logger.Logger, factory pipeline.StreamFunc) (pipeline.Router, error) { - var err error - if log == nil { - log, err = logger.New("router", false) - if err != nil { - return nil, err - } - } - return &router{log: log, streamFactory: factory, routes: sorted.NewSet()}, nil -} - -func (r *router) Routes() *sorted.Set { - return r.routes -} - -func (r *router) Route(ctx context.Context, id string, grpProg map[pipeline.RoutingKey][]program.Program) error { - s := sorted.NewSet() - - // Make sure that starting and updating is always done in the same order. - for rk, programs := range grpProg { - s.Add(rk, programs) - } - - active := make(map[string]bool, len(grpProg)) - for _, rk := range s.Keys() { - active[rk] = true - - // Are we already runnings this streams? - // When it doesn't exist we just create it, if it already exist we forward the configuration. - p, ok := r.routes.Get(rk) - var err error - if !ok { - r.log.Debugf("Creating stream: %s", rk) - p, err = r.streamFactory(r.log, rk) - if err != nil { - return err - } - r.routes.Add(rk, p) - } - - programs, ok := s.Get(rk) - if !ok { - return fmt.Errorf("could not find programs for routing key %s", rk) - } - - req := configrequest.New(id, time.Now(), programs.([]program.Program)) - - r.log.Debugf( - "Streams %s need to run config with ID %s and programs: %s", - rk, - req.ShortID(), - strings.Join(req.ProgramNames(), ", "), - ) - - err = p.(pipeline.Stream).Execute(ctx, req) - if err != nil { - return err - } - } - - // cleanup inactive streams. - // streams are shutdown down in alphabetical order. - keys := r.routes.Keys() - for _, k := range keys { - _, ok := active[k] - if ok { - continue - } - - p, ok := r.routes.Get(k) - if !ok { - continue - } - - r.log.Debugf("Removing routing key %s", k) - - p.(pipeline.Stream).Close() - r.routes.Remove(k) - } - - return nil -} - -// Shutdown shutdowns the router because Agent is stopping. 
-func (r *router) Shutdown() { - keys := r.routes.Keys() - for _, k := range keys { - p, ok := r.routes.Get(k) - if !ok { - continue - } - p.(pipeline.Stream).Shutdown() - r.routes.Remove(k) - } -} diff --git a/internal/pkg/agent/application/pipeline/router/router_test.go b/internal/pkg/agent/application/pipeline/router/router_test.go deleted file mode 100644 index 75f33231b1b..00000000000 --- a/internal/pkg/agent/application/pipeline/router/router_test.go +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package router - -import ( - "context" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/configrequest" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -type rOp int - -const ( - createOp rOp = iota + 1 - executeOp - closeOp -) - -func (r *rOp) String() string { - m := map[rOp]string{ - 1: "create", - 2: "execute", - 3: "close", - } - v, ok := m[*r] - if !ok { - return "unknown operation" - } - return v -} - -type event struct { - rk pipeline.RoutingKey - op rOp -} - -type notifyFunc func(pipeline.RoutingKey, rOp, ...interface{}) - -func TestRouter(t *testing.T) { - programs := []program.Program{{Spec: getRandomSpec()}} - ctx := context.Background() - - t.Run("create new and destroy unused stream", func(t *testing.T) { - recorder := &recorder{} - r, err := New(nil, recorder.factory) - require.NoError(t, err) - _ = r.Route(ctx, "hello", map[pipeline.RoutingKey][]program.Program{ - pipeline.DefaultRK: programs, - }) - - assertOps(t, []event{ - e(pipeline.DefaultRK, createOp), - e(pipeline.DefaultRK, executeOp), - }, recorder.events) - - recorder.reset() - - nk := "NEW_KEY" - _ = r.Route(ctx, "hello-2", map[pipeline.RoutingKey][]program.Program{ - nk: programs, - }) - - assertOps(t, []event{ - e(nk, createOp), - e(nk, executeOp), - e(pipeline.DefaultRK, closeOp), - }, recorder.events) - }) - - t.Run("multiples create new and destroy unused stream", func(t *testing.T) { - k1 := "KEY_1" - k2 := "KEY_2" - - recorder := &recorder{} - r, err := New(nil, recorder.factory) - require.NoError(t, err) - _ = r.Route(ctx, "hello", map[pipeline.RoutingKey][]program.Program{ - pipeline.DefaultRK: programs, - k1: programs, - k2: programs, - }) - - assertOps(t, []event{ - e(k1, createOp), - e(k1, executeOp), - - e(k2, createOp), - e(k2, executeOp), - - e(pipeline.DefaultRK, createOp), - e(pipeline.DefaultRK, executeOp), - }, recorder.events) - - recorder.reset() - - nk := "SECOND_DISPATCH" - _ = r.Route(ctx, "hello-2", map[pipeline.RoutingKey][]program.Program{ - nk: programs, - }) - - assertOps(t, []event{ - e(nk, createOp), - e(nk, executeOp), - - e(k1, closeOp), - e(k2, closeOp), - e(pipeline.DefaultRK, closeOp), - }, recorder.events) - }) - - t.Run("create new and delegate program to existing stream", func(t *testing.T) { - recorder := &recorder{} - r, err := New(nil, recorder.factory) - require.NoError(t, err) - _ = r.Route(ctx, "hello", map[pipeline.RoutingKey][]program.Program{ - pipeline.DefaultRK: programs, - }) - - assertOps(t, []event{ - e(pipeline.DefaultRK, createOp), - e(pipeline.DefaultRK, executeOp), - }, recorder.events) - - 
recorder.reset() - - _ = r.Route(ctx, "hello-2", map[pipeline.RoutingKey][]program.Program{ - pipeline.DefaultRK: programs, - }) - - assertOps(t, []event{ - e(pipeline.DefaultRK, executeOp), - }, recorder.events) - }) - - t.Run("when no stream are detected we shutdown all the running streams", func(t *testing.T) { - k1 := "KEY_1" - k2 := "KEY_2" - - recorder := &recorder{} - r, err := New(nil, recorder.factory) - require.NoError(t, err) - _ = r.Route(ctx, "hello", map[pipeline.RoutingKey][]program.Program{ - pipeline.DefaultRK: programs, - k1: programs, - k2: programs, - }) - - assertOps(t, []event{ - e(k1, createOp), - e(k1, executeOp), - e(k2, createOp), - e(k2, executeOp), - e(pipeline.DefaultRK, createOp), - e(pipeline.DefaultRK, executeOp), - }, recorder.events) - - recorder.reset() - - _ = r.Route(ctx, "hello-2", map[pipeline.RoutingKey][]program.Program{}) - - assertOps(t, []event{ - e(k1, closeOp), - e(k2, closeOp), - e(pipeline.DefaultRK, closeOp), - }, recorder.events) - }) -} - -type recorder struct { - events []event -} - -func (r *recorder) factory(_ *logger.Logger, rk pipeline.RoutingKey) (pipeline.Stream, error) { - return newMockStream(rk, r.notify), nil -} - -func (r *recorder) notify(rk pipeline.RoutingKey, op rOp, args ...interface{}) { - r.events = append(r.events, e(rk, op)) -} - -func (r *recorder) reset() { - r.events = nil -} - -type mockStream struct { - rk pipeline.RoutingKey - notify notifyFunc -} - -func newMockStream(rk pipeline.RoutingKey, notify notifyFunc) *mockStream { - notify(rk, createOp) - return &mockStream{ - rk: rk, - notify: notify, - } -} - -func (m *mockStream) Execute(_ context.Context, req configrequest.Request) error { - m.event(executeOp, req) - return nil -} - -func (m *mockStream) Close() error { - m.event(closeOp) - return nil -} - -func (m *mockStream) Shutdown() {} - -func (m *mockStream) event(op rOp, args ...interface{}) { - m.notify(m.rk, op, args...) -} - -func assertOps(t *testing.T, expected []event, received []event) { - require.Equal(t, len(expected), len(received), "Received number of operation doesn't match") - require.Equal(t, expected, received) -} - -func e(rk pipeline.RoutingKey, op rOp) event { - return event{rk: rk, op: op} -} - -func getRandomSpec() program.Spec { - return program.Supported[1] -} diff --git a/internal/pkg/agent/application/pipeline/stream/factory.go b/internal/pkg/agent/application/pipeline/stream/factory.go deleted file mode 100644 index b7701e70e99..00000000000 --- a/internal/pkg/agent/application/pipeline/stream/factory.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package stream - -import ( - "context" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/operation" - "github.com/elastic/elastic-agent/internal/pkg/agent/stateresolver" - downloader "github.com/elastic/elastic-agent/internal/pkg/artifact/download/localremote" - "github.com/elastic/elastic-agent/internal/pkg/artifact/install" - "github.com/elastic/elastic-agent/internal/pkg/artifact/uninstall" - "github.com/elastic/elastic-agent/internal/pkg/core/monitoring" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/internal/pkg/core/status" - "github.com/elastic/elastic-agent/internal/pkg/release" - "github.com/elastic/elastic-agent/pkg/core/logger" - "github.com/elastic/elastic-agent/pkg/core/server" -) - -// Factory creates a new stream factory. -func Factory(ctx context.Context, agentInfo *info.AgentInfo, cfg *configuration.SettingsConfig, srv *server.Server, r state.Reporter, m monitoring.Monitor, statusController status.Controller) func(*logger.Logger, pipeline.RoutingKey) (pipeline.Stream, error) { - return func(log *logger.Logger, id pipeline.RoutingKey) (pipeline.Stream, error) { - // new operator per stream to isolate processes without using tags - operator, err := newOperator(ctx, log, agentInfo, id, cfg, srv, r, m, statusController) - if err != nil { - return nil, err - } - - return &operatorStream{ - log: log, - configHandler: operator, - }, nil - } -} - -func newOperator( - ctx context.Context, - log *logger.Logger, - agentInfo *info.AgentInfo, - id pipeline.RoutingKey, - config *configuration.SettingsConfig, - srv *server.Server, - r state.Reporter, - m monitoring.Monitor, - statusController status.Controller, -) (*operation.Operator, error) { - fetcher, err := downloader.NewDownloader(log, config.DownloadConfig) - if err != nil { - return nil, err - } - - allowEmptyPgp, pgp := release.PGP() - verifier, err := downloader.NewVerifier(log, config.DownloadConfig, allowEmptyPgp, pgp) - if err != nil { - return nil, errors.New(err, "initiating verifier") - } - - installer, err := install.NewInstaller(config.DownloadConfig) - if err != nil { - return nil, errors.New(err, "initiating installer") - } - - uninstaller, err := uninstall.NewUninstaller() - if err != nil { - return nil, errors.New(err, "initiating uninstaller") - } - - stateResolver, err := stateresolver.NewStateResolver(log) - if err != nil { - return nil, err - } - - return operation.NewOperator( - ctx, - log, - agentInfo, - id, - config, - fetcher, - verifier, - installer, - uninstaller, - stateResolver, - srv, - r, - m, - statusController, - ) -} diff --git a/internal/pkg/agent/application/pipeline/stream/operator_stream.go b/internal/pkg/agent/application/pipeline/stream/operator_stream.go deleted file mode 100644 index ee4ee44079e..00000000000 --- a/internal/pkg/agent/application/pipeline/stream/operator_stream.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. 
Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package stream - -import ( - "context" - - "go.elastic.co/apm" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/configrequest" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -type operatorStream struct { - configHandler pipeline.ConfigHandler - log *logger.Logger -} - -type stater interface { - State() map[string]state.State -} - -type specer interface { - Specs() map[string]program.Spec -} - -func (b *operatorStream) Close() error { - return b.configHandler.Close() -} - -func (b *operatorStream) State() map[string]state.State { - if s, ok := b.configHandler.(stater); ok { - return s.State() - } - - return nil -} - -func (b *operatorStream) Specs() map[string]program.Spec { - if s, ok := b.configHandler.(specer); ok { - return s.Specs() - } - return nil -} - -func (b *operatorStream) Execute(ctx context.Context, cfg configrequest.Request) (err error) { - span, ctx := apm.StartSpan(ctx, "route", "app.internal") - defer func() { - apm.CaptureError(ctx, err).Send() - span.End() - }() - return b.configHandler.HandleConfig(ctx, cfg) -} - -func (b *operatorStream) Shutdown() { - b.configHandler.Shutdown() -} diff --git a/internal/pkg/agent/application/upgrade/error_checker.go b/internal/pkg/agent/application/upgrade/error_checker.go index 099526b990b..8e308c4e080 100644 --- a/internal/pkg/agent/application/upgrade/error_checker.go +++ b/internal/pkg/agent/application/upgrade/error_checker.go @@ -64,7 +64,7 @@ func (ch *ErrorChecker) Run(ctx context.Context) { continue } - status, err := ch.agentClient.Status(ctx) + state, err := ch.agentClient.State(ctx) ch.agentClient.Disconnect() if err != nil { ch.log.Error("failed retrieving agent status", err) @@ -78,14 +78,14 @@ func (ch *ErrorChecker) Run(ctx context.Context) { // call was successful, reset counter ch.failuresCounter = 0 - if status.Status == client.Failed { + if state.State == client.Failed { ch.log.Error("error checker notifying failure of agent") ch.notifyChan <- ErrAgentStatusFailed } - for _, app := range status.Applications { - if app.Status == client.Failed { - err = multierror.Append(err, errors.New(fmt.Sprintf("application %s[%v] failed: %s", app.Name, app.ID, app.Message))) + for _, comp := range state.Components { + if comp.State == client.Failed { + err = multierror.Append(err, errors.New(fmt.Sprintf("component %s[%v] failed: %s", comp.Name, comp.ID, comp.Message))) } } diff --git a/internal/pkg/agent/application/upgrade/step_mark.go b/internal/pkg/agent/application/upgrade/step_mark.go index e176e4c5b96..51b0adbb184 100644 --- a/internal/pkg/agent/application/upgrade/step_mark.go +++ b/internal/pkg/agent/application/upgrade/step_mark.go @@ -39,7 +39,7 @@ type UpdateMarker struct { } // markUpgrade marks update happened so we can handle grace period -func (u *Upgrader) markUpgrade(_ context.Context, hash string, action Action) error { +func (u *Upgrader) markUpgrade(_ context.Context, hash string, action *fleetapi.ActionUpgrade) error { prevVersion := release.Version() prevHash := release.Commit() if len(prevHash) > hashLen { @@ -51,7 +51,7 @@ func (u *Upgrader) markUpgrade(_ context.Context, hash string, action Action) er UpdatedOn: time.Now(), 
PrevVersion: prevVersion, PrevHash: prevHash, - Action: action.FleetAction(), + Action: action, } markerBytes, err := yaml.Marshal(marker) diff --git a/internal/pkg/agent/application/upgrade/upgrade.go b/internal/pkg/agent/application/upgrade/upgrade.go index e2fe530ff77..4f1db73ea79 100644 --- a/internal/pkg/agent/application/upgrade/upgrade.go +++ b/internal/pkg/agent/application/upgrade/upgrade.go @@ -8,6 +8,7 @@ import ( "bytes" "context" "fmt" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "io/ioutil" "os" "path/filepath" @@ -25,8 +26,6 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/program" "github.com/elastic/elastic-agent/internal/pkg/agent/storage" "github.com/elastic/elastic-agent/internal/pkg/artifact" - "github.com/elastic/elastic-agent/internal/pkg/capabilities" - "github.com/elastic/elastic-agent/internal/pkg/core/state" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" "github.com/elastic/elastic-agent/internal/pkg/release" "github.com/elastic/elastic-agent/pkg/core/logger" @@ -47,40 +46,16 @@ var ( } ) +var ( + // ErrSameVersion error is returned when the upgrade results in the same installed version. + ErrSameVersion = errors.New("upgrade did not occur because its the same version") +) + // Upgrader performs an upgrade type Upgrader struct { - agentInfo *info.AgentInfo - settings *artifact.Config log *logger.Logger - closers []context.CancelFunc - reexec reexecManager - acker acker - reporter stateReporter + settings *artifact.Config upgradeable bool - caps capabilities.Capability -} - -// Action is the upgrade action state. -type Action interface { - // Version to upgrade to. - Version() string - // SourceURI for download. - SourceURI() string - // FleetAction is the action from fleet that started the action (optional). - FleetAction() *fleetapi.ActionUpgrade -} - -type reexecManager interface { - ReExec(callback reexec.ShutdownCallbackFn, argOverrides ...string) -} - -type acker interface { - Ack(ctx context.Context, action fleetapi.Action) error - Commit(ctx context.Context) error -} - -type stateReporter interface { - OnStateChange(id string, name string, s state.State) } // IsUpgradeable when agent is installed and running as a service or flag was provided. @@ -91,17 +66,11 @@ func IsUpgradeable() bool { } // NewUpgrader creates an upgrader which is capable of performing upgrade operation -func NewUpgrader(agentInfo *info.AgentInfo, settings *artifact.Config, log *logger.Logger, closers []context.CancelFunc, reexec reexecManager, a acker, r stateReporter, caps capabilities.Capability) *Upgrader { +func NewUpgrader(log *logger.Logger, settings *artifact.Config) *Upgrader { return &Upgrader{ - agentInfo: agentInfo, - settings: settings, log: log, - closers: closers, - reexec: reexec, - acker: a, - reporter: r, + settings: settings, upgradeable: IsUpgradeable(), - caps: caps, } } @@ -112,40 +81,17 @@ func (u *Upgrader) Upgradeable() bool { // Upgrade upgrades running agent, function returns shutdown callback if some needs to be executed for cases when // reexec is called by caller. 
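
The Upgrader above is deliberately slimmed down: the acker, state reporter, re-exec manager, and capability plumbing it used to carry now live with the caller. A minimal, illustrative sketch of how a caller might drive it, assuming the import paths from this patch; the helper name and error handling are invented, while the constructor, Upgradeable, ErrSameVersion, and the Upgrade call are the pieces introduced in this file (the new Upgrade signature appears just below):

// Illustrative sketch, not part of this patch: driving the slimmed-down Upgrader.
package example

import (
	"context"
	"errors"
	"fmt"

	"github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade"
	"github.com/elastic/elastic-agent/internal/pkg/artifact"
	"github.com/elastic/elastic-agent/internal/pkg/fleetapi"
	"github.com/elastic/elastic-agent/pkg/core/logger"
)

func performUpgrade(ctx context.Context, log *logger.Logger, settings *artifact.Config, version, sourceURI string, action *fleetapi.ActionUpgrade) error {
	u := upgrade.NewUpgrader(log, settings)
	if !u.Upgradeable() {
		return fmt.Errorf("this installation cannot be upgraded")
	}
	cb, err := u.Upgrade(ctx, version, sourceURI, action)
	if errors.Is(err, upgrade.ErrSameVersion) {
		return nil // already on the requested version, nothing to do
	}
	if err != nil {
		return err
	}
	_ = cb // the caller decides when to re-exec using the returned shutdown callback
	return nil
}
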
-func (u *Upgrader) Upgrade(ctx context.Context, a Action, reexecNow bool) (_ reexec.ShutdownCallbackFn, err error) { +func (u *Upgrader) Upgrade(ctx context.Context, version string, sourceURI string, action *fleetapi.ActionUpgrade) (_ reexec.ShutdownCallbackFn, err error) { span, ctx := apm.StartSpan(ctx, "upgrade", "app.internal") defer span.End() - // report failed - defer func() { - if err != nil { - if action := a.FleetAction(); action != nil { - u.reportFailure(ctx, action, err) - } - apm.CaptureError(ctx, err).Send() - } - }() - - if !u.upgradeable { - return nil, fmt.Errorf( - "cannot be upgraded; must be installed with install sub-command and " + - "running under control of the systems supervisor") - } - - if u.caps != nil { - if _, err := u.caps.Apply(a); errors.Is(err, capabilities.ErrBlocked) { - return nil, nil - } - } - - u.reportUpdating(a.Version()) - sourceURI := u.sourceURI(a.SourceURI()) - archivePath, err := u.downloadArtifact(ctx, a.Version(), sourceURI) + sourceURI = u.sourceURI(sourceURI) + archivePath, err := u.downloadArtifact(ctx, version, sourceURI) if err != nil { return nil, err } - newHash, err := u.unpack(ctx, a.Version(), archivePath) + newHash, err := u.unpack(ctx, version, archivePath) if err != nil { return nil, err } @@ -155,13 +101,7 @@ func (u *Upgrader) Upgrade(ctx context.Context, a Action, reexecNow bool) (_ ree } if strings.HasPrefix(release.Commit(), newHash) { - // not an error - if action := a.FleetAction(); action != nil { - //nolint:errcheck // keeping the same behavior, and making linter happy - u.ackAction(ctx, action) - } - u.log.Warn("upgrading to same version") - return nil, nil + return nil, ErrSameVersion } // Copy vault directory for linux/windows only @@ -182,7 +122,7 @@ func (u *Upgrader) Upgrade(ctx context.Context, a Action, reexecNow bool) (_ ree return nil, err } - if err := u.markUpgrade(ctx, newHash, a); err != nil { + if err := u.markUpgrade(ctx, newHash, action); err != nil { rollbackInstall(ctx, newHash) return nil, err } @@ -192,17 +132,12 @@ func (u *Upgrader) Upgrade(ctx context.Context, a Action, reexecNow bool) (_ ree return nil, errors.New("failed to invoke rollback watcher", err) } - cb := shutdownCallback(u.log, paths.Home(), release.Version(), a.Version(), release.TrimCommit(newHash)) - if reexecNow { - u.reexec.ReExec(cb) - return nil, nil - } - + cb := shutdownCallback(u.log, paths.Home(), release.Version(), version, release.TrimCommit(newHash)) return cb, nil } // Ack acks last upgrade action -func (u *Upgrader) Ack(ctx context.Context) error { +func (u *Upgrader) Ack(ctx context.Context, acker acker.Acker) error { // get upgrade action marker, err := LoadMarker() if err != nil { @@ -216,7 +151,11 @@ func (u *Upgrader) Ack(ctx context.Context) error { return nil } - if err := u.ackAction(ctx, marker.Action); err != nil { + if err := acker.Ack(ctx, marker.Action); err != nil { + return err + } + + if err := acker.Commit(ctx); err != nil { return err } @@ -231,50 +170,6 @@ func (u *Upgrader) sourceURI(retrievedURI string) string { return u.settings.SourceURI } -// ackAction is used for successful updates, it was either updated successfully or to the same version -// so we need to remove updating state and get prevent from receiving same update action again. 
-func (u *Upgrader) ackAction(ctx context.Context, action fleetapi.Action) error { - if err := u.acker.Ack(ctx, action); err != nil { - return err - } - - if err := u.acker.Commit(ctx); err != nil { - return err - } - - u.reporter.OnStateChange( - "", - agentName, - state.State{Status: state.Healthy}, - ) - - return nil -} - -// report failure is used when update process fails. action is acked so it won't be received again -// and state is changed to FAILED -func (u *Upgrader) reportFailure(ctx context.Context, action fleetapi.Action, err error) { - // ack action - _ = u.acker.Ack(ctx, action) - - // report failure - u.reporter.OnStateChange( - "", - agentName, - state.State{Status: state.Failed, Message: err.Error()}, - ) -} - -// reportUpdating sets state of agent to updating. -func (u *Upgrader) reportUpdating(version string) { - // report failure - u.reporter.OnStateChange( - "", - agentName, - state.State{Status: state.Updating, Message: fmt.Sprintf("Update to version '%s' started", version)}, - ) -} - func rollbackInstall(ctx context.Context, hash string) { os.RemoveAll(filepath.Join(paths.Data(), fmt.Sprintf("%s-%s", agentName, hash))) _ = ChangeSymlink(ctx, release.ShortCommit()) diff --git a/internal/pkg/agent/cmd/enroll_cmd.go b/internal/pkg/agent/cmd/enroll_cmd.go index a886ca5bafb..77519772fe7 100644 --- a/internal/pkg/agent/cmd/enroll_cmd.go +++ b/internal/pkg/agent/cmd/enroll_cmd.go @@ -28,7 +28,6 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/secret" "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" "github.com/elastic/elastic-agent/internal/pkg/agent/control/client" - "github.com/elastic/elastic-agent/internal/pkg/agent/control/cproto" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/agent/install" "github.com/elastic/elastic-agent/internal/pkg/agent/storage" @@ -303,7 +302,7 @@ func (c *enrollCmd) writeDelayEnroll(streams *cli.IOStreams) error { func (c *enrollCmd) fleetServerBootstrap(ctx context.Context, persistentConfig map[string]interface{}) (string, error) { c.log.Debug("verifying communication with running Elastic Agent daemon") agentRunning := true - _, err := getDaemonStatus(ctx) + _, err := getDaemonState(ctx) if err != nil { if !c.options.FleetServer.SpawnAgent { // wait longer to try and communicate with the Elastic Agent @@ -641,7 +640,7 @@ func delay(ctx context.Context, d time.Duration) { } } -func getDaemonStatus(ctx context.Context) (*client.AgentStatus, error) { +func getDaemonState(ctx context.Context) (*client.AgentState, error) { ctx, cancel := context.WithTimeout(ctx, daemonTimeout) defer cancel() daemon := client.New() @@ -650,7 +649,7 @@ func getDaemonStatus(ctx context.Context) (*client.AgentStatus, error) { return nil, err } defer daemon.Disconnect() - return daemon.Status(ctx) + return daemon.State(ctx) } type waitResult struct { @@ -680,7 +679,7 @@ func waitForAgent(ctx context.Context, timeout time.Duration) error { backOff := expBackoffWithContext(innerCtx, 1*time.Second, maxBackoff) for { backOff.Wait() - _, err := getDaemonStatus(innerCtx) + _, err := getDaemonState(innerCtx) if errors.Is(err, context.Canceled) { resChan <- waitResult{err: err} return @@ -730,7 +729,7 @@ func waitForFleetServer(ctx context.Context, agentSubproc <-chan *os.ProcessStat backExp := expBackoffWithContext(innerCtx, 1*time.Second, maxBackoff) for { backExp.Wait() - status, err := getDaemonStatus(innerCtx) 
+ state, err := getDaemonState(innerCtx) if errors.Is(err, context.Canceled) { resChan <- waitResult{err: err} return @@ -750,8 +749,8 @@ func waitForFleetServer(ctx context.Context, agentSubproc <-chan *os.ProcessStat } continue } - app := getAppFromStatus(status, "fleet-server") - if app == nil { + unit := getCompUnitFromStatus(state, "fleet-server") + if unit == nil { err = errors.New("no fleet-server application running") log.Debugf("%s: %s", waitingForFleetServer, err) if msg != waitingForFleetServer { @@ -767,16 +766,16 @@ func waitForFleetServer(ctx context.Context, agentSubproc <-chan *os.ProcessStat } continue } - log.Debugf("%s: %s - %s", waitingForFleetServer, app.Status, app.Message) - if app.Status == cproto.Status_DEGRADED || app.Status == cproto.Status_HEALTHY { + log.Debugf("%s: %s - %s", waitingForFleetServer, unit.State, unit.Message) + if unit.State == client.Degraded || unit.State == client.Healthy { // app has started and is running - if app.Message != "" { - log.Infof("Fleet Server - %s", app.Message) + if unit.Message != "" { + log.Infof("Fleet Server - %s", unit.Message) } // extract the enrollment token from the status payload token := "" - if app.Payload != nil { - if enrollToken, ok := app.Payload["enrollment_token"]; ok { + if unit.Payload != nil { + if enrollToken, ok := unit.Payload["enrollment_token"]; ok { if tokenStr, ok := enrollToken.(string); ok { token = tokenStr } @@ -785,8 +784,8 @@ func waitForFleetServer(ctx context.Context, agentSubproc <-chan *os.ProcessStat resChan <- waitResult{enrollmentToken: token} break } - if app.Message != "" { - appMsg := fmt.Sprintf("Fleet Server - %s", app.Message) + if unit.Message != "" { + appMsg := fmt.Sprintf("Fleet Server - %s", unit.Message) if msg != appMsg { msg = appMsg msgCount = 0 @@ -827,10 +826,14 @@ func waitForFleetServer(ctx context.Context, agentSubproc <-chan *os.ProcessStat return res.enrollmentToken, nil } -func getAppFromStatus(status *client.AgentStatus, name string) *client.ApplicationStatus { - for _, app := range status.Applications { - if app.Name == name { - return app +func getCompUnitFromStatus(state *client.AgentState, name string) *client.ComponentUnitState { + for _, comp := range state.Components { + if comp.Name == name { + for _, unit := range comp.Units { + if unit.UnitType == client.UnitTypeInput { + return &unit + } + } } } return nil diff --git a/internal/pkg/agent/cmd/inspect.go b/internal/pkg/agent/cmd/inspect.go index f14c864659c..a320efde43e 100644 --- a/internal/pkg/agent/cmd/inspect.go +++ b/internal/pkg/agent/cmd/inspect.go @@ -16,9 +16,6 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/filters" "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/emitter" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/emitter/modifiers" "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/agent/program" @@ -29,7 +26,6 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/config" "github.com/elastic/elastic-agent/internal/pkg/config/operations" 
"github.com/elastic/elastic-agent/internal/pkg/core/monitoring/noop" - "github.com/elastic/elastic-agent/internal/pkg/core/status" "github.com/elastic/elastic-agent/internal/pkg/sorted" "github.com/elastic/elastic-agent/pkg/core/logger" "github.com/elastic/go-sysinfo" @@ -102,7 +98,7 @@ func printMapStringConfig(mapStr map[string]interface{}) error { if err != nil { return err } - caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), l, status.NewController(l)) + caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), l) if err != nil { return err } @@ -279,7 +275,7 @@ func getProgramsFromConfig(log *logger.Logger, agentInfo *info.AgentInfo, cfg *c configModifiers.Filters = append(configModifiers.Filters, modifiers.InjectFleet(cfg, sysInfo.Info(), agentInfo)) } - caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), log, status.NewController(log)) + caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), log) if err != nil { return nil, err } @@ -379,6 +375,10 @@ func (w *waitForCompose) Run(ctx context.Context) error { return err } +func (w *waitForCompose) Errors() <-chan error { + return nil +} + func (w *waitForCompose) Watch() <-chan []*transpiler.Vars { return nil } diff --git a/internal/pkg/agent/cmd/run.go b/internal/pkg/agent/cmd/run.go index e3d30e054b4..e2d3fc0e751 100644 --- a/internal/pkg/agent/cmd/run.go +++ b/internal/pkg/agent/cmd/run.go @@ -19,11 +19,7 @@ import ( apmtransport "go.elastic.co/apm/transport" "gopkg.in/yaml.v2" - "github.com/elastic/elastic-agent-libs/api" - "github.com/elastic/elastic-agent-libs/monitoring" "github.com/elastic/elastic-agent-libs/service" - "github.com/elastic/elastic-agent-system-metrics/report" - "github.com/elastic/elastic-agent/internal/pkg/agent/application" "github.com/elastic/elastic-agent/internal/pkg/agent/application/filelock" "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" @@ -37,13 +33,9 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/storage" "github.com/elastic/elastic-agent/internal/pkg/cli" "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/beats" monitoringCfg "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config" - monitoringServer "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/server" - "github.com/elastic/elastic-agent/internal/pkg/core/status" "github.com/elastic/elastic-agent/internal/pkg/release" "github.com/elastic/elastic-agent/pkg/core/logger" - "github.com/elastic/elastic-agent/version" ) const ( @@ -151,8 +143,6 @@ func run(override cfgOverrider, modifiers ...application.PlatformModifier) error rexLogger := logger.Named("reexec") rex := reexec.NewManager(rexLogger, execPath) - statusCtrl := status.NewController(logger) - tracer, err := initTracer(agentName, release.Version(), cfg.Settings.MonitoringConfig) if err != nil { return fmt.Errorf("could not initiate APM tracer: %w", err) @@ -167,28 +157,27 @@ func run(override cfgOverrider, modifiers ...application.PlatformModifier) error logger.Info("APM instrumentation disabled") } - control := server.New(logger.Named("control"), rex, statusCtrl, nil, tracer) - // start the control listener - if err := control.Start(); err != nil { - return err - } - defer control.Stop() - - app, err := application.New(logger, rex, statusCtrl, 
control, agentInfo, tracer, modifiers...) + app, err := application.New(logger, agentInfo, rex, tracer, modifiers...) if err != nil { return err } - control.SetRouteFn(app.Routes) - control.SetMonitoringCfg(cfg.Settings.MonitoringConfig) - - serverStopFn, err := setupMetrics(agentInfo, logger, cfg.Settings.DownloadConfig.OS(), cfg.Settings.MonitoringConfig, app, tracer) - if err != nil { + control := server.New(logger.Named("control"), cfg.Settings.MonitoringConfig, app, tracer) + // start the control listener + if err := control.Start(); err != nil { return err } - defer func() { - _ = serverStopFn() - }() + defer control.Stop() + + /* + serverStopFn, err := setupMetrics(agentInfo, logger, cfg.Settings.DownloadConfig.OS(), cfg.Settings.MonitoringConfig, app, tracer) + if err != nil { + return err + } + defer func() { + _ = serverStopFn() + }() + */ appDone := make(chan bool) appErrCh := make(chan error) @@ -340,6 +329,7 @@ func defaultLogLevel(cfg *configuration.Configuration) string { return defaultLogLevel } +/* func setupMetrics( _ *info.AgentInfo, logger *logger.Logger, @@ -376,6 +366,7 @@ func setupMetrics( func isProcessStatsEnabled(cfg *monitoringCfg.MonitoringHTTPConfig) bool { return cfg != nil && cfg.Enabled } +*/ func tryDelayEnroll(ctx context.Context, logger *logger.Logger, cfg *configuration.Configuration, override cfgOverrider) (*configuration.Configuration, error) { enrollPath := paths.AgentEnrollFile() diff --git a/internal/pkg/agent/cmd/status.go b/internal/pkg/agent/cmd/status.go index f3649bafdc9..2f748e6dc89 100644 --- a/internal/pkg/agent/cmd/status.go +++ b/internal/pkg/agent/cmd/status.go @@ -25,7 +25,7 @@ import ( type outputter func(io.Writer, interface{}) error var statusOutputs = map[string]outputter{ - "human": humanStatusOutput, + "human": humanStateOutput, "json": jsonOutput, "yaml": yamlOutput, } @@ -64,7 +64,7 @@ func statusCmd(streams *cli.IOStreams, cmd *cobra.Command, args []string) error innerCtx, cancel := context.WithTimeout(ctx, 30*time.Second) defer cancel() - status, err := getDaemonStatus(innerCtx) + state, err := getDaemonState(innerCtx) if errors.Is(err, context.DeadlineExceeded) { return errors.New("timed out after 30 seconds trying to connect to Elastic Agent daemon") } else if errors.Is(err, context.Canceled) { @@ -73,12 +73,12 @@ func statusCmd(streams *cli.IOStreams, cmd *cobra.Command, args []string) error return fmt.Errorf("failed to communicate with Elastic Agent daemon: %w", err) } - err = outputFunc(streams.Out, status) + err = outputFunc(streams.Out, state) if err != nil { return err } // exit 0 only if the Elastic Agent daemon is healthy - if status.Status == client.Healthy { + if state.State == client.Healthy { os.Exit(0) } else { os.Exit(1) @@ -86,32 +86,32 @@ func statusCmd(streams *cli.IOStreams, cmd *cobra.Command, args []string) error return nil } -func humanStatusOutput(w io.Writer, obj interface{}) error { - status, ok := obj.(*client.AgentStatus) +func humanStateOutput(w io.Writer, obj interface{}) error { + status, ok := obj.(*client.AgentState) if !ok { return fmt.Errorf("unable to cast %T as *client.AgentStatus", obj) } - return outputStatus(w, status) + return outputState(w, status) } -func outputStatus(w io.Writer, status *client.AgentStatus) error { - fmt.Fprintf(w, "Status: %s\n", status.Status) - if status.Message == "" { +func outputState(w io.Writer, state *client.AgentState) error { + fmt.Fprintf(w, "State: %s\n", state.State) + if state.Message == "" { fmt.Fprint(w, "Message: (no message)\n") } else { - 
fmt.Fprintf(w, "Message: %s\n", status.Message) + fmt.Fprintf(w, "Message: %s\n", state.Message) } - if len(status.Applications) == 0 { - fmt.Fprint(w, "Applications: (none)\n") + if len(state.Components) == 0 { + fmt.Fprint(w, "Components: (none)\n") } else { - fmt.Fprint(w, "Applications:\n") + fmt.Fprint(w, "Components:\n") tw := tabwriter.NewWriter(w, 4, 1, 2, ' ', 0) - for _, app := range status.Applications { - fmt.Fprintf(tw, " * %s\t(%s)\n", app.Name, app.Status) - if app.Message == "" { + for _, comp := range state.Components { + fmt.Fprintf(tw, " * %s\t(%s)\n", comp.Name, comp.State) + if comp.Message == "" { fmt.Fprint(tw, "\t(no message)\n") } else { - fmt.Fprintf(tw, "\t%s\n", app.Message) + fmt.Fprintf(tw, "\t%s\n", comp.Message) } } tw.Flush() diff --git a/internal/pkg/agent/control/client/client.go b/internal/pkg/agent/control/client/client.go index 728e830b462..634cc25a5af 100644 --- a/internal/pkg/agent/control/client/client.go +++ b/internal/pkg/agent/control/client/client.go @@ -15,24 +15,38 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/control/cproto" ) -// Status is the status of the Elastic Agent -type Status = cproto.Status +// UnitType is the type of the unit +type UnitType = cproto.UnitType + +// State is the state codes +type State = cproto.State + +const ( + // UnitTypeInput is an input unit. + UnitTypeInput UnitType = cproto.UnitType_INPUT + // UnitTypeOutput is an output unit. + UnitTypeOutput UnitType = cproto.UnitType_OUTPUT +) const ( // Starting is when the it is still starting. - Starting Status = cproto.Status_STARTING + Starting State = cproto.State_STARTING // Configuring is when it is configuring. - Configuring Status = cproto.Status_CONFIGURING + Configuring State = cproto.State_CONFIGURING // Healthy is when it is healthy. - Healthy Status = cproto.Status_HEALTHY + Healthy State = cproto.State_HEALTHY // Degraded is when it is degraded. - Degraded Status = cproto.Status_DEGRADED + Degraded State = cproto.State_DEGRADED // Failed is when it is failed. - Failed Status = cproto.Status_FAILED + Failed State = cproto.State_FAILED // Stopping is when it is stopping. - Stopping Status = cproto.Status_STOPPING + Stopping State = cproto.State_STOPPING + // Stopped is when it is stopped. + Stopped State = cproto.State_STOPPED // Upgrading is when it is upgrading. - Upgrading Status = cproto.Status_UPGRADING + Upgrading State = cproto.State_UPGRADING + // Rollback is when it is upgrading is rolling back. + Rollback State = cproto.State_ROLLBACK ) // Version is the current running version of the daemon. @@ -43,14 +57,29 @@ type Version struct { Snapshot bool } -// ApplicationStatus is a status of an application managed by the Elastic Agent. -// TODO(Anderson): Implement sort.Interface and sort it. -type ApplicationStatus struct { - ID string - Name string - Status Status - Message string - Payload map[string]interface{} +// ComponentUnitState is a state of a unit running inside a component. +type ComponentUnitState struct { + UnitID string `json:"unit_id" yaml:"unit_id"` + UnitType UnitType `json:"unit_type" yaml:"unit_type"` + State State `json:"state" yaml:"state"` + Message string `json:"message" yaml:"message"` + Payload map[string]interface{} `json:"payload,omitempty" yaml:"payload,omitempty"` +} + +// ComponentState is a state of a component managed by the Elastic Agent. 
+type ComponentState struct { + ID string `json:"id" yaml:"id"` + Name string `json:"name" yaml:"name"` + State State `json:"state" yaml:"state"` + Message string `json:"message" yaml:"message"` + Units []ComponentUnitState `json:"units" yaml:"units"` +} + +// AgentState is the current state of the Elastic Agent. +type AgentState struct { + State State `json:"state" yaml:"state"` + Message string `json:"message" yaml:"message"` + Components []ComponentState `json:"components" yaml:"components"` } // ProcMeta is the running version and ID information for a running process. @@ -80,13 +109,6 @@ type ProcPProf struct { Error string } -// AgentStatus is the current status of the Elastic Agent. -type AgentStatus struct { - Status Status - Message string - Applications []*ApplicationStatus -} - // Client communicates to Elastic Agent through the control protocol. type Client interface { // Connect connects to the running Elastic Agent. @@ -95,8 +117,8 @@ type Client interface { Disconnect() // Version returns the current version of the running agent. Version(ctx context.Context) (Version, error) - // Status returns the current status of the running agent. - Status(ctx context.Context) (*AgentStatus, error) + // State returns the current state of the running agent. + State(ctx context.Context) (*AgentState, error) // Restart triggers restarting the current running daemon. Restart(ctx context.Context) error // Upgrade triggers upgrade of the current running daemon. @@ -161,32 +183,42 @@ func (c *client) Version(ctx context.Context) (Version, error) { }, nil } -// Status returns the current status of the running agent. -func (c *client) Status(ctx context.Context) (*AgentStatus, error) { - res, err := c.client.Status(ctx, &cproto.Empty{}) +// State returns the current state of the running agent. 
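
Because the replacement state structs above carry explicit json and yaml struct tags, the json and yaml outputters of the status command can marshal the returned state tree directly. A small, hedged sketch of that use (sample values are invented; the types and constants are the ones defined above):

// Illustrative only: the new client state structs marshal directly via their tags.
package example

import (
	"encoding/json"
	"fmt"

	"github.com/elastic/elastic-agent/internal/pkg/agent/control/client"
)

func printStateJSON() error {
	st := &client.AgentState{
		State:   client.Healthy,
		Message: "Running",
		Components: []client.ComponentState{{
			ID:    "fleet-server-default",
			Name:  "fleet-server",
			State: client.Healthy,
			Units: []client.ComponentUnitState{{
				UnitID:   "fleet-server-default-unit", // invented sample ID
				UnitType: client.UnitTypeInput,
				State:    client.Healthy,
			}},
		}},
	}
	out, err := json.MarshalIndent(st, "", "  ")
	if err != nil {
		return err
	}
	fmt.Println(string(out))
	return nil
}
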
+func (c *client) State(ctx context.Context) (*AgentState, error) { + res, err := c.client.State(ctx, &cproto.Empty{}) if err != nil { return nil, err } - s := &AgentStatus{ - Status: res.Status, - Message: res.Message, - Applications: make([]*ApplicationStatus, len(res.Applications)), + s := &AgentState{ + State: res.State, + Message: res.Message, + Components: make([]ComponentState, 0, len(res.Components)), } - for i, appRes := range res.Applications { - var payload map[string]interface{} - if appRes.Payload != "" { - err := json.Unmarshal([]byte(appRes.Payload), &payload) - if err != nil { - return nil, err + for _, comp := range res.Components { + units := make([]ComponentUnitState, 0, len(comp.Units)) + for _, unit := range comp.Units { + var payload map[string]interface{} + if unit.Payload != "" { + err := json.Unmarshal([]byte(unit.Payload), &payload) + if err != nil { + return nil, err + } } + units = append(units, ComponentUnitState{ + UnitID: unit.UnitId, + UnitType: unit.UnitType, + State: unit.State, + Message: unit.Message, + Payload: payload, + }) } - s.Applications[i] = &ApplicationStatus{ - ID: appRes.Id, - Name: appRes.Name, - Status: appRes.Status, - Message: appRes.Message, - Payload: payload, - } + s.Components = append(s.Components, ComponentState{ + ID: comp.Id, + Name: comp.Name, + State: comp.State, + Message: comp.Message, + Units: units, + }) } return s, nil } diff --git a/internal/pkg/agent/control/cproto/control.pb.go b/internal/pkg/agent/control/cproto/control.pb.go index 43609b68f0a..8b15dd2f5ac 100644 --- a/internal/pkg/agent/control/cproto/control.pb.go +++ b/internal/pkg/agent/control/cproto/control.pb.go @@ -11,11 +11,10 @@ package cproto import ( - reflect "reflect" - sync "sync" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) const ( @@ -25,71 +24,121 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// Status codes for the current state. -type Status int32 +// State codes for the current state. +type State int32 const ( - Status_STARTING Status = 0 - Status_CONFIGURING Status = 1 - Status_HEALTHY Status = 2 - Status_DEGRADED Status = 3 - Status_FAILED Status = 4 - Status_STOPPING Status = 5 - Status_UPGRADING Status = 6 - Status_ROLLBACK Status = 7 + State_STARTING State = 0 + State_CONFIGURING State = 1 + State_HEALTHY State = 2 + State_DEGRADED State = 3 + State_FAILED State = 4 + State_STOPPING State = 5 + State_STOPPED State = 6 + State_UPGRADING State = 7 + State_ROLLBACK State = 8 ) -// Enum value maps for Status. +// Enum value maps for State. 
var ( - Status_name = map[int32]string{ + State_name = map[int32]string{ 0: "STARTING", 1: "CONFIGURING", 2: "HEALTHY", 3: "DEGRADED", 4: "FAILED", 5: "STOPPING", - 6: "UPGRADING", - 7: "ROLLBACK", + 6: "STOPPED", + 7: "UPGRADING", + 8: "ROLLBACK", } - Status_value = map[string]int32{ + State_value = map[string]int32{ "STARTING": 0, "CONFIGURING": 1, "HEALTHY": 2, "DEGRADED": 3, "FAILED": 4, "STOPPING": 5, - "UPGRADING": 6, - "ROLLBACK": 7, + "STOPPED": 6, + "UPGRADING": 7, + "ROLLBACK": 8, } ) -func (x Status) Enum() *Status { - p := new(Status) +func (x State) Enum() *State { + p := new(State) *p = x return p } -func (x Status) String() string { +func (x State) String() string { return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } -func (Status) Descriptor() protoreflect.EnumDescriptor { +func (State) Descriptor() protoreflect.EnumDescriptor { return file_control_proto_enumTypes[0].Descriptor() } -func (Status) Type() protoreflect.EnumType { +func (State) Type() protoreflect.EnumType { return &file_control_proto_enumTypes[0] } -func (x Status) Number() protoreflect.EnumNumber { +func (x State) Number() protoreflect.EnumNumber { return protoreflect.EnumNumber(x) } -// Deprecated: Use Status.Descriptor instead. -func (Status) EnumDescriptor() ([]byte, []int) { +// Deprecated: Use State.Descriptor instead. +func (State) EnumDescriptor() ([]byte, []int) { return file_control_proto_rawDescGZIP(), []int{0} } +// Unit Type running inside a component. +type UnitType int32 + +const ( + UnitType_INPUT UnitType = 0 + UnitType_OUTPUT UnitType = 1 +) + +// Enum value maps for UnitType. +var ( + UnitType_name = map[int32]string{ + 0: "INPUT", + 1: "OUTPUT", + } + UnitType_value = map[string]int32{ + "INPUT": 0, + "OUTPUT": 1, + } +) + +func (x UnitType) Enum() *UnitType { + p := new(UnitType) + *p = x + return p +} + +func (x UnitType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (UnitType) Descriptor() protoreflect.EnumDescriptor { + return file_control_proto_enumTypes[1].Descriptor() +} + +func (UnitType) Type() protoreflect.EnumType { + return &file_control_proto_enumTypes[1] +} + +func (x UnitType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use UnitType.Descriptor instead. +func (UnitType) EnumDescriptor() ([]byte, []int) { + return file_control_proto_rawDescGZIP(), []int{1} +} + // Action status codes for restart and upgrade response. type ActionStatus int32 @@ -123,11 +172,11 @@ func (x ActionStatus) String() string { } func (ActionStatus) Descriptor() protoreflect.EnumDescriptor { - return file_control_proto_enumTypes[1].Descriptor() + return file_control_proto_enumTypes[2].Descriptor() } func (ActionStatus) Type() protoreflect.EnumType { - return &file_control_proto_enumTypes[1] + return &file_control_proto_enumTypes[2] } func (x ActionStatus) Number() protoreflect.EnumNumber { @@ -136,7 +185,7 @@ func (x ActionStatus) Number() protoreflect.EnumNumber { // Deprecated: Use ActionStatus.Descriptor instead. func (ActionStatus) EnumDescriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{1} + return file_control_proto_rawDescGZIP(), []int{2} } // pprof endpoint that can be requested. 
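
With the control protocol's status model renamed to State and the new UnitType enum above, a consumer of the control socket sees the daemon as components that contain typed units. A hedged end-to-end sketch using the client wrapper from earlier in this patch (the function name and output format are invented; New, Connect, State, and Disconnect are the client methods shown above):

// Illustrative only: querying the daemon over the control protocol with the
// reworked State API. Output formatting is invented for this sketch.
package example

import (
	"context"
	"fmt"

	"github.com/elastic/elastic-agent/internal/pkg/agent/control/client"
)

func reportDaemonState(ctx context.Context) error {
	c := client.New()
	if err := c.Connect(ctx); err != nil {
		return err
	}
	defer c.Disconnect()

	state, err := c.State(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("agent: %s (%s)\n", state.State, state.Message)
	for _, comp := range state.Components {
		fmt.Printf("  component %s [%s]: %s\n", comp.Name, comp.ID, comp.State)
		for _, unit := range comp.Units {
			kind := "output"
			if unit.UnitType == client.UnitTypeInput {
				kind = "input"
			}
			fmt.Printf("    %s unit %s: %s %s\n", kind, unit.UnitID, unit.State, unit.Message)
		}
	}
	return nil
}
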
@@ -191,11 +240,11 @@ func (x PprofOption) String() string { } func (PprofOption) Descriptor() protoreflect.EnumDescriptor { - return file_control_proto_enumTypes[2].Descriptor() + return file_control_proto_enumTypes[3].Descriptor() } func (PprofOption) Type() protoreflect.EnumType { - return &file_control_proto_enumTypes[2] + return &file_control_proto_enumTypes[3] } func (x PprofOption) Number() protoreflect.EnumNumber { @@ -204,7 +253,7 @@ func (x PprofOption) Number() protoreflect.EnumNumber { // Deprecated: Use PprofOption.Descriptor instead. func (PprofOption) EnumDescriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{2} + return file_control_proto_rawDescGZIP(), []int{3} } // Empty message. @@ -511,26 +560,25 @@ func (x *UpgradeResponse) GetError() string { return "" } -// Current status of the application in Elastic Agent. -type ApplicationStatus struct { +type ComponentUnitState struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Unique application ID. - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - // Application name. - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - // Current status. - Status Status `protobuf:"varint,3,opt,name=status,proto3,enum=cproto.Status" json:"status,omitempty"` - // Current status message. + // Type of unit in the component. + UnitType UnitType `protobuf:"varint,1,opt,name=unit_type,json=unitType,proto3,enum=cproto.UnitType" json:"unit_type,omitempty"` + // ID of the unit in the component. + UnitId string `protobuf:"bytes,2,opt,name=unit_id,json=unitId,proto3" json:"unit_id,omitempty"` + // Current state. + State State `protobuf:"varint,3,opt,name=state,proto3,enum=cproto.State" json:"state,omitempty"` + // Current state message. Message string `protobuf:"bytes,4,opt,name=message,proto3" json:"message,omitempty"` - // Current status payload. + // Current state payload. Payload string `protobuf:"bytes,5,opt,name=payload,proto3" json:"payload,omitempty"` } -func (x *ApplicationStatus) Reset() { - *x = ApplicationStatus{} +func (x *ComponentUnitState) Reset() { + *x = ComponentUnitState{} if protoimpl.UnsafeEnabled { mi := &file_control_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -538,13 +586,13 @@ func (x *ApplicationStatus) Reset() { } } -func (x *ApplicationStatus) String() string { +func (x *ComponentUnitState) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ApplicationStatus) ProtoMessage() {} +func (*ComponentUnitState) ProtoMessage() {} -func (x *ApplicationStatus) ProtoReflect() protoreflect.Message { +func (x *ComponentUnitState) ProtoReflect() protoreflect.Message { mi := &file_control_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -556,44 +604,205 @@ func (x *ApplicationStatus) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ApplicationStatus.ProtoReflect.Descriptor instead. -func (*ApplicationStatus) Descriptor() ([]byte, []int) { +// Deprecated: Use ComponentUnitState.ProtoReflect.Descriptor instead. 
+func (*ComponentUnitState) Descriptor() ([]byte, []int) { return file_control_proto_rawDescGZIP(), []int{5} } -func (x *ApplicationStatus) GetId() string { +func (x *ComponentUnitState) GetUnitType() UnitType { + if x != nil { + return x.UnitType + } + return UnitType_INPUT +} + +func (x *ComponentUnitState) GetUnitId() string { + if x != nil { + return x.UnitId + } + return "" +} + +func (x *ComponentUnitState) GetState() State { + if x != nil { + return x.State + } + return State_STARTING +} + +func (x *ComponentUnitState) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *ComponentUnitState) GetPayload() string { + if x != nil { + return x.Payload + } + return "" +} + +// Version information reported by the component to Elastic Agent. +type ComponentVersionInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name of the component. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Version of the component. + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + // Extra meta information about the version. + Meta map[string]string `protobuf:"bytes,3,rep,name=meta,proto3" json:"meta,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ComponentVersionInfo) Reset() { + *x = ComponentVersionInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_control_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ComponentVersionInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ComponentVersionInfo) ProtoMessage() {} + +func (x *ComponentVersionInfo) ProtoReflect() protoreflect.Message { + mi := &file_control_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ComponentVersionInfo.ProtoReflect.Descriptor instead. +func (*ComponentVersionInfo) Descriptor() ([]byte, []int) { + return file_control_proto_rawDescGZIP(), []int{6} +} + +func (x *ComponentVersionInfo) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ComponentVersionInfo) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *ComponentVersionInfo) GetMeta() map[string]string { + if x != nil { + return x.Meta + } + return nil +} + +// Current state of a running component by Elastic Agent. +type ComponentState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Unique component ID. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Component name. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // Current state. + State State `protobuf:"varint,3,opt,name=state,proto3,enum=cproto.State" json:"state,omitempty"` + // Current state message. + Message string `protobuf:"bytes,4,opt,name=message,proto3" json:"message,omitempty"` + // Current units running in the component. + Units []*ComponentUnitState `protobuf:"bytes,5,rep,name=units,proto3" json:"units,omitempty"` + // Current version information for the running component. 
+ VersionInfo *ComponentVersionInfo `protobuf:"bytes,6,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` +} + +func (x *ComponentState) Reset() { + *x = ComponentState{} + if protoimpl.UnsafeEnabled { + mi := &file_control_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ComponentState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ComponentState) ProtoMessage() {} + +func (x *ComponentState) ProtoReflect() protoreflect.Message { + mi := &file_control_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ComponentState.ProtoReflect.Descriptor instead. +func (*ComponentState) Descriptor() ([]byte, []int) { + return file_control_proto_rawDescGZIP(), []int{7} +} + +func (x *ComponentState) GetId() string { if x != nil { return x.Id } return "" } -func (x *ApplicationStatus) GetName() string { +func (x *ComponentState) GetName() string { if x != nil { return x.Name } return "" } -func (x *ApplicationStatus) GetStatus() Status { +func (x *ComponentState) GetState() State { if x != nil { - return x.Status + return x.State } - return Status_STARTING + return State_STARTING } -func (x *ApplicationStatus) GetMessage() string { +func (x *ComponentState) GetMessage() string { if x != nil { return x.Message } return "" } -func (x *ApplicationStatus) GetPayload() string { +func (x *ComponentState) GetUnits() []*ComponentUnitState { if x != nil { - return x.Payload + return x.Units } - return "" + return nil +} + +func (x *ComponentState) GetVersionInfo() *ComponentVersionInfo { + if x != nil { + return x.VersionInfo + } + return nil } // Current metadata for a running process. @@ -622,7 +831,7 @@ type ProcMeta struct { func (x *ProcMeta) Reset() { *x = ProcMeta{} if protoimpl.UnsafeEnabled { - mi := &file_control_proto_msgTypes[6] + mi := &file_control_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -635,7 +844,7 @@ func (x *ProcMeta) String() string { func (*ProcMeta) ProtoMessage() {} func (x *ProcMeta) ProtoReflect() protoreflect.Message { - mi := &file_control_proto_msgTypes[6] + mi := &file_control_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -648,7 +857,7 @@ func (x *ProcMeta) ProtoReflect() protoreflect.Message { // Deprecated: Use ProcMeta.ProtoReflect.Descriptor instead. func (*ProcMeta) Descriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{6} + return file_control_proto_rawDescGZIP(), []int{8} } func (x *ProcMeta) GetProcess() string { @@ -756,37 +965,37 @@ func (x *ProcMeta) GetError() string { return "" } -// Status is the current status of Elastic Agent. -type StatusResponse struct { +// StateResponse is the current state of Elastic Agent. +type StateResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Overall status of Elastic Agent. - Status Status `protobuf:"varint,1,opt,name=status,proto3,enum=cproto.Status" json:"status,omitempty"` + // Overall state of Elastic Agent. + State State `protobuf:"varint,1,opt,name=state,proto3,enum=cproto.State" json:"state,omitempty"` // Overall status message of Elastic Agent. 
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` - // Status of each application in Elastic Agent. - Applications []*ApplicationStatus `protobuf:"bytes,3,rep,name=applications,proto3" json:"applications,omitempty"` + // Status of each component in Elastic Agent. + Components []*ComponentState `protobuf:"bytes,3,rep,name=components,proto3" json:"components,omitempty"` } -func (x *StatusResponse) Reset() { - *x = StatusResponse{} +func (x *StateResponse) Reset() { + *x = StateResponse{} if protoimpl.UnsafeEnabled { - mi := &file_control_proto_msgTypes[7] + mi := &file_control_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *StatusResponse) String() string { +func (x *StateResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*StatusResponse) ProtoMessage() {} +func (*StateResponse) ProtoMessage() {} -func (x *StatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_control_proto_msgTypes[7] +func (x *StateResponse) ProtoReflect() protoreflect.Message { + mi := &file_control_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -797,28 +1006,28 @@ func (x *StatusResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use StatusResponse.ProtoReflect.Descriptor instead. -func (*StatusResponse) Descriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{7} +// Deprecated: Use StateResponse.ProtoReflect.Descriptor instead. +func (*StateResponse) Descriptor() ([]byte, []int) { + return file_control_proto_rawDescGZIP(), []int{9} } -func (x *StatusResponse) GetStatus() Status { +func (x *StateResponse) GetState() State { if x != nil { - return x.Status + return x.State } - return Status_STARTING + return State_STARTING } -func (x *StatusResponse) GetMessage() string { +func (x *StateResponse) GetMessage() string { if x != nil { return x.Message } return "" } -func (x *StatusResponse) GetApplications() []*ApplicationStatus { +func (x *StateResponse) GetComponents() []*ComponentState { if x != nil { - return x.Applications + return x.Components } return nil } @@ -835,7 +1044,7 @@ type ProcMetaResponse struct { func (x *ProcMetaResponse) Reset() { *x = ProcMetaResponse{} if protoimpl.UnsafeEnabled { - mi := &file_control_proto_msgTypes[8] + mi := &file_control_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -848,7 +1057,7 @@ func (x *ProcMetaResponse) String() string { func (*ProcMetaResponse) ProtoMessage() {} func (x *ProcMetaResponse) ProtoReflect() protoreflect.Message { - mi := &file_control_proto_msgTypes[8] + mi := &file_control_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -861,7 +1070,7 @@ func (x *ProcMetaResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ProcMetaResponse.ProtoReflect.Descriptor instead. 
func (*ProcMetaResponse) Descriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{8} + return file_control_proto_rawDescGZIP(), []int{10} } func (x *ProcMetaResponse) GetProcs() []*ProcMeta { @@ -890,7 +1099,7 @@ type PprofRequest struct { func (x *PprofRequest) Reset() { *x = PprofRequest{} if protoimpl.UnsafeEnabled { - mi := &file_control_proto_msgTypes[9] + mi := &file_control_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -903,7 +1112,7 @@ func (x *PprofRequest) String() string { func (*PprofRequest) ProtoMessage() {} func (x *PprofRequest) ProtoReflect() protoreflect.Message { - mi := &file_control_proto_msgTypes[9] + mi := &file_control_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -916,7 +1125,7 @@ func (x *PprofRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PprofRequest.ProtoReflect.Descriptor instead. func (*PprofRequest) Descriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{9} + return file_control_proto_rawDescGZIP(), []int{11} } func (x *PprofRequest) GetPprofType() []PprofOption { @@ -963,7 +1172,7 @@ type PprofResult struct { func (x *PprofResult) Reset() { *x = PprofResult{} if protoimpl.UnsafeEnabled { - mi := &file_control_proto_msgTypes[10] + mi := &file_control_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -976,7 +1185,7 @@ func (x *PprofResult) String() string { func (*PprofResult) ProtoMessage() {} func (x *PprofResult) ProtoReflect() protoreflect.Message { - mi := &file_control_proto_msgTypes[10] + mi := &file_control_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -989,7 +1198,7 @@ func (x *PprofResult) ProtoReflect() protoreflect.Message { // Deprecated: Use PprofResult.ProtoReflect.Descriptor instead. func (*PprofResult) Descriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{10} + return file_control_proto_rawDescGZIP(), []int{12} } func (x *PprofResult) GetAppName() string { @@ -1039,7 +1248,7 @@ type PprofResponse struct { func (x *PprofResponse) Reset() { *x = PprofResponse{} if protoimpl.UnsafeEnabled { - mi := &file_control_proto_msgTypes[11] + mi := &file_control_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1052,7 +1261,7 @@ func (x *PprofResponse) String() string { func (*PprofResponse) ProtoMessage() {} func (x *PprofResponse) ProtoReflect() protoreflect.Message { - mi := &file_control_proto_msgTypes[11] + mi := &file_control_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1065,7 +1274,7 @@ func (x *PprofResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PprofResponse.ProtoReflect.Descriptor instead. 
func (*PprofResponse) Descriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{11} + return file_control_proto_rawDescGZIP(), []int{13} } func (x *PprofResponse) GetResults() []*PprofResult { @@ -1090,7 +1299,7 @@ type MetricsResponse struct { func (x *MetricsResponse) Reset() { *x = MetricsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_control_proto_msgTypes[12] + mi := &file_control_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1103,7 +1312,7 @@ func (x *MetricsResponse) String() string { func (*MetricsResponse) ProtoMessage() {} func (x *MetricsResponse) ProtoReflect() protoreflect.Message { - mi := &file_control_proto_msgTypes[12] + mi := &file_control_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1116,7 +1325,7 @@ func (x *MetricsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use MetricsResponse.ProtoReflect.Descriptor instead. func (*MetricsResponse) Descriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{12} + return file_control_proto_rawDescGZIP(), []int{14} } func (x *MetricsResponse) GetAppName() string { @@ -1159,7 +1368,7 @@ type ProcMetricsResponse struct { func (x *ProcMetricsResponse) Reset() { *x = ProcMetricsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_control_proto_msgTypes[13] + mi := &file_control_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1172,7 +1381,7 @@ func (x *ProcMetricsResponse) String() string { func (*ProcMetricsResponse) ProtoMessage() {} func (x *ProcMetricsResponse) ProtoReflect() protoreflect.Message { - mi := &file_control_proto_msgTypes[13] + mi := &file_control_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1185,7 +1394,7 @@ func (x *ProcMetricsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ProcMetricsResponse.ProtoReflect.Descriptor instead. 
func (*ProcMetricsResponse) Descriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{13} + return file_control_proto_rawDescGZIP(), []int{15} } func (x *ProcMetricsResponse) GetResult() []*MetricsResponse { @@ -1225,139 +1434,170 @@ var file_control_proto_rawDesc = []byte{ 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x22, 0x93, 0x01, 0x0a, 0x11, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x63, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x18, 0x0a, - 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0xb5, 0x03, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x63, - 0x4d, 0x65, 0x74, 0x61, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x12, 0x12, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0e, - 0x0a, 0x02, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x21, - 0x0a, 0x0c, 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x49, - 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x62, - 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0b, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x1d, - 0x0a, 0x0a, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1a, 0x0a, - 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, - 0x72, 0x5f, 0x69, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x73, 0x65, 0x72, - 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x67, 0x69, 0x64, 0x18, 0x0b, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x75, 0x73, 0x65, 0x72, 0x47, 0x69, 0x64, 0x12, 0x22, 0x0a, - 0x0c, 0x61, 0x72, 0x63, 0x68, 0x69, 0x74, 0x65, 0x63, 0x74, 0x75, 0x72, 0x65, 0x18, 0x0c, 0x20, - 0x01, 0x28, 0x09, 0x52, 
0x0c, 0x61, 0x72, 0x63, 0x68, 0x69, 0x74, 0x65, 0x63, 0x74, 0x75, 0x72, - 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x0d, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x29, - 0x0a, 0x10, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x5f, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, - 0x65, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, - 0x63, 0x4c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, - 0x6f, 0x72, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, - 0x91, 0x01, 0x0a, 0x0e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x12, 0x3d, 0x0a, 0x0c, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x22, 0x3a, 0x0a, 0x10, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x05, 0x70, 0x72, 0x6f, 0x63, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x05, 0x70, 0x72, 0x6f, 0x63, 0x73, 0x22, - 0x9d, 0x01, 0x0a, 0x0c, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x31, 0x0a, 0x09, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0e, 0x32, 0x13, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, 0x72, - 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x63, - 0x65, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x70, 0x70, - 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x70, 0x70, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x22, - 0xa4, 0x01, 0x0a, 0x0b, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, - 0x18, 0x0a, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x6f, 0x75, - 0x74, 0x65, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x6f, 0x75, - 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x31, 0x0a, 0x09, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x54, 0x79, - 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x13, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x4f, 
0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x70, - 0x70, 0x72, 0x6f, 0x66, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x3e, 0x0a, 0x0d, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x75, 0x0a, 0x0f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x70, 0x70, - 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x70, 0x70, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, - 0x16, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x46, 0x0a, - 0x13, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d, 0x65, - 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x06, 0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2a, 0x79, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x41, 0x52, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x0f, 0x0a, - 0x0b, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x55, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0b, - 0x0a, 0x07, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x44, - 0x45, 0x47, 0x52, 0x41, 0x44, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, - 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x4f, 0x50, 0x50, 0x49, 0x4e, - 0x47, 0x10, 0x05, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x50, 0x47, 0x52, 0x41, 0x44, 0x49, 0x4e, 0x47, - 0x10, 0x06, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x4f, 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x07, - 0x2a, 0x28, 0x0a, 0x0c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x0b, 0x0a, - 0x07, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x01, 0x2a, 0x7f, 0x0a, 0x0b, 0x50, 0x70, - 0x72, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x4c, 0x4c, - 0x4f, 0x43, 0x53, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x10, 0x01, - 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4d, 0x44, 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x02, 0x12, 0x0d, 0x0a, - 0x09, 0x47, 0x4f, 0x52, 0x4f, 0x55, 0x54, 0x49, 0x4e, 0x45, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, - 0x48, 0x45, 0x41, 0x50, 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x55, 0x54, 0x45, 0x58, 0x10, - 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x52, 0x4f, 0x46, 0x49, 0x4c, 0x45, 
0x10, 0x06, 0x12, 0x10, - 0x0a, 0x0c, 0x54, 0x48, 0x52, 0x45, 0x41, 0x44, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x10, 0x07, - 0x12, 0x09, 0x0a, 0x05, 0x54, 0x52, 0x41, 0x43, 0x45, 0x10, 0x08, 0x32, 0x8e, 0x03, 0x0a, 0x13, - 0x45, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x74, - 0x72, 0x6f, 0x6c, 0x12, 0x31, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0d, - 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x17, 0x2e, - 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, - 0x16, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x07, 0x52, 0x65, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x1a, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x07, 0x55, 0x70, - 0x67, 0x72, 0x61, 0x64, 0x65, 0x12, 0x16, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, - 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, - 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, - 0x74, 0x61, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x1a, 0x18, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x4d, - 0x65, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x05, 0x50, - 0x70, 0x72, 0x6f, 0x66, 0x12, 0x14, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, - 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x63, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x39, 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, - 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, - 0x1b, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x26, 0x5a, 0x21, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x67, 0x65, - 0x6e, 0x74, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x63, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x72, 0x22, 0xb5, 0x01, 0x0a, 0x12, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x55, + 0x6e, 0x69, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x6e, 0x69, 0x74, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x63, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x75, + 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x6e, 0x69, 0x74, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x6e, 0x69, 0x74, 0x49, 0x64, + 0x12, 0x23, 0x0a, 0x05, 
0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0xb9, 0x01, 0x0a, 0x14, 0x43, 0x6f, + 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, + 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x3a, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, + 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, + 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4d, 0x65, 0x74, + 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x1a, 0x37, 0x0a, 0x09, + 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xe6, 0x01, 0x0a, 0x0e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, + 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x05, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x63, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x05, 0x75, + 0x6e, 0x69, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x55, 0x6e, 0x69, + 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x12, 0x3f, 0x0a, + 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, + 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xb5, + 0x03, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x18, 0x0a, 0x07, 0x70, + 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x72, + 0x6f, 0x63, 0x65, 0x73, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, + 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, + 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, + 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x65, 0x70, 0x68, + 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, + 0x69, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x43, + 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x75, 0x73, 0x65, 0x72, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x75, 0x73, 0x65, + 0x72, 0x5f, 0x67, 0x69, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x75, 0x73, 0x65, + 0x72, 0x47, 0x69, 0x64, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x72, 0x63, 0x68, 0x69, 0x74, 0x65, 0x63, + 0x74, 0x75, 0x72, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x72, 0x63, 0x68, + 0x69, 0x74, 0x65, 0x63, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, + 0x5f, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0f, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x4c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x64, + 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x86, 0x01, 0x0a, 0x0d, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x6f, + 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x22, + 0x3a, 0x0a, 0x10, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x05, 0x70, 0x72, 0x6f, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x72, 0x6f, 0x63, + 0x4d, 0x65, 0x74, 0x61, 0x52, 0x05, 0x70, 0x72, 0x6f, 0x63, 0x73, 0x22, 0x9d, 0x01, 0x0a, 0x0c, + 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 
0x12, 0x31, 0x0a, 0x09, + 0x70, 0x70, 0x72, 0x6f, 0x66, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, + 0x13, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x24, 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x1a, 0x0a, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x22, 0xa4, 0x01, 0x0a, 0x0b, + 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x61, + 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x70, + 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, + 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, + 0x79, 0x12, 0x31, 0x0a, 0x09, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x54, 0x79, 0x70, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x13, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, + 0x72, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x70, 0x70, 0x72, 0x6f, 0x66, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x14, 0x0a, 0x05, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x22, 0x3e, 0x0a, 0x0d, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, + 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x73, 0x22, 0x75, 0x0a, 0x0f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x1a, 0x0a, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x72, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x46, 0x0a, 0x13, 0x50, 0x72, 0x6f, + 0x63, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x2f, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x2a, 0x85, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0c, 0x0a, 0x08, 0x53, + 
0x54, 0x41, 0x52, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x43, 0x4f, 0x4e, + 0x46, 0x49, 0x47, 0x55, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x48, 0x45, + 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x45, 0x47, 0x52, 0x41, + 0x44, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, + 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, + 0x0b, 0x0a, 0x07, 0x53, 0x54, 0x4f, 0x50, 0x50, 0x45, 0x44, 0x10, 0x06, 0x12, 0x0d, 0x0a, 0x09, + 0x55, 0x50, 0x47, 0x52, 0x41, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x07, 0x12, 0x0c, 0x0a, 0x08, 0x52, + 0x4f, 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x08, 0x2a, 0x21, 0x0a, 0x08, 0x55, 0x6e, 0x69, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x49, 0x4e, 0x50, 0x55, 0x54, 0x10, 0x00, + 0x12, 0x0a, 0x0a, 0x06, 0x4f, 0x55, 0x54, 0x50, 0x55, 0x54, 0x10, 0x01, 0x2a, 0x28, 0x0a, 0x0c, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, + 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x46, 0x41, 0x49, + 0x4c, 0x55, 0x52, 0x45, 0x10, 0x01, 0x2a, 0x7f, 0x0a, 0x0b, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x4c, 0x4c, 0x4f, 0x43, 0x53, 0x10, + 0x00, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, + 0x43, 0x4d, 0x44, 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x47, 0x4f, 0x52, + 0x4f, 0x55, 0x54, 0x49, 0x4e, 0x45, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x45, 0x41, 0x50, + 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x55, 0x54, 0x45, 0x58, 0x10, 0x05, 0x12, 0x0b, 0x0a, + 0x07, 0x50, 0x52, 0x4f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x48, + 0x52, 0x45, 0x41, 0x44, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x10, 0x07, 0x12, 0x09, 0x0a, 0x05, + 0x54, 0x52, 0x41, 0x43, 0x45, 0x10, 0x08, 0x32, 0x8c, 0x03, 0x0a, 0x13, 0x45, 0x6c, 0x61, 0x73, + 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, + 0x31, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0d, 0x2e, 0x63, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x15, 0x2e, 0x63, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x31, 0x0a, 0x07, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x0d, 0x2e, 0x63, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x17, 0x2e, 0x63, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x07, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x12, + 0x16, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x33, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x0d, 0x2e, 0x63, + 0x70, 0x72, 0x6f, 0x74, 
0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x18, 0x2e, 0x63, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x05, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x12, 0x14, + 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, + 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x0b, 0x50, + 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1b, 0x2e, 0x63, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x26, 0x5a, 0x21, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0xf8, 0x01, 0x01, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1372,57 +1612,66 @@ func file_control_proto_rawDescGZIP() []byte { return file_control_proto_rawDescData } -var file_control_proto_enumTypes = make([]protoimpl.EnumInfo, 3) -var file_control_proto_msgTypes = make([]protoimpl.MessageInfo, 14) +var file_control_proto_enumTypes = make([]protoimpl.EnumInfo, 4) +var file_control_proto_msgTypes = make([]protoimpl.MessageInfo, 17) var file_control_proto_goTypes = []interface{}{ - (Status)(0), // 0: cproto.Status - (ActionStatus)(0), // 1: cproto.ActionStatus - (PprofOption)(0), // 2: cproto.PprofOption - (*Empty)(nil), // 3: cproto.Empty - (*VersionResponse)(nil), // 4: cproto.VersionResponse - (*RestartResponse)(nil), // 5: cproto.RestartResponse - (*UpgradeRequest)(nil), // 6: cproto.UpgradeRequest - (*UpgradeResponse)(nil), // 7: cproto.UpgradeResponse - (*ApplicationStatus)(nil), // 8: cproto.ApplicationStatus - (*ProcMeta)(nil), // 9: cproto.ProcMeta - (*StatusResponse)(nil), // 10: cproto.StatusResponse - (*ProcMetaResponse)(nil), // 11: cproto.ProcMetaResponse - (*PprofRequest)(nil), // 12: cproto.PprofRequest - (*PprofResult)(nil), // 13: cproto.PprofResult - (*PprofResponse)(nil), // 14: cproto.PprofResponse - (*MetricsResponse)(nil), // 15: cproto.MetricsResponse - (*ProcMetricsResponse)(nil), // 16: cproto.ProcMetricsResponse + (State)(0), // 0: cproto.State + (UnitType)(0), // 1: cproto.UnitType + (ActionStatus)(0), // 2: cproto.ActionStatus + (PprofOption)(0), // 3: cproto.PprofOption + (*Empty)(nil), // 4: cproto.Empty + (*VersionResponse)(nil), // 5: cproto.VersionResponse + (*RestartResponse)(nil), // 6: cproto.RestartResponse + (*UpgradeRequest)(nil), // 7: cproto.UpgradeRequest + (*UpgradeResponse)(nil), // 8: cproto.UpgradeResponse + (*ComponentUnitState)(nil), // 9: cproto.ComponentUnitState + (*ComponentVersionInfo)(nil), // 10: cproto.ComponentVersionInfo + (*ComponentState)(nil), // 11: cproto.ComponentState + (*ProcMeta)(nil), // 12: cproto.ProcMeta + (*StateResponse)(nil), // 13: cproto.StateResponse + (*ProcMetaResponse)(nil), // 14: cproto.ProcMetaResponse + (*PprofRequest)(nil), // 15: cproto.PprofRequest + (*PprofResult)(nil), // 16: cproto.PprofResult + (*PprofResponse)(nil), // 17: cproto.PprofResponse + (*MetricsResponse)(nil), // 18: cproto.MetricsResponse + (*ProcMetricsResponse)(nil), // 19: cproto.ProcMetricsResponse + nil, // 20: 
cproto.ComponentVersionInfo.MetaEntry } var file_control_proto_depIdxs = []int32{ - 1, // 0: cproto.RestartResponse.status:type_name -> cproto.ActionStatus - 1, // 1: cproto.UpgradeResponse.status:type_name -> cproto.ActionStatus - 0, // 2: cproto.ApplicationStatus.status:type_name -> cproto.Status - 0, // 3: cproto.StatusResponse.status:type_name -> cproto.Status - 8, // 4: cproto.StatusResponse.applications:type_name -> cproto.ApplicationStatus - 9, // 5: cproto.ProcMetaResponse.procs:type_name -> cproto.ProcMeta - 2, // 6: cproto.PprofRequest.pprofType:type_name -> cproto.PprofOption - 2, // 7: cproto.PprofResult.pprofType:type_name -> cproto.PprofOption - 13, // 8: cproto.PprofResponse.results:type_name -> cproto.PprofResult - 15, // 9: cproto.ProcMetricsResponse.result:type_name -> cproto.MetricsResponse - 3, // 10: cproto.ElasticAgentControl.Version:input_type -> cproto.Empty - 3, // 11: cproto.ElasticAgentControl.Status:input_type -> cproto.Empty - 3, // 12: cproto.ElasticAgentControl.Restart:input_type -> cproto.Empty - 6, // 13: cproto.ElasticAgentControl.Upgrade:input_type -> cproto.UpgradeRequest - 3, // 14: cproto.ElasticAgentControl.ProcMeta:input_type -> cproto.Empty - 12, // 15: cproto.ElasticAgentControl.Pprof:input_type -> cproto.PprofRequest - 3, // 16: cproto.ElasticAgentControl.ProcMetrics:input_type -> cproto.Empty - 4, // 17: cproto.ElasticAgentControl.Version:output_type -> cproto.VersionResponse - 10, // 18: cproto.ElasticAgentControl.Status:output_type -> cproto.StatusResponse - 5, // 19: cproto.ElasticAgentControl.Restart:output_type -> cproto.RestartResponse - 7, // 20: cproto.ElasticAgentControl.Upgrade:output_type -> cproto.UpgradeResponse - 11, // 21: cproto.ElasticAgentControl.ProcMeta:output_type -> cproto.ProcMetaResponse - 14, // 22: cproto.ElasticAgentControl.Pprof:output_type -> cproto.PprofResponse - 16, // 23: cproto.ElasticAgentControl.ProcMetrics:output_type -> cproto.ProcMetricsResponse - 17, // [17:24] is the sub-list for method output_type - 10, // [10:17] is the sub-list for method input_type - 10, // [10:10] is the sub-list for extension type_name - 10, // [10:10] is the sub-list for extension extendee - 0, // [0:10] is the sub-list for field type_name + 2, // 0: cproto.RestartResponse.status:type_name -> cproto.ActionStatus + 2, // 1: cproto.UpgradeResponse.status:type_name -> cproto.ActionStatus + 1, // 2: cproto.ComponentUnitState.unit_type:type_name -> cproto.UnitType + 0, // 3: cproto.ComponentUnitState.state:type_name -> cproto.State + 20, // 4: cproto.ComponentVersionInfo.meta:type_name -> cproto.ComponentVersionInfo.MetaEntry + 0, // 5: cproto.ComponentState.state:type_name -> cproto.State + 9, // 6: cproto.ComponentState.units:type_name -> cproto.ComponentUnitState + 10, // 7: cproto.ComponentState.version_info:type_name -> cproto.ComponentVersionInfo + 0, // 8: cproto.StateResponse.state:type_name -> cproto.State + 11, // 9: cproto.StateResponse.components:type_name -> cproto.ComponentState + 12, // 10: cproto.ProcMetaResponse.procs:type_name -> cproto.ProcMeta + 3, // 11: cproto.PprofRequest.pprofType:type_name -> cproto.PprofOption + 3, // 12: cproto.PprofResult.pprofType:type_name -> cproto.PprofOption + 16, // 13: cproto.PprofResponse.results:type_name -> cproto.PprofResult + 18, // 14: cproto.ProcMetricsResponse.result:type_name -> cproto.MetricsResponse + 4, // 15: cproto.ElasticAgentControl.Version:input_type -> cproto.Empty + 4, // 16: cproto.ElasticAgentControl.State:input_type -> cproto.Empty + 4, // 17: 
cproto.ElasticAgentControl.Restart:input_type -> cproto.Empty + 7, // 18: cproto.ElasticAgentControl.Upgrade:input_type -> cproto.UpgradeRequest + 4, // 19: cproto.ElasticAgentControl.ProcMeta:input_type -> cproto.Empty + 15, // 20: cproto.ElasticAgentControl.Pprof:input_type -> cproto.PprofRequest + 4, // 21: cproto.ElasticAgentControl.ProcMetrics:input_type -> cproto.Empty + 5, // 22: cproto.ElasticAgentControl.Version:output_type -> cproto.VersionResponse + 13, // 23: cproto.ElasticAgentControl.State:output_type -> cproto.StateResponse + 6, // 24: cproto.ElasticAgentControl.Restart:output_type -> cproto.RestartResponse + 8, // 25: cproto.ElasticAgentControl.Upgrade:output_type -> cproto.UpgradeResponse + 14, // 26: cproto.ElasticAgentControl.ProcMeta:output_type -> cproto.ProcMetaResponse + 17, // 27: cproto.ElasticAgentControl.Pprof:output_type -> cproto.PprofResponse + 19, // 28: cproto.ElasticAgentControl.ProcMetrics:output_type -> cproto.ProcMetricsResponse + 22, // [22:29] is the sub-list for method output_type + 15, // [15:22] is the sub-list for method input_type + 15, // [15:15] is the sub-list for extension type_name + 15, // [15:15] is the sub-list for extension extendee + 0, // [0:15] is the sub-list for field type_name } func init() { file_control_proto_init() } @@ -1492,7 +1741,7 @@ func file_control_proto_init() { } } file_control_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApplicationStatus); i { + switch v := v.(*ComponentUnitState); i { case 0: return &v.state case 1: @@ -1504,7 +1753,7 @@ func file_control_proto_init() { } } file_control_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProcMeta); i { + switch v := v.(*ComponentVersionInfo); i { case 0: return &v.state case 1: @@ -1516,7 +1765,7 @@ func file_control_proto_init() { } } file_control_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StatusResponse); i { + switch v := v.(*ComponentState); i { case 0: return &v.state case 1: @@ -1528,7 +1777,7 @@ func file_control_proto_init() { } } file_control_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProcMetaResponse); i { + switch v := v.(*ProcMeta); i { case 0: return &v.state case 1: @@ -1540,7 +1789,7 @@ func file_control_proto_init() { } } file_control_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PprofRequest); i { + switch v := v.(*StateResponse); i { case 0: return &v.state case 1: @@ -1552,7 +1801,7 @@ func file_control_proto_init() { } } file_control_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PprofResult); i { + switch v := v.(*ProcMetaResponse); i { case 0: return &v.state case 1: @@ -1564,7 +1813,7 @@ func file_control_proto_init() { } } file_control_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PprofResponse); i { + switch v := v.(*PprofRequest); i { case 0: return &v.state case 1: @@ -1576,7 +1825,7 @@ func file_control_proto_init() { } } file_control_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MetricsResponse); i { + switch v := v.(*PprofResult); i { case 0: return &v.state case 1: @@ -1588,6 +1837,30 @@ func file_control_proto_init() { } } file_control_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PprofResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + 
case 2:
+			return &v.unknownFields
+		default:
+			return nil
+		}
+	}
+	file_control_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+		switch v := v.(*MetricsResponse); i {
+		case 0:
+			return &v.state
+		case 1:
+			return &v.sizeCache
+		case 2:
+			return &v.unknownFields
+		default:
+			return nil
+		}
+	}
+	file_control_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
 		switch v := v.(*ProcMetricsResponse); i {
 		case 0:
 			return &v.state
@@ -1605,8 +1878,8 @@ func file_control_proto_init() {
 		File: protoimpl.DescBuilder{
 			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
 			RawDescriptor: file_control_proto_rawDesc,
-			NumEnums:      3,
-			NumMessages:   14,
+			NumEnums:      4,
+			NumMessages:   17,
 			NumExtensions: 0,
 			NumServices:   1,
 		},
diff --git a/internal/pkg/agent/control/cproto/control_grpc.pb.go b/internal/pkg/agent/control/cproto/control_grpc.pb.go
index 3365f1a6496..b3be5c72626 100644
--- a/internal/pkg/agent/control/cproto/control_grpc.pb.go
+++ b/internal/pkg/agent/control/cproto/control_grpc.pb.go
@@ -1,7 +1,3 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
-
 // Code generated by protoc-gen-go-grpc. DO NOT EDIT.
 // versions:
 // - protoc-gen-go-grpc v1.2.0
@@ -12,7 +8,6 @@ package cproto
 
 import (
 	context "context"
-
 	grpc "google.golang.org/grpc"
 	codes "google.golang.org/grpc/codes"
 	status "google.golang.org/grpc/status"
@@ -29,8 +24,8 @@ const _ = grpc.SupportPackageIsVersion7
 type ElasticAgentControlClient interface {
 	// Fetches the currently running version of the Elastic Agent.
 	Version(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*VersionResponse, error)
-	// Fetches the currently status of the Elastic Agent.
-	Status(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*StatusResponse, error)
+	// Fetches the current state of the Elastic Agent.
+	State(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*StateResponse, error)
 	// Restart restarts the current running Elastic Agent.
 	Restart(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*RestartResponse, error)
 	// Upgrade starts the upgrade process of Elastic Agent.
@@ -60,9 +55,9 @@ func (c *elasticAgentControlClient) Version(ctx context.Context, in *Empty, opts
 	return out, nil
 }
 
-func (c *elasticAgentControlClient) Status(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*StatusResponse, error) {
-	out := new(StatusResponse)
-	err := c.cc.Invoke(ctx, "/cproto.ElasticAgentControl/Status", in, out, opts...)
+func (c *elasticAgentControlClient) State(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*StateResponse, error) {
+	out := new(StateResponse)
+	err := c.cc.Invoke(ctx, "/cproto.ElasticAgentControl/State", in, out, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -120,8 +115,8 @@ func (c *elasticAgentControlClient) ProcMetrics(ctx context.Context, in *Empty,
 type ElasticAgentControlServer interface {
 	// Fetches the currently running version of the Elastic Agent.
 	Version(context.Context, *Empty) (*VersionResponse, error)
-	// Fetches the currently status of the Elastic Agent.
-	Status(context.Context, *Empty) (*StatusResponse, error)
+	// Fetches the current state of the Elastic Agent.
+	State(context.Context, *Empty) (*StateResponse, error)
 	// Restart restarts the current running Elastic Agent.
Restart(context.Context, *Empty) (*RestartResponse, error) // Upgrade starts the upgrade process of Elastic Agent. @@ -142,8 +137,8 @@ type UnimplementedElasticAgentControlServer struct { func (UnimplementedElasticAgentControlServer) Version(context.Context, *Empty) (*VersionResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Version not implemented") } -func (UnimplementedElasticAgentControlServer) Status(context.Context, *Empty) (*StatusResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Status not implemented") +func (UnimplementedElasticAgentControlServer) State(context.Context, *Empty) (*StateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method State not implemented") } func (UnimplementedElasticAgentControlServer) Restart(context.Context, *Empty) (*RestartResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Restart not implemented") @@ -191,20 +186,20 @@ func _ElasticAgentControl_Version_Handler(srv interface{}, ctx context.Context, return interceptor(ctx, in, info, handler) } -func _ElasticAgentControl_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { +func _ElasticAgentControl_State_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(Empty) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(ElasticAgentControlServer).Status(ctx, in) + return srv.(ElasticAgentControlServer).State(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/cproto.ElasticAgentControl/Status", + FullMethod: "/cproto.ElasticAgentControl/State", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ElasticAgentControlServer).Status(ctx, req.(*Empty)) + return srv.(ElasticAgentControlServer).State(ctx, req.(*Empty)) } return interceptor(ctx, in, info, handler) } @@ -311,8 +306,8 @@ var ElasticAgentControl_ServiceDesc = grpc.ServiceDesc{ Handler: _ElasticAgentControl_Version_Handler, }, { - MethodName: "Status", - Handler: _ElasticAgentControl_Status_Handler, + MethodName: "State", + Handler: _ElasticAgentControl_State_Handler, }, { MethodName: "Restart", diff --git a/internal/pkg/agent/control/server/server.go b/internal/pkg/agent/control/server/server.go index 6d3e5181729..3b2de4adf41 100644 --- a/internal/pkg/agent/control/server/server.go +++ b/internal/pkg/agent/control/server/server.go @@ -8,31 +8,18 @@ import ( "context" "encoding/json" "fmt" - "io" - "net" - "net/http" - "runtime" - "strings" - "sync" - "time" - + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" "go.elastic.co/apm" "go.elastic.co/apm/module/apmgrpc" "google.golang.org/grpc" + "net" + "sync" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/reexec" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade" "github.com/elastic/elastic-agent/internal/pkg/agent/control" "github.com/elastic/elastic-agent/internal/pkg/agent/control/cproto" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/agent/program" - monitoring "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/beats" monitoringCfg "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config" - 
"github.com/elastic/elastic-agent/internal/pkg/core/socket" - "github.com/elastic/elastic-agent/internal/pkg/core/status" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi" "github.com/elastic/elastic-agent/internal/pkg/release" - "github.com/elastic/elastic-agent/internal/pkg/sorted" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -43,11 +30,8 @@ type Server struct { cproto.UnimplementedElasticAgentControlServer logger *logger.Logger - rex reexec.ExecManager - statusCtrl status.Controller - up *upgrade.Upgrader - routeFn func() *sorted.Set monitoringCfg *monitoringCfg.MonitoringConfig + coord *coordinator.Coordinator listener net.Listener server *grpc.Server tracer *apm.Tracer @@ -65,38 +49,15 @@ type specInfo struct { } // New creates a new control protocol server. -func New(log *logger.Logger, rex reexec.ExecManager, statusCtrl status.Controller, up *upgrade.Upgrader, tracer *apm.Tracer) *Server { +func New(log *logger.Logger, cfg *monitoringCfg.MonitoringConfig, coord *coordinator.Coordinator, tracer *apm.Tracer) *Server { return &Server{ - logger: log, - rex: rex, - statusCtrl: statusCtrl, - tracer: tracer, - up: up, + logger: log, + monitoringCfg: cfg, + coord: coord, + tracer: tracer, } } -// SetUpgrader changes the upgrader. -func (s *Server) SetUpgrader(up *upgrade.Upgrader) { - s.lock.Lock() - defer s.lock.Unlock() - s.up = up -} - -// SetRouteFn changes the route retrieval function. -func (s *Server) SetRouteFn(routesFetchFn func() *sorted.Set) { - s.lock.Lock() - defer s.lock.Unlock() - s.routeFn = routesFetchFn -} - -// SetMonitoringCfg sets a reference to the monitoring config used by the running agent. -// the controller references this config to find out if pprof is enabled for the agent or not -func (s *Server) SetMonitoringCfg(cfg *monitoringCfg.MonitoringConfig) { - s.lock.Lock() - defer s.lock.Unlock() - s.monitoringCfg = cfg -} - // Start starts the GRPC endpoint and accepts new connections. func (s *Server) Start() error { if s.server != nil { @@ -149,19 +110,53 @@ func (s *Server) Version(_ context.Context, _ *cproto.Empty) (*cproto.VersionRes }, nil } -// Status returns the overall status of the agent. -func (s *Server) Status(_ context.Context, _ *cproto.Empty) (*cproto.StatusResponse, error) { - status := s.statusCtrl.Status() - return &cproto.StatusResponse{ - Status: agentStatusToProto(status.Status), - Message: status.Message, - Applications: agentAppStatusToProto(status.Applications), +// State returns the overall state of the agent. 
+func (s *Server) State(_ context.Context, _ *cproto.Empty) (*cproto.StateResponse, error) {
+	var err error
+
+	state := s.coord.State()
+	components := make([]*cproto.ComponentState, 0, len(state.Components))
+	for _, comp := range state.Components {
+		units := make([]*cproto.ComponentUnitState, 0, len(comp.State.Units))
+		for key, unit := range comp.State.Units {
+			payload := []byte("")
+			if unit.Payload != nil {
+				payload, err = json.Marshal(unit.Payload)
+				if err != nil {
+					return nil, fmt.Errorf("failed to marshal component %s unit %s payload: %w", comp.Component.ID, key.UnitID, err)
+				}
+			}
+			units = append(units, &cproto.ComponentUnitState{
+				UnitType: cproto.UnitType(key.UnitType),
+				UnitId:   key.UnitID,
+				State:    cproto.State(unit.State),
+				Message:  unit.Message,
+				Payload:  string(payload),
+			})
+		}
+		components = append(components, &cproto.ComponentState{
+			Id:      comp.Component.ID,
+			Name:    comp.Component.Spec.BinaryName,
+			State:   cproto.State(comp.State.State),
+			Message: comp.State.Message,
+			Units:   units,
+			VersionInfo: &cproto.ComponentVersionInfo{
+				Name:    comp.State.VersionInfo.Name,
+				Version: comp.State.VersionInfo.Version,
+				Meta:    comp.State.VersionInfo.Meta,
+			},
+		})
+	}
+	return &cproto.StateResponse{
+		State:      state.State,
+		Message:    state.Message,
+		Components: components,
+	}, nil
 }
 
 // Restart performs re-exec.
 func (s *Server) Restart(_ context.Context, _ *cproto.Empty) (*cproto.RestartResponse, error) {
-	s.rex.ReExec(nil)
+	s.coord.ReExec(nil)
 	return &cproto.RestartResponse{
 		Status: cproto.ActionStatus_SUCCESS,
 	}, nil
@@ -169,29 +164,13 @@ func (s *Server) Restart(_ context.Context, _ *cproto.Empty) (*cproto.RestartRes
 
 // Upgrade performs the upgrade operation.
 func (s *Server) Upgrade(ctx context.Context, request *cproto.UpgradeRequest) (*cproto.UpgradeResponse, error) {
-	s.lock.RLock()
-	u := s.up
-	s.lock.RUnlock()
-	if u == nil {
-		// not running with upgrader (must be controlled by Fleet)
-		return &cproto.UpgradeResponse{
-			Status: cproto.ActionStatus_FAILURE,
-			Error:  "cannot be upgraded; perform upgrading using Fleet",
-		}, nil
-	}
-	cb, err := u.Upgrade(ctx, &upgradeRequest{request}, false)
+	err := s.coord.Upgrade(ctx, request.Version, request.SourceURI, nil)
 	if err != nil {
 		return &cproto.UpgradeResponse{ //nolint:nilerr // returns err as response
 			Status: cproto.ActionStatus_FAILURE,
 			Error:  err.Error(),
 		}, nil
 	}
-	// perform the re-exec after a 1 second delay
-	// this ensures that the upgrade response over GRPC is returned
-	go func() {
-		<-time.After(time.Second)
-		s.rex.ReExec(cb)
-	}()
 	return &cproto.UpgradeResponse{
 		Status:  cproto.ActionStatus_SUCCESS,
 		Version: request.Version,
@@ -217,25 +196,28 @@ type BeatInfo struct {
 
 // ProcMeta returns version and beat inforation for all running processes.
func (s *Server) ProcMeta(ctx context.Context, _ *cproto.Empty) (*cproto.ProcMetaResponse, error) { - if s.routeFn == nil { - return nil, errors.New("route function is nil") - } + /* + if s.routeFn == nil { + return nil, errors.New("route function is nil") + } - resp := &cproto.ProcMetaResponse{ - Procs: []*cproto.ProcMeta{}, - } + resp := &cproto.ProcMetaResponse{ + Procs: []*cproto.ProcMeta{}, + } - // gather spec data for all rk/apps running - specs := s.getSpecInfo("", "") - for _, si := range specs { - endpoint := monitoring.MonitoringEndpoint(si.spec, runtime.GOOS, si.rk) - client := newSocketRequester(si.app, si.rk, endpoint) + // gather spec data for all rk/apps running + specs := s.getSpecInfo("", "") + for _, si := range specs { + endpoint := monitoring.MonitoringEndpoint(si.spec, runtime.GOOS, si.rk) + client := newSocketRequester(si.app, si.rk, endpoint) - procMeta := client.procMeta(ctx) - resp.Procs = append(resp.Procs, procMeta) - } + procMeta := client.procMeta(ctx) + resp.Procs = append(resp.Procs, procMeta) + } - return resp, nil + return resp, nil + */ + return nil, nil } // Pprof returns /debug/pprof data for the requested applicaiont-route_key or all running applications. @@ -244,66 +226,69 @@ func (s *Server) Pprof(ctx context.Context, req *cproto.PprofRequest) (*cproto.P return nil, fmt.Errorf("agent.monitoring.pprof disabled") } - if s.routeFn == nil { - return nil, errors.New("route function is nil") - } - - dur, err := time.ParseDuration(req.TraceDuration) - if err != nil { - return nil, fmt.Errorf("unable to parse trace duration: %w", err) - } + /* + if s.routeFn == nil { + return nil, errors.New("route function is nil") + } - resp := &cproto.PprofResponse{ - Results: []*cproto.PprofResult{}, - } + dur, err := time.ParseDuration(req.TraceDuration) + if err != nil { + return nil, fmt.Errorf("unable to parse trace duration: %w", err) + } - var wg sync.WaitGroup - ch := make(chan *cproto.PprofResult, 1) + resp := &cproto.PprofResponse{ + Results: []*cproto.PprofResult{}, + } - // retrieve elastic-agent pprof data if requested or application is unspecified. - if req.AppName == "" || req.AppName == agentName { - endpoint := monitoring.AgentMonitoringEndpoint(runtime.GOOS, s.monitoringCfg.HTTP) - c := newSocketRequester(agentName, "", endpoint) - for _, opt := range req.PprofType { - wg.Add(1) - go func(opt cproto.PprofOption) { - res := c.getPprof(ctx, opt, dur) - ch <- res - wg.Done() - }(opt) + var wg sync.WaitGroup + ch := make(chan *cproto.PprofResult, 1) + + // retrieve elastic-agent pprof data if requested or application is unspecified. + if req.AppName == "" || req.AppName == agentName { + endpoint := monitoring.AgentMonitoringEndpoint(runtime.GOOS, s.monitoringCfg.HTTP) + c := newSocketRequester(agentName, "", endpoint) + for _, opt := range req.PprofType { + wg.Add(1) + go func(opt cproto.PprofOption) { + res := c.getPprof(ctx, opt, dur) + ch <- res + wg.Done() + }(opt) + } } - } - // get requested rk/appname spec or all specs - var specs []specInfo - if req.AppName != agentName { - specs = s.getSpecInfo(req.RouteKey, req.AppName) - } - for _, si := range specs { - endpoint := monitoring.MonitoringEndpoint(si.spec, runtime.GOOS, si.rk) - c := newSocketRequester(si.app, si.rk, endpoint) - // Launch a concurrent goroutine to gather all pprof endpoints from a socket. 
- for _, opt := range req.PprofType { - wg.Add(1) - go func(opt cproto.PprofOption) { - res := c.getPprof(ctx, opt, dur) - ch <- res - wg.Done() - }(opt) + // get requested rk/appname spec or all specs + var specs []specInfo + if req.AppName != agentName { + specs = s.getSpecInfo(req.RouteKey, req.AppName) + } + for _, si := range specs { + endpoint := monitoring.MonitoringEndpoint(si.spec, runtime.GOOS, si.rk) + c := newSocketRequester(si.app, si.rk, endpoint) + // Launch a concurrent goroutine to gather all pprof endpoints from a socket. + for _, opt := range req.PprofType { + wg.Add(1) + go func(opt cproto.PprofOption) { + res := c.getPprof(ctx, opt, dur) + ch <- res + wg.Done() + }(opt) + } } - } - // wait for the waitgroup to be done and close the channel - go func() { - wg.Wait() - close(ch) - }() + // wait for the waitgroup to be done and close the channel + go func() { + wg.Wait() + close(ch) + }() - // gather all results from channel until closed. - for res := range ch { - resp.Results = append(resp.Results, res) - } - return resp, nil + // gather all results from channel until closed. + for res := range ch { + resp.Results = append(resp.Results, res) + } + return resp, nil + */ + return nil, nil } // ProcMetrics returns all buffered metrics data for the agent and running processes. @@ -313,32 +298,36 @@ func (s *Server) ProcMetrics(ctx context.Context, _ *cproto.Empty) (*cproto.Proc return &cproto.ProcMetricsResponse{}, nil } - if s.routeFn == nil { - return nil, errors.New("route function is nil") - } + /* + if s.routeFn == nil { + return nil, errors.New("route function is nil") + } - // gather metrics buffer data from the elastic-agent - endpoint := monitoring.AgentMonitoringEndpoint(runtime.GOOS, s.monitoringCfg.HTTP) - c := newSocketRequester(agentName, "", endpoint) - metrics := c.procMetrics(ctx) + // gather metrics buffer data from the elastic-agent + endpoint := monitoring.AgentMonitoringEndpoint(runtime.GOOS, s.monitoringCfg.HTTP) + c := newSocketRequester(agentName, "", endpoint) + metrics := c.procMetrics(ctx) - resp := &cproto.ProcMetricsResponse{ - Result: []*cproto.MetricsResponse{metrics}, - } + resp := &cproto.ProcMetricsResponse{ + Result: []*cproto.MetricsResponse{metrics}, + } - // gather metrics buffer data from all other processes - specs := s.getSpecInfo("", "") - for _, si := range specs { - endpoint := monitoring.MonitoringEndpoint(si.spec, runtime.GOOS, si.rk) - client := newSocketRequester(si.app, si.rk, endpoint) + // gather metrics buffer data from all other processes + specs := s.getSpecInfo("", "") + for _, si := range specs { + endpoint := monitoring.MonitoringEndpoint(si.spec, runtime.GOOS, si.rk) + client := newSocketRequester(si.app, si.rk, endpoint) - s.logger.Infof("gather metrics from %s", endpoint) - metrics := client.procMetrics(ctx) - resp.Result = append(resp.Result, metrics) - } - return resp, nil + s.logger.Infof("gather metrics from %s", endpoint) + metrics := client.procMetrics(ctx) + resp.Result = append(resp.Result, metrics) + } + return resp, nil + */ + return nil, nil } +/* // getSpecs will return the specs for the program associated with the specified route key/app name, or all programs if no key(s) are specified. // if matchRK or matchApp are empty all results will be returned. 
func (s *Server) getSpecInfo(matchRK, matchApp string) []specInfo { @@ -554,48 +543,4 @@ func (r *socketRequester) procMetrics(ctx context.Context) *cproto.MetricsRespon res.Result = p return res } - -type upgradeRequest struct { - *cproto.UpgradeRequest -} - -func (r *upgradeRequest) Version() string { - return r.GetVersion() -} - -func (r *upgradeRequest) SourceURI() string { - return r.GetSourceURI() -} - -func (r *upgradeRequest) FleetAction() *fleetapi.ActionUpgrade { - // upgrade request not from Fleet - return nil -} - -func agentStatusToProto(code status.AgentStatusCode) cproto.Status { - if code == status.Degraded { - return cproto.Status_DEGRADED - } - if code == status.Failed { - return cproto.Status_FAILED - } - return cproto.Status_HEALTHY -} - -func agentAppStatusToProto(apps []status.AgentApplicationStatus) []*cproto.ApplicationStatus { - s := make([]*cproto.ApplicationStatus, len(apps)) - for i, a := range apps { - var payload []byte - if a.Payload != nil { - payload, _ = json.Marshal(a.Payload) - } - s[i] = &cproto.ApplicationStatus{ - Id: a.ID, - Name: a.Name, - Status: cproto.Status(a.Status.ToProto()), - Message: a.Message, - Payload: string(payload), - } - } - return s -} +*/ diff --git a/internal/pkg/agent/install/uninstall.go b/internal/pkg/agent/install/uninstall.go index fcf5f8f6c54..35d00df59ef 100644 --- a/internal/pkg/agent/install/uninstall.go +++ b/internal/pkg/agent/install/uninstall.go @@ -27,7 +27,6 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/config" "github.com/elastic/elastic-agent/internal/pkg/config/operations" "github.com/elastic/elastic-agent/internal/pkg/core/app" - "github.com/elastic/elastic-agent/internal/pkg/core/status" "github.com/elastic/elastic-agent/internal/pkg/release" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -249,7 +248,7 @@ func applyDynamics(ctx context.Context, log *logger.Logger, cfg *config.Config) } // apply caps - caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), log, status.NewController(log)) + caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), log) if err != nil { return nil, err } diff --git a/internal/pkg/agent/storage/store/state_store.go b/internal/pkg/agent/storage/store/state_store.go index 3316b34960b..8a6d3fc5e8d 100644 --- a/internal/pkg/agent/storage/store/state_store.go +++ b/internal/pkg/agent/storage/store/state_store.go @@ -16,23 +16,18 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/storage" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "github.com/elastic/elastic-agent/pkg/core/logger" ) type dispatcher interface { - Dispatch(context.Context, FleetAcker, ...action) error + Dispatch(context.Context, acker.Acker, ...action) error } type store interface { Save(io.Reader) error } -// FleetAcker is an acker of actions to fleet. -type FleetAcker interface { - Ack(ctx context.Context, action fleetapi.Action) error - Commit(ctx context.Context) error -} - type storeLoad interface { store Load() (io.ReadCloser, error) @@ -93,7 +88,7 @@ func NewStateStoreWithMigration(log *logger.Logger, actionStorePath, stateStoreP } // NewStateStoreActionAcker creates a new state store backed action acker. 
-func NewStateStoreActionAcker(acker FleetAcker, store *StateStore) *StateStoreActionAcker { +func NewStateStoreActionAcker(acker acker.Acker, store *StateStore) *StateStoreActionAcker { return &StateStoreActionAcker{acker: acker, store: store} } @@ -326,7 +321,7 @@ func (s *StateStore) AckToken() string { // its up to the action store to decide if we need to persist the event for future replay or just // discard the event. type StateStoreActionAcker struct { - acker FleetAcker + acker acker.Acker store *StateStore } @@ -350,7 +345,7 @@ func ReplayActions( ctx context.Context, log *logger.Logger, dispatcher dispatcher, - acker FleetAcker, + acker acker.Acker, actions ...action, ) error { log.Info("restoring current policy from disk") diff --git a/internal/pkg/capabilities/capabilities.go b/internal/pkg/capabilities/capabilities.go index fa360a53794..3d03fab9296 100644 --- a/internal/pkg/capabilities/capabilities.go +++ b/internal/pkg/capabilities/capabilities.go @@ -8,11 +8,8 @@ import ( "errors" "os" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "gopkg.in/yaml.v2" - "github.com/elastic/elastic-agent/internal/pkg/core/status" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -30,14 +27,13 @@ var ( ) type capabilitiesManager struct { - caps []Capability - reporter status.Reporter + caps []Capability } -type capabilityFactory func(*logger.Logger, *ruleDefinitions, status.Reporter) (Capability, error) +type capabilityFactory func(*logger.Logger, *ruleDefinitions) (Capability, error) // Load loads capabilities files and prepares manager. -func Load(capsFile string, log *logger.Logger, sc status.Controller) (Capability, error) { +func Load(capsFile string, log *logger.Logger) (Capability, error) { handlers := []capabilityFactory{ newInputsCapability, newOutputsCapability, @@ -45,8 +41,7 @@ func Load(capsFile string, log *logger.Logger, sc status.Controller) (Capability } cm := &capabilitiesManager{ - caps: make([]Capability, 0), - reporter: sc.RegisterComponentWithPersistance("capabilities", true), + caps: make([]Capability, 0), } // load capabilities from file @@ -56,7 +51,7 @@ func Load(capsFile string, log *logger.Logger, sc status.Controller) (Capability } if os.IsNotExist(err) { - log.Infof("capabilities file not found in %s", capsFile) + log.Infof("Capabilities file not found in %s", capsFile) return cm, nil } defer fd.Close() @@ -69,7 +64,7 @@ func Load(capsFile string, log *logger.Logger, sc status.Controller) (Capability // make list of handlers out of capabilities definition for _, h := range handlers { - cap, err := h(log, definitions, cm.reporter) + cap, err := h(log, definitions) if err != nil { return nil, err } @@ -86,8 +81,6 @@ func Load(capsFile string, log *logger.Logger, sc status.Controller) (Capability func (mgr *capabilitiesManager) Apply(in interface{}) (interface{}, error) { var err error - // reset health on start, child caps will update to fail if needed - mgr.reporter.Update(state.Healthy, "", nil) for _, cap := range mgr.caps { in, err = cap.Apply(in) if err != nil { diff --git a/internal/pkg/capabilities/input.go b/internal/pkg/capabilities/input.go index 7ebc4b4fb15..2428c49f064 100644 --- a/internal/pkg/capabilities/input.go +++ b/internal/pkg/capabilities/input.go @@ -7,11 +7,8 @@ package capabilities import ( "fmt" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" 
"github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" - "github.com/elastic/elastic-agent/internal/pkg/core/status" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -19,7 +16,7 @@ const ( inputsKey = "inputs" ) -func newInputsCapability(log *logger.Logger, rd *ruleDefinitions, reporter status.Reporter) (Capability, error) { +func newInputsCapability(log *logger.Logger, rd *ruleDefinitions) (Capability, error) { if rd == nil { return &multiInputsCapability{log: log, caps: []*inputCapability{}}, nil } @@ -27,7 +24,7 @@ func newInputsCapability(log *logger.Logger, rd *ruleDefinitions, reporter statu caps := make([]*inputCapability, 0, len(rd.Capabilities)) for _, r := range rd.Capabilities { - c, err := newInputCapability(log, r, reporter) + c, err := newInputCapability(log, r) if err != nil { return nil, err } @@ -40,23 +37,21 @@ func newInputsCapability(log *logger.Logger, rd *ruleDefinitions, reporter statu return &multiInputsCapability{log: log, caps: caps}, nil } -func newInputCapability(log *logger.Logger, r ruler, reporter status.Reporter) (*inputCapability, error) { +func newInputCapability(log *logger.Logger, r ruler) (*inputCapability, error) { cap, ok := r.(*inputCapability) if !ok { return nil, nil } cap.log = log - cap.reporter = reporter return cap, nil } type inputCapability struct { - log *logger.Logger - reporter status.Reporter - Name string `json:"name,omitempty" yaml:"name,omitempty"` - Type string `json:"rule" yaml:"rule"` - Input string `json:"input" yaml:"input"` + log *logger.Logger + Name string `json:"name,omitempty" yaml:"name,omitempty"` + Type string `json:"rule" yaml:"rule"` + Input string `json:"input" yaml:"input"` } func (c *inputCapability) Apply(cfgMap map[string]interface{}) (map[string]interface{}, error) { @@ -166,7 +161,6 @@ func (c *inputCapability) renderInputs(inputs []map[string]interface{}) ([]map[s if !isSupported { msg := fmt.Sprintf("input '%s' is not run due to capability restriction '%s'", inputType, c.name()) c.log.Infof(msg) - c.reporter.Update(state.Degraded, msg, nil) } newInputs = append(newInputs, input) diff --git a/internal/pkg/capabilities/output.go b/internal/pkg/capabilities/output.go index de11c3ce3b9..804ca64faa2 100644 --- a/internal/pkg/capabilities/output.go +++ b/internal/pkg/capabilities/output.go @@ -7,10 +7,7 @@ package capabilities import ( "fmt" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/core/status" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -19,7 +16,7 @@ const ( typeKey = "type" ) -func newOutputsCapability(log *logger.Logger, rd *ruleDefinitions, reporter status.Reporter) (Capability, error) { +func newOutputsCapability(log *logger.Logger, rd *ruleDefinitions) (Capability, error) { if rd == nil { return &multiOutputsCapability{log: log, caps: []*outputCapability{}}, nil } @@ -27,7 +24,7 @@ func newOutputsCapability(log *logger.Logger, rd *ruleDefinitions, reporter stat caps := make([]*outputCapability, 0, len(rd.Capabilities)) for _, r := range rd.Capabilities { - c, err := newOutputCapability(log, r, reporter) + c, err := newOutputCapability(log, r) if err != nil { return nil, err } @@ -40,23 +37,21 @@ func newOutputsCapability(log *logger.Logger, rd *ruleDefinitions, reporter stat return &multiOutputsCapability{log: log, caps: caps}, nil } -func newOutputCapability(log *logger.Logger, r ruler, reporter 
status.Reporter) (*outputCapability, error) { +func newOutputCapability(log *logger.Logger, r ruler) (*outputCapability, error) { cap, ok := r.(*outputCapability) if !ok { return nil, nil } cap.log = log - cap.reporter = reporter return cap, nil } type outputCapability struct { - log *logger.Logger - reporter status.Reporter - Name string `json:"name,omitempty" yaml:"name,omitempty"` - Type string `json:"rule" yaml:"rule"` - Output string `json:"output" yaml:"output"` + log *logger.Logger + Name string `json:"name,omitempty" yaml:"name,omitempty"` + Type string `json:"rule" yaml:"rule"` + Output string `json:"output" yaml:"output"` } func (c *outputCapability) Apply(cfgMap map[string]interface{}) (map[string]interface{}, error) { @@ -133,7 +128,6 @@ func (c *outputCapability) renderOutputs(outputs map[string]interface{}) (map[st if !isSupported { msg := fmt.Sprintf("output '%s' is left out due to capability restriction '%s'", outputName, c.name()) c.log.Errorf(msg) - c.reporter.Update(state.Degraded, msg, nil) } } diff --git a/internal/pkg/capabilities/upgrade.go b/internal/pkg/capabilities/upgrade.go index e39c963e222..2773f6c9709 100644 --- a/internal/pkg/capabilities/upgrade.go +++ b/internal/pkg/capabilities/upgrade.go @@ -8,12 +8,8 @@ import ( "fmt" "strings" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" - "github.com/elastic/elastic-agent/internal/pkg/core/status" "github.com/elastic/elastic-agent/internal/pkg/eql" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -26,7 +22,7 @@ const ( // Available variables: // - version // - source_uri -func newUpgradesCapability(log *logger.Logger, rd *ruleDefinitions, reporter status.Reporter) (Capability, error) { +func newUpgradesCapability(log *logger.Logger, rd *ruleDefinitions) (Capability, error) { if rd == nil { return &multiUpgradeCapability{caps: []*upgradeCapability{}}, nil } @@ -34,7 +30,7 @@ func newUpgradesCapability(log *logger.Logger, rd *ruleDefinitions, reporter sta caps := make([]*upgradeCapability, 0, len(rd.Capabilities)) for _, r := range rd.Capabilities { - c, err := newUpgradeCapability(log, r, reporter) + c, err := newUpgradeCapability(log, r) if err != nil { return nil, err } @@ -47,7 +43,7 @@ func newUpgradesCapability(log *logger.Logger, rd *ruleDefinitions, reporter sta return &multiUpgradeCapability{log: log, caps: caps}, nil } -func newUpgradeCapability(log *logger.Logger, r ruler, reporter status.Reporter) (*upgradeCapability, error) { +func newUpgradeCapability(log *logger.Logger, r ruler) (*upgradeCapability, error) { cap, ok := r.(*upgradeCapability) if !ok { return nil, nil @@ -70,15 +66,13 @@ func newUpgradeCapability(log *logger.Logger, r ruler, reporter status.Reporter) cap.upgradeEql = eqlExp cap.log = log - cap.reporter = reporter return cap, nil } type upgradeCapability struct { - log *logger.Logger - reporter status.Reporter - Name string `json:"name,omitempty" yaml:"name,omitempty"` - Type string `json:"rule" yaml:"rule"` + log *logger.Logger + Name string `json:"name,omitempty" yaml:"name,omitempty"` + Type string `json:"rule" yaml:"rule"` // UpgradeEql is eql expression defining upgrade UpgradeEqlDefinition string `json:"upgrade" yaml:"upgrade"` @@ -129,7 +123,6 @@ func (c *upgradeCapability) Apply(upgradeMap map[string]interface{}) (map[string isSupported = !isSupported msg := fmt.Sprintf("upgrade is blocked 
out due to capability restriction '%s'", c.name()) c.log.Errorf(msg) - c.reporter.Update(state.Degraded, msg, nil) } if !isSupported { @@ -163,31 +156,8 @@ func (c *multiUpgradeCapability) Apply(in interface{}) (interface{}, error) { } func upgradeObject(a interface{}) map[string]interface{} { - resultMap := make(map[string]interface{}) - if ua, ok := a.(upgradeAction); ok { - resultMap[versionKey] = ua.Version() - resultMap[sourceURIKey] = ua.SourceURI() - return resultMap + if m, ok := a.(map[string]interface{}); ok { + return m } - - if ua, ok := a.(*fleetapi.ActionUpgrade); ok { - resultMap[versionKey] = ua.Version - resultMap[sourceURIKey] = ua.SourceURI - return resultMap - } - - if ua, ok := a.(fleetapi.ActionUpgrade); ok { - resultMap[versionKey] = ua.Version - resultMap[sourceURIKey] = ua.SourceURI - return resultMap - } - return nil } - -type upgradeAction interface { - // Version to upgrade to. - Version() string - // SourceURI for download. - SourceURI() string -} diff --git a/internal/pkg/composable/controller.go b/internal/pkg/composable/controller.go index 7ee0dc385ca..41b994b7406 100644 --- a/internal/pkg/composable/controller.go +++ b/internal/pkg/composable/controller.go @@ -29,6 +29,9 @@ type Controller interface { // Cancelling the context stops the controller. Run(ctx context.Context) error + // Errors returns the channel to watch for reported errors. + Errors() <-chan error + // Watch returns the channel to watch for variable changes. Watch() <-chan []*transpiler.Vars } @@ -37,6 +40,7 @@ type Controller interface { type controller struct { logger *logger.Logger ch chan []*transpiler.Vars + errCh chan error contextProviders map[string]*contextProviderState dynamicProviders map[string]*dynamicProviderState } @@ -91,6 +95,7 @@ func New(log *logger.Logger, c *config.Config) (Controller, error) { return &controller{ logger: l, ch: make(chan []*transpiler.Vars), + errCh: make(chan error), contextProviders: contextProviders, dynamicProviders: dynamicProviders, }, nil @@ -190,6 +195,11 @@ func (c *controller) Run(ctx context.Context) error { } } +// Errors returns the channel to watch for reported errors. +func (c *controller) Errors() <-chan error { + return c.errCh +} + // Watch returns the channel for variable changes. func (c *controller) Watch() <-chan []*transpiler.Vars { return c.ch diff --git a/internal/pkg/fleetapi/acker/noop/noop_acker.go b/internal/pkg/fleetapi/acker/noop/noop_acker.go index 3e2716193f0..644210bd199 100644 --- a/internal/pkg/fleetapi/acker/noop/noop_acker.go +++ b/internal/pkg/fleetapi/acker/noop/noop_acker.go @@ -2,27 +2,28 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. -package fleet +package noop import ( "context" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" ) -// Acker is a noop acker. +// noopAcker is a noop acker. // Methods of these acker do nothing. -type Acker struct{} +type noopAcker struct{} -// NewAcker creates a new noop acker. -func NewAcker() *Acker { - return &Acker{} +// New creates a new noop acker. +func New() acker.Acker { + return &noopAcker{} } // Ack acknowledges action. -func (f *Acker) Ack(ctx context.Context, action fleetapi.Action) error { +func (f *noopAcker) Ack(ctx context.Context, action fleetapi.Action) error { return nil } // Commit commits ack actions. 
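The composable Controller now exposes Errors() next to Watch(), so a consumer has two channels to drain while Run is active. A rough sketch of that loop, assuming a Controller already constructed with composable.New and a context bounding its lifetime; the log messages are illustrative:

package example

import (
	"context"

	"github.com/elastic/elastic-agent/internal/pkg/composable"
	"github.com/elastic/elastic-agent/pkg/core/logger"
)

// watchVars runs the controller and drains both channels so it never blocks.
func watchVars(ctx context.Context, ctrl composable.Controller, log *logger.Logger) {
	go func() {
		// Run blocks until the context is cancelled.
		if err := ctrl.Run(ctx); err != nil {
			log.Errorf("variable controller stopped: %v", err)
		}
	}()
	for {
		select {
		case <-ctx.Done():
			return
		case vars := <-ctrl.Watch():
			log.Infof("received %d variable contexts", len(vars))
		case err := <-ctrl.Errors():
			if err != nil {
				log.Errorf("variable provider error: %v", err)
			}
		}
	}
}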
-func (*Acker) Commit(ctx context.Context) error { return nil } +func (*noopAcker) Commit(ctx context.Context) error { return nil } diff --git a/pkg/component/fake/main.go b/pkg/component/fake/main.go index d92586aa0a3..5f2eaf920f8 100644 --- a/pkg/component/fake/main.go +++ b/pkg/component/fake/main.go @@ -8,7 +8,6 @@ import ( "context" "errors" "fmt" - "io" "os" "os/signal" "syscall" @@ -78,10 +77,10 @@ func run() error { case client.UnitChangedRemoved: s.removed(change.Unit) } - case err := <-c.Errors(): - if err != nil && !errors.Is(err, context.Canceled) && !errors.Is(err, io.EOF) { - fmt.Fprintf(os.Stderr, "GRPC client error: %s", err) - } + case _ = <-c.Errors(): + //if err != nil && !errors.Is(err, context.Canceled) && !errors.Is(err, io.EOF) { + // fmt.Fprintf(os.Stderr, "GRPC client error: %s", err) + //} } } } diff --git a/pkg/component/load.go b/pkg/component/load.go index 38e934b836f..8a22d5d5f91 100644 --- a/pkg/component/load.go +++ b/pkg/component/load.go @@ -146,6 +146,15 @@ func LoadRuntimeSpecs(dir string, platform PlatformDetail, opts ...LoadRuntimeOp }, nil } +// Inputs returns the list of supported inputs for this platform. +func (r *RuntimeSpecs) Inputs() []string { + inputs := make([]string, 0, len(r.inputSpecs)) + for inputType, _ := range r.inputSpecs { + inputs = append(inputs, inputType) + } + return inputs +} + // GetInput returns the input runtime specification for this input on this platform. func (r *RuntimeSpecs) GetInput(inputType string) (InputRuntimeSpec, error) { runtime, ok := r.inputSpecs[inputType] diff --git a/pkg/component/runtime/manager.go b/pkg/component/runtime/manager.go index e7125e82f68..8fbeeb73ff7 100644 --- a/pkg/component/runtime/manager.go +++ b/pkg/component/runtime/manager.go @@ -48,6 +48,12 @@ var ( ErrNoUnit = errors.New("no unit under control of this manager") ) +// ComponentComponentState provides a structure to map a component to current component state. +type ComponentComponentState struct { + Component component.Component + State ComponentState +} + // Manager for the entire runtime of operating components. type Manager struct { proto.UnimplementedElasticAgentServer @@ -67,8 +73,13 @@ type Manager struct { mx sync.RWMutex current map[string]*componentRuntimeState - subMx sync.RWMutex - subscriptions map[string][]*Subscription + subMx sync.RWMutex + subscriptions map[string][]*Subscription + subAllMx sync.RWMutex + subscribeAll []*SubscriptionAll + subscribeAllInit chan *SubscriptionAll + + errCh chan error shuttingDown atomic.Bool } @@ -87,6 +98,7 @@ func NewManager(logger *logger.Logger, listenAddr string, tracer *apm.Tracer) (* waitReady: make(map[string]waitForReady), current: make(map[string]*componentRuntimeState), subscriptions: make(map[string][]*Subscription), + errCh: make(chan error), } return m, nil } @@ -215,6 +227,11 @@ func (m *Manager) WaitForReady(ctx context.Context) error { } } +// Errors returns channel that errors are reported on. +func (m *Manager) Errors() <-chan error { + return m.errCh +} + // Update updates the currComp state of the running components. // // This returns as soon as possible, work is performed in the background to @@ -229,6 +246,22 @@ func (m *Manager) Update(components []component.Component) error { return m.update(components, true) } +// State returns the current component states. 
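The runtime Manager also gains an Errors channel and, just below, a State snapshot built from ComponentComponentState values. A sketch of a supervisor loop over both; the ticker interval is arbitrary and m is assumed to be a *runtime.Manager that has already been started:

package example

import (
	"context"
	"time"

	"github.com/elastic/elastic-agent/pkg/component/runtime"
	"github.com/elastic/elastic-agent/pkg/core/logger"
)

// observe logs manager errors as they arrive and dumps a state snapshot periodically.
func observe(ctx context.Context, m *runtime.Manager, log *logger.Logger) {
	tick := time.NewTicker(30 * time.Second)
	defer tick.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case err := <-m.Errors():
			if err != nil {
				log.Errorf("runtime manager error: %v", err)
			}
		case <-tick.C:
			for _, ccs := range m.State() {
				log.Infof("component %s: state=%v message=%q", ccs.Component.ID, ccs.State.State, ccs.State.Message)
			}
		}
	}
}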
+func (m *Manager) State() []ComponentComponentState { + m.mx.RLock() + defer m.mx.RUnlock() + states := make([]ComponentComponentState, 0, len(m.current)) + for _, crs := range m.current { + crs.latestMx.RLock() + states = append(states, ComponentComponentState{ + Component: crs.currComp, + State: crs.latestState.Copy(), + }) + crs.latestMx.RUnlock() + } + return states +} + // PerformAction executes an action on a unit. func (m *Manager) PerformAction(ctx context.Context, unit component.Unit, name string, params map[string]interface{}) (map[string]interface{}, error) { id, err := uuid.NewV4() @@ -290,11 +323,11 @@ func (m *Manager) PerformAction(ctx context.Context, unit component.Unit, name s // Subscribe to changes in a component. // // Allows a component without that ID to exists. Once a component starts matching that ID then changes will start to -// be provided over the channel. +// be provided over the channel. Cancelling the context results in the subscription being unsubscribed. // // Note: Not reading from a subscription channel will cause the Manager to block. -func (m *Manager) Subscribe(componentID string) *Subscription { - sub := newSubscription(m) +func (m *Manager) Subscribe(ctx context.Context, componentID string) *Subscription { + sub := newSubscription(ctx, m) // add latestState to channel m.mx.RLock() @@ -302,14 +335,88 @@ func (m *Manager) Subscribe(componentID string) *Subscription { m.mx.RUnlock() if ok { comp.latestMx.RLock() - sub.ch <- comp.latestState + latestState := comp.latestState.Copy() comp.latestMx.RUnlock() + go func() { + select { + case <-ctx.Done(): + case sub.ch <- latestState: + } + }() } // add subscription for future changes m.subMx.Lock() m.subscriptions[componentID] = append(m.subscriptions[componentID], sub) - defer m.subMx.Unlock() + m.subMx.Unlock() + + go func() { + <-ctx.Done() + + // unsubscribe + m.subMx.Lock() + defer m.subMx.Unlock() + for key, subs := range m.subscriptions { + for i, s := range subs { + if sub == s { + m.subscriptions[key] = append(m.subscriptions[key][:i], m.subscriptions[key][i+1:]...) + return + } + } + } + }() + + return sub +} + +// SubscribeAll subscribes to all changes in all components. +// +// This provides the current state for existing components at the time of first subscription. Cancelling the context +// results in the subscription being unsubscribed. +// +// Note: Not reading from a subscription channel will cause the Manager to block. +func (m *Manager) SubscribeAll(ctx context.Context) *SubscriptionAll { + sub := newSubscriptionAll(ctx, m) + + // add latest states + m.mx.RLock() + latest := make([]ComponentComponentState, 0, len(m.current)) + for _, comp := range m.current { + comp.latestMx.RLock() + latest = append(latest, ComponentComponentState{Component: comp.currComp, State: comp.latestState.Copy()}) + comp.latestMx.RUnlock() + } + m.mx.RUnlock() + if len(latest) > 0 { + go func() { + for _, l := range latest { + select { + case <-ctx.Done(): + return + case sub.ch <- l: + } + } + }() + } + + // add subscription for future changes + m.subAllMx.Lock() + m.subscribeAll = append(m.subscribeAll, sub) + m.subAllMx.Unlock() + + go func() { + <-ctx.Done() + + // unsubscribe + m.subAllMx.Lock() + defer m.subAllMx.Unlock() + for i, s := range m.subscribeAll { + if sub == s { + m.subscribeAll = append(m.subscribeAll[:i], m.subscribeAll[i+1:]...) 
+ return + } + } + }() return sub } @@ -470,11 +577,26 @@ func (m *Manager) shutdown() { } func (m *Manager) stateChanged(state *componentRuntimeState, latest ComponentState) { + m.subAllMx.RLock() + for _, sub := range m.subscribeAll { + select { + case <-sub.ctx.Done(): + case sub.ch <- ComponentComponentState{ + Component: state.currComp, + State: latest, + }: + } + } + m.subAllMx.RUnlock() + m.subMx.RLock() subs, ok := m.subscriptions[state.currComp.ID] if ok { for _, sub := range subs { - sub.ch <- latest + select { + case <-sub.ctx.Done(): + case sub.ch <- latest: + } } } m.subMx.RUnlock() @@ -490,19 +612,6 @@ func (m *Manager) stateChanged(state *componentRuntimeState, latest ComponentSta } } -func (m *Manager) unsubscribe(subscription *Subscription) { - m.subMx.Lock() - defer m.subMx.Unlock() - for key, subs := range m.subscriptions { - for i, sub := range subs { - if subscription == sub { - m.subscriptions[key] = append(m.subscriptions[key][:i], m.subscriptions[key][i+1:]...) - return - } - } - } -} - func (m *Manager) getCertificate(chi *tls.ClientHelloInfo) (*tls.Certificate, error) { var cert *tls.Certificate diff --git a/pkg/component/runtime/manager_test.go b/pkg/component/runtime/manager_test.go index adeb2b1243a..41a62557ea8 100644 --- a/pkg/component/runtime/manager_test.go +++ b/pkg/component/runtime/manager_test.go @@ -70,8 +70,7 @@ func TestManager_SimpleComponentErr(t *testing.T) { defer subCancel() subErrCh := make(chan error) go func() { - sub := m.Subscribe("error-default") - defer sub.Unsubscribe() + sub := m.Subscribe(subCtx, "error-default") for { select { case <-subCtx.Done(): @@ -179,8 +178,7 @@ func TestManager_FakeInput_StartStop(t *testing.T) { defer subCancel() subErrCh := make(chan error) go func() { - sub := m.Subscribe("fake-default") - defer sub.Unsubscribe() + sub := m.Subscribe(subCtx, "fake-default") for { select { case <-subCtx.Done(): @@ -297,8 +295,7 @@ func TestManager_FakeInput_Configure(t *testing.T) { defer subCancel() subErrCh := make(chan error) go func() { - sub := m.Subscribe("fake-default") - defer sub.Unsubscribe() + sub := m.Subscribe(subCtx, "fake-default") for { select { case <-subCtx.Done(): @@ -431,8 +428,7 @@ func TestManager_FakeInput_RemoveUnit(t *testing.T) { go func() { unit1Stopped := false - sub := m.Subscribe("fake-default") - defer sub.Unsubscribe() + sub := m.Subscribe(subCtx, "fake-default") for { select { case <-subCtx.Done(): @@ -575,8 +571,7 @@ func TestManager_FakeInput_ActionState(t *testing.T) { defer subCancel() subErrCh := make(chan error) go func() { - sub := m.Subscribe("fake-default") - defer sub.Unsubscribe() + sub := m.Subscribe(subCtx, "fake-default") for { select { case <-subCtx.Done(): @@ -700,8 +695,7 @@ func TestManager_FakeInput_Restarts(t *testing.T) { go func() { killed := false - sub := m.Subscribe("fake-default") - defer sub.Unsubscribe() + sub := m.Subscribe(subCtx, "fake-default") for { select { case <-subCtx.Done(): @@ -849,8 +843,7 @@ func TestManager_FakeInput_RestartsOnMissedCheckins(t *testing.T) { go func() { wasDegraded := false - sub := m.Subscribe("fake-default") - defer sub.Unsubscribe() + sub := m.Subscribe(subCtx, "fake-default") for { select { case <-subCtx.Done(): @@ -956,8 +949,7 @@ func TestManager_FakeInput_InvalidAction(t *testing.T) { defer subCancel() subErrCh := make(chan error) go func() { - sub := m.Subscribe("fake-default") - defer sub.Unsubscribe() + sub := m.Subscribe(subCtx, "fake-default") for { select { case <-subCtx.Done(): @@ -1166,12 +1158,9 @@ func 
TestManager_FakeInput_MultiComponent(t *testing.T) { subErrCh1 := make(chan error) subErrCh2 := make(chan error) go func() { - sub0 := m.Subscribe("fake-0") - defer sub0.Unsubscribe() - sub1 := m.Subscribe("fake-1") - defer sub1.Unsubscribe() - sub2 := m.Subscribe("fake-2") - defer sub2.Unsubscribe() + sub0 := m.Subscribe(subCtx, "fake-0") + sub1 := m.Subscribe(subCtx, "fake-1") + sub2 := m.Subscribe(subCtx, "fake-2") for { select { case <-subCtx.Done(): diff --git a/pkg/component/runtime/runtime.go b/pkg/component/runtime/runtime.go index ee4800ce36b..b84e5b48202 100644 --- a/pkg/component/runtime/runtime.go +++ b/pkg/component/runtime/runtime.go @@ -35,12 +35,24 @@ type ComponentUnitKey struct { UnitID string } +// ComponentVersionInfo provides version information reported by the component. +type ComponentVersionInfo struct { + // Name of the binary. + Name string + // Version of the binary. + Version string + // Additional metadata about the binary. + Meta map[string]string +} + // ComponentState is the overall state of the component. type ComponentState struct { State client.UnitState Message string Units map[ComponentUnitKey]ComponentUnitState + + VersionInfo ComponentVersionInfo } func newComponentState(comp *component.Component, initState client.UnitState, initMessage string, initCfgIdx uint64) (s ComponentState) { @@ -157,6 +169,17 @@ func (s *ComponentState) syncCheckin(checkin *proto.CheckinObserved) bool { } } } + if checkin.VersionInfo != nil { + if checkin.VersionInfo.Name != "" { + s.VersionInfo.Name = checkin.VersionInfo.Name + } + if checkin.VersionInfo.Version != "" { + s.VersionInfo.Version = checkin.VersionInfo.Version + } + if checkin.VersionInfo.Meta != nil { + s.VersionInfo.Meta = checkin.VersionInfo.Meta + } + } return changed } @@ -280,7 +303,9 @@ func newComponentRuntimeState(m *Manager, logger *logger.Logger, comp component. delete(state.actions, ar.Id) } state.actionsMx.Unlock() - callback(ar) + if ok { + callback(ar) + } } } }() diff --git a/pkg/component/runtime/subscription.go b/pkg/component/runtime/subscription.go index 88f4106d21a..15cfeac4f7d 100644 --- a/pkg/component/runtime/subscription.go +++ b/pkg/component/runtime/subscription.go @@ -4,16 +4,22 @@ package runtime +import ( + "context" +) + // Subscription provides a channel for notifications on a component state. type Subscription struct { + ctx context.Context manager *Manager ch chan ComponentState } -func newSubscription(manager *Manager) *Subscription { +func newSubscription(ctx context.Context, manager *Manager) *Subscription { return &Subscription{ + ctx: ctx, manager: manager, - ch: make(chan ComponentState, 1), // buffer of 1 to allow initial latestState state + ch: make(chan ComponentState), } } @@ -22,7 +28,22 @@ func (s *Subscription) Ch() <-chan ComponentState { return s.ch } -// Unsubscribe removes the subscription. -func (s *Subscription) Unsubscribe() { - s.manager.unsubscribe(s) +// SubscriptionAll provides a channel for notifications on all component state changes. +type SubscriptionAll struct { + ctx context.Context + manager *Manager + ch chan ComponentComponentState +} + +func newSubscriptionAll(ctx context.Context, manager *Manager) *SubscriptionAll { + return &SubscriptionAll{ + ctx: ctx, + manager: manager, + ch: make(chan ComponentComponentState), + } +} + +// Ch provides the channel to get state changes. 
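Because a subscription is now bound to a context instead of carrying an Unsubscribe method, callers simply read from Ch() until their context ends; cancelling the context is what removes the subscription, and the updated tests above use the same pattern. A minimal per-component sketch, where the component ID is whatever the caller is interested in:

package example

import (
	"context"

	"github.com/elastic/elastic-agent/pkg/component/runtime"
	"github.com/elastic/elastic-agent/pkg/core/logger"
)

// watchComponent follows one component's state until ctx is cancelled.
// Keep reading from the channel: an unread subscription blocks the Manager.
func watchComponent(ctx context.Context, m *runtime.Manager, id string, log *logger.Logger) {
	sub := m.Subscribe(ctx, id)
	for {
		select {
		case <-ctx.Done():
			return
		case state := <-sub.Ch():
			log.Infof("component %s: state=%v message=%q", id, state.State, state.Message)
		}
	}
}

SubscribeAll(ctx) behaves the same way, except its Ch() yields ComponentComponentState values that also carry the component definition.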
+func (s *SubscriptionAll) Ch() <-chan ComponentComponentState { + return s.ch } diff --git a/pkg/core/server/config.go b/pkg/core/server/config.go deleted file mode 100644 index 0d1dbd9d5e3..00000000000 --- a/pkg/core/server/config.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package server - -import ( - "fmt" - - "go.elastic.co/apm" - - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -// Config is a configuration of GRPC server. -type Config struct { - Address string `config:"address"` - Port uint16 `config:"port"` -} - -// DefaultGRPCConfig creates a default server configuration. -func DefaultGRPCConfig() *Config { - return &Config{ - Address: "localhost", - Port: 6789, - } -} - -// NewFromConfig creates a new GRPC server for clients to connect to. -func NewFromConfig(logger *logger.Logger, cfg *Config, handler Handler, tracer *apm.Tracer) (*Server, error) { - return New(logger, fmt.Sprintf("%s:%d", cfg.Address, cfg.Port), handler, tracer) -} diff --git a/pkg/core/server/config_test.go b/pkg/core/server/config_test.go deleted file mode 100644 index 2c846d77892..00000000000 --- a/pkg/core/server/config_test.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package server - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestNewFromConfig(t *testing.T) { - l := newErrorLogger(t) - cfg := &Config{ - Address: "0.0.0.0", - Port: 9876, - } - srv, err := NewFromConfig(l, cfg, &StubHandler{}, nil) - require.NoError(t, err) - assert.Equal(t, "0.0.0.0:9876", srv.getListenAddr()) -} diff --git a/pkg/core/server/server.go b/pkg/core/server/server.go deleted file mode 100644 index 6d3a284cd79..00000000000 --- a/pkg/core/server/server.go +++ /dev/null @@ -1,1018 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package server - -import ( - "crypto/tls" - "crypto/x509" - "encoding/json" - "fmt" - "io" - "net" - "strings" - "sync" - "time" - - "go.elastic.co/apm" - "go.elastic.co/apm/module/apmgrpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/gofrs/uuid" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - protobuf "google.golang.org/protobuf/proto" - - "github.com/elastic/elastic-agent-client/v7/pkg/client" - "github.com/elastic/elastic-agent-client/v7/pkg/proto" - - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/core/authority" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -const ( - // InitialCheckinTimeout is the maximum amount of wait time from initial check-in stream to - // getting the first check-in observed state. 
- InitialCheckinTimeout = 5 * time.Second - // CheckinMinimumTimeoutGracePeriod is additional time added to the client.CheckinMinimumTimeout - // to ensure the application is checking in correctly. - CheckinMinimumTimeoutGracePeriod = 30 * time.Second - // WatchdogCheckLoop is the amount of time that the watchdog will wait between checking for - // applications that have not checked in the correct amount of time. - WatchdogCheckLoop = 5 * time.Second -) - -var ( - // ErrApplicationAlreadyRegistered returned when trying to register an application more than once. - ErrApplicationAlreadyRegistered = errors.New("application already registered", errors.TypeApplication) - // ErrApplicationStopping returned when trying to update an application config but it is stopping. - ErrApplicationStopping = errors.New("application stopping", errors.TypeApplication) - // ErrApplicationStopTimedOut returned when calling Stop and the application timed out stopping. - ErrApplicationStopTimedOut = errors.New("application stopping timed out", errors.TypeApplication) - // ErrActionTimedOut returned on PerformAction when the action timed out. - ErrActionTimedOut = errors.New("application action timed out", errors.TypeApplication) - // ErrActionCancelled returned on PerformAction when an action is cancelled, normally due to the application - // being stopped or removed from the server. - ErrActionCancelled = errors.New("application action cancelled", errors.TypeApplication) -) - -// ApplicationState represents the applications state according to the server. -type ApplicationState struct { - srv *Server - app interface{} - - srvName string - token string - cert *authority.Pair - - pendingExpected chan *proto.StateExpected - expected proto.StateExpected_State - expectedConfigIdx uint64 - expectedConfig string - status proto.StateObserved_Status - statusMessage string - statusPayload map[string]interface{} - statusPayloadStr string - statusConfigIdx uint64 - statusTime time.Time - checkinConn bool - checkinDone chan bool - checkinLock sync.RWMutex - - pendingActions chan *pendingAction - sentActions map[string]*sentAction - actionsConn bool - actionsDone chan bool - actionsLock sync.RWMutex - - inputTypes map[string]struct{} -} - -// Handler is the used by the server to inform of status changes. -type Handler interface { - // OnStatusChange called when a registered application observed status is changed. - OnStatusChange(*ApplicationState, proto.StateObserved_Status, string, map[string]interface{}) -} - -// Server is the GRPC server that the launched applications connect back to. -type Server struct { - proto.UnimplementedElasticAgentServer - - logger *logger.Logger - ca *authority.CertificateAuthority - listenAddr string - handler Handler - tracer *apm.Tracer - - listener net.Listener - server *grpc.Server - watchdogDone chan bool - watchdogWG sync.WaitGroup - - apps sync.Map - - // overridden in tests - watchdogCheckInterval time.Duration - checkInMinTimeout time.Duration -} - -// New creates a new GRPC server for clients to connect to. 
-func New(logger *logger.Logger, listenAddr string, handler Handler, tracer *apm.Tracer) (*Server, error) { - ca, err := authority.NewCA() - if err != nil { - return nil, err - } - return &Server{ - logger: logger, - ca: ca, - listenAddr: listenAddr, - handler: handler, - watchdogCheckInterval: WatchdogCheckLoop, - checkInMinTimeout: client.CheckinMinimumTimeout + CheckinMinimumTimeoutGracePeriod, - tracer: tracer, - }, nil -} - -// Start starts the GRPC endpoint and accepts new connections. -func (s *Server) Start() error { - if s.server != nil { - // already started - return nil - } - - lis, err := net.Listen("tcp", s.listenAddr) - if err != nil { - return err - } - s.listener = lis - certPool := x509.NewCertPool() - if ok := certPool.AppendCertsFromPEM(s.ca.Crt()); !ok { - return errors.New("failed to append root CA", errors.TypeSecurity) - } - creds := credentials.NewTLS(&tls.Config{ - ClientAuth: tls.RequireAndVerifyClientCert, - ClientCAs: certPool, - GetCertificate: s.getCertificate, - MinVersion: tls.VersionTLS12, - }) - if s.tracer != nil { - apmInterceptor := apmgrpc.NewUnaryServerInterceptor(apmgrpc.WithRecovery(), apmgrpc.WithTracer(s.tracer)) - s.server = grpc.NewServer( - grpc.UnaryInterceptor(apmInterceptor), - grpc.Creds(creds), - ) - } else { - s.server = grpc.NewServer(grpc.Creds(creds)) - } - proto.RegisterElasticAgentServer(s.server, s) - - // start serving GRPC connections - go func() { - err := s.server.Serve(lis) - if err != nil { - s.logger.Errorf("error listening for GRPC: %s", err) - } - }() - - // start the watchdog - s.watchdogDone = make(chan bool) - s.watchdogWG.Add(1) - go s.watchdog() - - return nil -} - -// Stop stops the GRPC endpoint. -func (s *Server) Stop() { - if s.server != nil { - close(s.watchdogDone) - s.server.Stop() - s.server = nil - s.listener = nil - s.watchdogWG.Wait() - } -} - -// Get returns the application state from the server for the passed application. -func (s *Server) Get(app interface{}) (*ApplicationState, bool) { - var foundState *ApplicationState - s.apps.Range(func(_ interface{}, val interface{}) bool { - as, ok := val.(*ApplicationState) - if !ok { - return true - } - if as.app == app { - foundState = as - return false - } - return true - }) - return foundState, foundState != nil -} - -// FindByInputType application by input type -func (s *Server) FindByInputType(inputType string) (*ApplicationState, bool) { - var foundState *ApplicationState - s.apps.Range(func(_ interface{}, val interface{}) bool { - as, ok := val.(*ApplicationState) - if !ok { - return true - } - if as.inputTypes == nil { - return true - } - - if _, ok := as.inputTypes[inputType]; ok { - foundState = as - return false - } - return true - }) - return foundState, foundState != nil -} - -// Register registers a new application to connect to the server. 
-func (s *Server) Register(app interface{}, config string) (*ApplicationState, error) { - if _, ok := s.Get(app); ok { - return nil, ErrApplicationAlreadyRegistered - } - - id, err := uuid.NewV4() - if err != nil { - return nil, err - } - srvName, err := genServerName() - if err != nil { - return nil, err - } - pair, err := s.ca.GeneratePairWithName(srvName) - if err != nil { - return nil, err - } - appState := &ApplicationState{ - srv: s, - app: app, - srvName: srvName, - token: id.String(), - cert: pair, - pendingExpected: make(chan *proto.StateExpected), - expected: proto.StateExpected_RUNNING, - expectedConfigIdx: 1, - expectedConfig: config, - checkinConn: true, - status: proto.StateObserved_STARTING, - statusConfigIdx: client.InitialConfigIdx, - statusTime: time.Now().UTC(), - pendingActions: make(chan *pendingAction, 100), - sentActions: make(map[string]*sentAction), - actionsConn: true, - } - s.apps.Store(appState.token, appState) - return appState, nil -} - -// Checkin implements the GRPC bi-direction stream connection for check-ins. -func (s *Server) Checkin(server proto.ElasticAgent_CheckinServer) error { - firstCheckinChan := make(chan *proto.StateObserved) - go func() { - // go func will not be leaked, because when the main function - // returns it will close the connection. that will cause this - // function to return. - observed, err := server.Recv() - if err != nil { - close(firstCheckinChan) - return - } - firstCheckinChan <- observed - }() - - var ok bool - var observedConfigStateIdx uint64 - var firstCheckin *proto.StateObserved - select { - case firstCheckin, ok = <-firstCheckinChan: - if firstCheckin != nil { - observedConfigStateIdx = firstCheckin.ConfigStateIdx - } - break - case <-time.After(InitialCheckinTimeout): - // close connection - s.logger.Debug("check-in stream never sent initial observed message; closing connection") - return nil - } - if !ok { - // close connection - return nil - } - appState, ok := s.getByToken(firstCheckin.Token) - if !ok { - // no application with token; close connection - s.logger.Debug("check-in stream sent an invalid token; closing connection") - return status.Error(codes.PermissionDenied, "invalid token") - } - appState.checkinLock.Lock() - if appState.checkinDone != nil { - // application is already connected (cannot have multiple); close connection - appState.checkinLock.Unlock() - s.logger.Debug("check-in stream already exists for application; closing connection") - return status.Error(codes.AlreadyExists, "application already connected") - } - if !appState.checkinConn { - // application is being destroyed cannot reconnect; close connection - appState.checkinLock.Unlock() - s.logger.Debug("check-in stream cannot connect, application is being destroyed; closing connection") - return status.Error(codes.Unavailable, "application cannot connect being destroyed") - } - - // application is running as a service and counter is already counting - // force config reload - if observedConfigStateIdx > 0 { - appState.expectedConfigIdx = observedConfigStateIdx + 1 - } - - checkinDone := make(chan bool) - appState.checkinDone = checkinDone - appState.checkinLock.Unlock() - - defer func() { - appState.checkinLock.Lock() - appState.checkinDone = nil - appState.checkinLock.Unlock() - }() - - // send the config and expected state changes to the applications when - // pushed on the channel - recvDone := make(chan bool) - sendDone := make(chan bool) - go func() { - defer func() { - close(sendDone) - }() - for { - var expected *proto.StateExpected - 
select { - case <-checkinDone: - return - case <-recvDone: - return - case expected = <-appState.pendingExpected: - } - - err := server.Send(expected) - if err != nil { - if reportableErr(err) { - s.logger.Debugf("check-in stream failed to send expected state: %s", err) - } - return - } - } - }() - - // update status after the pendingExpected channel has a reader - appState.updateStatus(firstCheckin, true) - - // read incoming state observations from the application and act based on - // the servers expected state of the application - go func() { - for { - checkin, err := server.Recv() - if err != nil { - if reportableErr(err) { - s.logger.Debugf("check-in stream failed to receive data: %s", err) - } - close(recvDone) - return - } - appState.updateStatus(checkin, false) - } - }() - - <-sendDone - return nil -} - -// CheckinV2 implements the GRPC bi-direction stream connection for v2 check-ins. -func (s *Server) CheckinV2(server proto.ElasticAgent_CheckinV2Server) error { - return errors.New("not implemented") -} - -// Actions implements the GRPC bi-direction stream connection for actions. -func (s *Server) Actions(server proto.ElasticAgent_ActionsServer) error { - firstRespChan := make(chan *proto.ActionResponse) - go func() { - // go func will not be leaked, because when the main function - // returns it will close the connection. that will cause this - // function to return. - observed, err := server.Recv() - if err != nil { - close(firstRespChan) - return - } - firstRespChan <- observed - }() - - var ok bool - var firstResp *proto.ActionResponse - select { - case firstResp, ok = <-firstRespChan: - break - case <-time.After(InitialCheckinTimeout): - // close connection - s.logger.Debug("actions stream never sent initial response message; closing connection") - return nil - } - if !ok { - // close connection - return nil - } - if firstResp.Id != client.ActionResponseInitID { - // close connection - s.logger.Debug("actions stream first response message must be an init message; closing connection") - return status.Error(codes.InvalidArgument, "initial response must be an init message") - } - appState, ok := s.getByToken(firstResp.Token) - if !ok { - // no application with token; close connection - s.logger.Debug("actions stream sent an invalid token; closing connection") - return status.Error(codes.PermissionDenied, "invalid token") - } - appState.actionsLock.Lock() - if appState.actionsDone != nil { - // application is already connected (cannot have multiple); close connection - appState.actionsLock.Unlock() - s.logger.Debug("actions stream already exists for application; closing connection") - return status.Error(codes.AlreadyExists, "application already connected") - } - if !appState.actionsConn { - // application is being destroyed cannot reconnect; close connection - appState.actionsLock.Unlock() - s.logger.Debug("actions stream cannot connect, application is being destroyed; closing connection") - return status.Error(codes.Unavailable, "application cannot connect being destroyed") - } - actionsDone := make(chan bool) - appState.actionsDone = actionsDone - appState.actionsLock.Unlock() - - defer func() { - appState.actionsLock.Lock() - appState.actionsDone = nil - appState.actionsLock.Unlock() - }() - - // send the pending actions that need to be performed - recvDone := make(chan bool) - sendDone := make(chan bool) - go func() { - defer func() { close(sendDone) }() - for { - var pending *pendingAction - select { - case <-actionsDone: - return - case <-recvDone: - return - case pending 
= <-appState.pendingActions: - } - - if pending.expiresOn.Sub(time.Now().UTC()) <= 0 { - // to late action already expired - pending.callback(nil, ErrActionTimedOut) - continue - } - - appState.actionsLock.Lock() - err := server.Send(&proto.ActionRequest{ - Id: pending.id, - Name: pending.name, - Params: pending.params, - }) - if err != nil { - // failed to send action; add back to channel to retry on re-connect from the client - appState.actionsLock.Unlock() - appState.pendingActions <- pending - if reportableErr(err) { - s.logger.Debugf("failed to send pending action %s (will retry, after re-connect): %s", pending.id, err) - } - return - } - appState.sentActions[pending.id] = &sentAction{ - callback: pending.callback, - expiresOn: pending.expiresOn, - } - appState.actionsLock.Unlock() - } - }() - - // receive the finished actions - go func() { - for { - response, err := server.Recv() - if err != nil { - if reportableErr(err) { - s.logger.Debugf("actions stream failed to receive data: %s", err) - } - close(recvDone) - return - } - appState.actionsLock.Lock() - action, ok := appState.sentActions[response.Id] - if !ok { - // nothing to do, unknown action request - s.logger.Debugf("actions stream received an unknown action: %s", response.Id) - appState.actionsLock.Unlock() - continue - } - delete(appState.sentActions, response.Id) - appState.actionsLock.Unlock() - - var result map[string]interface{} - err = json.Unmarshal(response.Result, &result) - if err != nil { - action.callback(nil, err) - } else if response.Status == proto.ActionResponse_FAILED { - errStr, ok := result["error"] - if ok { - err = fmt.Errorf("%s", errStr) - } else { - err = fmt.Errorf("unknown error") - } - action.callback(nil, err) - } else { - action.callback(result, nil) - } - } - }() - - <-sendDone - return nil -} - -// WriteConnInfo writes the connection information for the application into the writer. -// -// Note: If the writer implements io.Closer the writer is also closed. -func (as *ApplicationState) WriteConnInfo(w io.Writer) error { - connInfo := &proto.ConnInfo{ - Addr: as.srv.getListenAddr(), - ServerName: as.srvName, - Token: as.token, - CaCert: as.srv.ca.Crt(), - PeerCert: as.cert.Crt, - PeerKey: as.cert.Key, - } - infoBytes, err := protobuf.Marshal(connInfo) - if err != nil { - return errors.New(err, "failed to marshal connection information", errors.TypeApplication) - } - _, err = w.Write(infoBytes) - if err != nil { - return errors.New(err, "failed to write connection information", errors.TypeApplication) - } - closer, ok := w.(io.Closer) - if ok { - _ = closer.Close() - } - return nil -} - -// Stop instructs the application to stop gracefully within the timeout. -// -// Once the application is stopped or the timeout is reached the application is destroyed. Even in the case -// the application times out during stop and ErrApplication -func (as *ApplicationState) Stop(timeout time.Duration) error { - as.checkinLock.Lock() - wasConn := as.checkinDone != nil - cfgIdx := as.statusConfigIdx - as.expected = proto.StateExpected_STOPPING - as.checkinLock.Unlock() - - // send it to the client if its connected, otherwise it will be sent once it connects. 
- as.sendExpectedState(&proto.StateExpected{ - State: proto.StateExpected_STOPPING, - ConfigStateIdx: cfgIdx, - Config: "", - }, false) - - started := time.Now().UTC() - for { - if time.Now().UTC().Sub(started) > timeout { - as.Destroy() - return ErrApplicationStopTimedOut - } - - as.checkinLock.RLock() - s := as.status - doneChan := as.checkinDone - as.checkinLock.RUnlock() - if (wasConn && doneChan == nil) || (!wasConn && s == proto.StateObserved_STOPPING && doneChan == nil) { - // either occurred: - // * client was connected then disconnected on stop - // * client was not connected; connected; received stopping; then disconnected - as.Destroy() - return nil - } - - <-time.After(500 * time.Millisecond) - } -} - -// Destroy completely removes the application from the server without sending any stop command to the application. -// -// The ApplicationState at this point cannot be used. -func (as *ApplicationState) Destroy() { - as.destroyActionsStream() - as.destroyCheckinStream() - as.srv.apps.Delete(as.token) -} - -// UpdateConfig pushes an updated configuration to the connected application. -func (as *ApplicationState) UpdateConfig(config string) error { - as.checkinLock.RLock() - expected := as.expected - currentCfg := as.expectedConfig - as.checkinLock.RUnlock() - if expected == proto.StateExpected_STOPPING { - return ErrApplicationStopping - } - if config == currentCfg { - // already at that expected config - return nil - } - - as.checkinLock.Lock() - idx := as.expectedConfigIdx + 1 - as.expectedConfigIdx = idx - as.expectedConfig = config - as.checkinLock.Unlock() - - // send it to the client if its connected, otherwise it will be sent once it connects. - as.sendExpectedState(&proto.StateExpected{ - State: expected, - ConfigStateIdx: idx, - Config: config, - }, false) - return nil -} - -// PerformAction synchronously performs an action on the application. -func (as *ApplicationState) PerformAction(name string, params map[string]interface{}, timeout time.Duration) (map[string]interface{}, error) { - paramBytes, err := json.Marshal(params) - if err != nil { - return nil, err - } - id, err := uuid.NewV4() - if err != nil { - return nil, err - } - if !as.actionsConn { - // actions stream destroyed, action cancelled - return nil, ErrActionCancelled - } - - resChan := make(chan actionResult) - as.pendingActions <- &pendingAction{ - id: id.String(), - name: name, - params: paramBytes, - callback: func(m map[string]interface{}, err error) { - resChan <- actionResult{ - result: m, - err: err, - } - }, - expiresOn: time.Now().UTC().Add(timeout), - } - res := <-resChan - return res.result, res.err -} - -// App returns the registered app for the state. -func (as *ApplicationState) App() interface{} { - return as.app -} - -// Expected returns the expected state of the process. -func (as *ApplicationState) Expected() proto.StateExpected_State { - as.checkinLock.RLock() - defer as.checkinLock.RUnlock() - return as.expected -} - -// Config returns the expected config of the process. -func (as *ApplicationState) Config() string { - as.checkinLock.RLock() - defer as.checkinLock.RUnlock() - return as.expectedConfig -} - -// Status returns the current observed status. -func (as *ApplicationState) Status() (proto.StateObserved_Status, string, map[string]interface{}) { - as.checkinLock.RLock() - defer as.checkinLock.RUnlock() - return as.status, as.statusMessage, as.statusPayload -} - -// SetStatus allows the status to be overwritten by the agent. 
-// -// This status will be overwritten by the client if it reconnects and updates it status. -func (as *ApplicationState) SetStatus(status proto.StateObserved_Status, msg string, payload map[string]interface{}) error { - payloadStr, err := json.Marshal(payload) - if err != nil { - return err - } - as.checkinLock.RLock() - as.status = status - as.statusMessage = msg - as.statusPayload = payload - as.statusPayloadStr = string(payloadStr) - as.checkinLock.RUnlock() - return nil -} - -// SetInputTypes sets the allowed action input types for this application -func (as *ApplicationState) SetInputTypes(inputTypes []string) { - as.checkinLock.Lock() - as.inputTypes = make(map[string]struct{}) - for _, inputType := range inputTypes { - as.inputTypes[inputType] = struct{}{} - } - as.checkinLock.Unlock() -} - -// updateStatus updates the current observed status from the application, sends the expected state back to the -// application if the server expects it to be different then its observed state, and alerts the handler on the -// server when the application status has changed. -func (as *ApplicationState) updateStatus(checkin *proto.StateObserved, waitForReader bool) { - // convert payload from string to JSON - var payload map[string]interface{} - if checkin.Payload != "" { - // ignore the error, if client is sending bad JSON, then payload will just be nil - _ = json.Unmarshal([]byte(checkin.Payload), &payload) - } - - as.checkinLock.Lock() - expectedStatus := as.expected - expectedConfigIdx := as.expectedConfigIdx - expectedConfig := as.expectedConfig - prevStatus := as.status - prevMessage := as.statusMessage - prevPayloadStr := as.statusPayloadStr - as.status = checkin.Status - as.statusMessage = checkin.Message - as.statusPayloadStr = checkin.Payload - as.statusPayload = payload - as.statusConfigIdx = checkin.ConfigStateIdx - as.statusTime = time.Now().UTC() - as.checkinLock.Unlock() - - var expected *proto.StateExpected - if expectedStatus == proto.StateExpected_STOPPING && checkin.Status != proto.StateObserved_STOPPING { - expected = &proto.StateExpected{ - State: expectedStatus, - ConfigStateIdx: checkin.ConfigStateIdx, // stopping always inform that the config it has is correct - Config: "", - } - } else if checkin.ConfigStateIdx != expectedConfigIdx { - expected = &proto.StateExpected{ - State: expectedStatus, - ConfigStateIdx: expectedConfigIdx, - Config: expectedConfig, - } - } - if expected != nil { - as.sendExpectedState(expected, waitForReader) - } - - // alert the service handler that status has changed for the application - if prevStatus != checkin.Status || prevMessage != checkin.Message || prevPayloadStr != checkin.Payload { - as.srv.handler.OnStatusChange(as, checkin.Status, checkin.Message, payload) - } -} - -// sendExpectedState sends the expected status over the pendingExpected channel if the other side is -// waiting for a message. 
-func (as *ApplicationState) sendExpectedState(expected *proto.StateExpected, waitForReader bool) { - if waitForReader { - as.pendingExpected <- expected - return - } - - select { - case as.pendingExpected <- expected: - default: - } -} - -// destroyActionsStream disconnects the actions stream (prevent reconnect), cancel all pending actions -func (as *ApplicationState) destroyActionsStream() { - as.actionsLock.Lock() - as.actionsConn = false - if as.actionsDone != nil { - close(as.actionsDone) - as.actionsDone = nil - } - as.actionsLock.Unlock() - as.cancelActions() -} - -// flushExpiredActions flushes any expired actions from the pending channel or current processing. -func (as *ApplicationState) flushExpiredActions() { - now := time.Now().UTC() - pendingActions := make([]*pendingAction, 0, len(as.pendingActions)) - for { - done := false - select { - case pending := <-as.pendingActions: - pendingActions = append(pendingActions, pending) - default: - done = true - } - if done { - break - } - } - for _, pending := range pendingActions { - if pending.expiresOn.Sub(now) <= 0 { - pending.callback(nil, ErrActionTimedOut) - } else { - as.pendingActions <- pending - } - } - as.actionsLock.Lock() - for id, pendingResp := range as.sentActions { - if pendingResp.expiresOn.Sub(now) <= 0 { - delete(as.sentActions, id) - pendingResp.callback(nil, ErrActionTimedOut) - } - } - as.actionsLock.Unlock() -} - -// cancelActions cancels all pending or currently processing actions. -func (as *ApplicationState) cancelActions() { - for { - done := false - select { - case pending := <-as.pendingActions: - pending.callback(nil, ErrActionCancelled) - default: - done = true - } - if done { - break - } - } - as.actionsLock.Lock() - for id, pendingResp := range as.sentActions { - delete(as.sentActions, id) - pendingResp.callback(nil, ErrActionCancelled) - } - as.actionsLock.Unlock() -} - -// destroyCheckinStream disconnects the check stream (prevent reconnect). -func (as *ApplicationState) destroyCheckinStream() { - as.checkinLock.Lock() - as.checkinConn = false - if as.checkinDone != nil { - close(as.checkinDone) - as.checkinDone = nil - } - as.checkinLock.Unlock() -} - -// watchdog ensures that the current applications are checking in during the correct intervals of time. 
-func (s *Server) watchdog() { - defer s.watchdogWG.Done() - for { - t := time.NewTimer(s.watchdogCheckInterval) - select { - case <-s.watchdogDone: - t.Stop() - return - case <-t.C: - } - - now := time.Now().UTC() - s.apps.Range(func(_ interface{}, val interface{}) bool { - serverApp, ok := val.(*ApplicationState) - if !ok { - return true - } - serverApp.checkinLock.RLock() - statusTime := serverApp.statusTime - serverApp.checkinLock.RUnlock() - if now.Sub(statusTime) > s.checkInMinTimeout { - serverApp.checkinLock.Lock() - prevStatus := serverApp.status - s := prevStatus - prevMessage := serverApp.statusMessage - message := prevMessage - if serverApp.status == proto.StateObserved_DEGRADED { - s = proto.StateObserved_FAILED - message = "Missed two check-ins" - serverApp.status = s - serverApp.statusMessage = message - serverApp.statusPayload = nil - serverApp.statusPayloadStr = "" - serverApp.statusTime = now - } else if serverApp.status != proto.StateObserved_FAILED { - s = proto.StateObserved_DEGRADED - message = "Missed last check-in" - serverApp.status = s - serverApp.statusMessage = message - serverApp.statusPayload = nil - serverApp.statusPayloadStr = "" - serverApp.statusTime = now - } - serverApp.checkinLock.Unlock() - if prevStatus != s || prevMessage != message { - serverApp.srv.handler.OnStatusChange(serverApp, s, message, nil) - } - } - serverApp.flushExpiredActions() - return true - }) - } -} - -// getByToken returns an application state by its token. -func (s *Server) getByToken(token string) (*ApplicationState, bool) { - val, ok := s.apps.Load(token) - if ok { - return val.(*ApplicationState), true - } - return nil, false -} - -// getCertificate returns the TLS certificate based on the clientHello or errors if not found. -func (s *Server) getCertificate(chi *tls.ClientHelloInfo) (*tls.Certificate, error) { - var cert *tls.Certificate - s.apps.Range(func(_ interface{}, val interface{}) bool { - sa, ok := val.(*ApplicationState) - if !ok { - return true - } - if sa.srvName == chi.ServerName { - cert = sa.cert.Certificate - return false - } - return true - }) - if cert != nil { - return cert, nil - } - return nil, errors.New("no supported TLS certificate", errors.TypeSecurity) -} - -// getListenAddr returns the listening address of the server. -func (s *Server) getListenAddr() string { - addr := strings.SplitN(s.listenAddr, ":", 2) - if len(addr) == 2 && addr[1] == "0" { - port := s.listener.Addr().(*net.TCPAddr).Port - return fmt.Sprintf("%s:%d", addr[0], port) - } - return s.listenAddr -} - -type pendingAction struct { - id string - name string - params []byte - callback func(map[string]interface{}, error) - expiresOn time.Time -} - -type sentAction struct { - callback func(map[string]interface{}, error) - expiresOn time.Time -} - -type actionResult struct { - result map[string]interface{} - err error -} - -func reportableErr(err error) bool { - if errors.Is(err, io.EOF) { - return false - } - s, ok := status.FromError(err) - if !ok { - return true - } - if s.Code() == codes.Canceled { - return false - } - return true -} - -func genServerName() (string, error) { - u, err := uuid.NewV4() - if err != nil { - return "", err - } - return strings.Replace(u.String(), "-", "", -1), nil -} diff --git a/pkg/core/server/server_test.go b/pkg/core/server/server_test.go deleted file mode 100644 index a2a1bdf4f80..00000000000 --- a/pkg/core/server/server_test.go +++ /dev/null @@ -1,794 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -//nolint:dupl // tests are equivalent -package server - -import ( - "context" - "fmt" - "io" - "strings" - "sync" - "testing" - "time" - - "go.elastic.co/apm/apmtest" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/elastic/elastic-agent-client/v7/pkg/client" - "github.com/elastic/elastic-agent-client/v7/pkg/proto" - "github.com/elastic/elastic-agent-libs/logp" - - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -const ( - initConfig = "initial_config" - newConfig = "new_config" -) - -func TestServer_Register(t *testing.T) { - app := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}) - defer srv.Stop() - _, err := srv.Register(app, initConfig) - assert.NoError(t, err) - _, err = srv.Register(app, initConfig) - assert.Equal(t, ErrApplicationAlreadyRegistered, err) -} - -func TestServer_Get(t *testing.T) { - app := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}) - defer srv.Stop() - expected, err := srv.Register(app, initConfig) - require.NoError(t, err) - observed, ok := srv.Get(app) - assert.True(t, ok) - assert.Equal(t, expected, observed) - _, found := srv.Get(&StubApp{}) - assert.False(t, found) -} - -func TestServer_InitialCheckIn(t *testing.T) { - app := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}) - defer srv.Stop() - as, err := srv.Register(app, initConfig) - require.NoError(t, err) - cImpl := &StubClientImpl{} - c := newClientFromApplicationState(t, as, cImpl) - require.NoError(t, c.Start(context.Background())) - defer c.Stop() - - // client should get initial check-in - require.NoError(t, waitFor(func() error { - if cImpl.Config() != initConfig { - return fmt.Errorf("client never got initial config") - } - return nil - })) - - // set status as healthy and running - err = c.Status(proto.StateObserved_HEALTHY, "Running", nil) - require.NoError(t, err) - - // application state should be updated - assert.NoError(t, waitFor(func() error { - if app.Status() != proto.StateObserved_HEALTHY { - return fmt.Errorf("server never updated currect application state") - } - return nil - })) -} - -func TestServer_MultiClients(t *testing.T) { - initConfig1 := "initial_config_1" - initConfig2 := "initial_config_2" - app1 := &StubApp{} - app2 := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}) - defer srv.Stop() - as1, err := srv.Register(app1, initConfig1) - require.NoError(t, err) - cImpl1 := &StubClientImpl{} - c1 := newClientFromApplicationState(t, as1, cImpl1) - require.NoError(t, c1.Start(context.Background())) - defer c1.Stop() - as2, err := srv.Register(app2, initConfig2) - require.NoError(t, err) - cImpl2 := &StubClientImpl{} - c2 := newClientFromApplicationState(t, as2, cImpl2) - require.NoError(t, c2.Start(context.Background())) - defer c2.Stop() - - // clients should get initial check-ins - require.NoError(t, waitFor(func() error { - if cImpl1.Config() != initConfig1 { - return fmt.Errorf("client never got initial config") - } - return nil - })) - require.NoError(t, waitFor(func() error { - if cImpl2.Config() != initConfig2 { - return fmt.Errorf("client never got initial config") - } - return nil - })) - - // set status differently - err = c1.Status(proto.StateObserved_HEALTHY, "Running", nil) - 
require.NoError(t, err) - err = c2.Status(proto.StateObserved_DEGRADED, "No upstream connection", nil) - require.NoError(t, err) - - // application states should be updated - assert.NoError(t, waitFor(func() error { - if app1.Status() != proto.StateObserved_HEALTHY { - return fmt.Errorf("server never updated currect application state") - } - return nil - })) - assert.NoError(t, waitFor(func() error { - if app2.Status() != proto.StateObserved_DEGRADED { - return fmt.Errorf("server never updated currect application state") - } - return nil - })) -} - -func TestServer_PreventCheckinStream(t *testing.T) { - app := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}) - defer srv.Stop() - as, err := srv.Register(app, initConfig) - require.NoError(t, err) - as.checkinConn = false // prevent connection to check-in stream - cImpl := &StubClientImpl{} - c := newClientFromApplicationState(t, as, cImpl) - require.NoError(t, c.Start(context.Background())) - defer c.Stop() - - assert.NoError(t, waitFor(func() error { - if cImpl.Error() == nil { - return fmt.Errorf("client never got error trying to connect twice") - } - s, ok := status.FromError(cImpl.Error()) - if !ok { - return fmt.Errorf("client didn't get a status error") - } - if s.Code() != codes.Unavailable { - return fmt.Errorf("client didn't get unavaible error") - } - return nil - })) -} - -func TestServer_PreventActionsStream(t *testing.T) { - app := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}) - defer srv.Stop() - as, err := srv.Register(app, initConfig) - require.NoError(t, err) - as.actionsConn = false // prevent connection to check-in stream - cImpl := &StubClientImpl{} - c := newClientFromApplicationState(t, as, cImpl) - require.NoError(t, c.Start(context.Background())) - defer c.Stop() - - assert.NoError(t, waitFor(func() error { - if cImpl.Error() == nil { - return fmt.Errorf("client never got error trying to connect twice") - } - s, ok := status.FromError(cImpl.Error()) - if !ok { - return fmt.Errorf("client didn't get a status error") - } - if s.Code() != codes.Unavailable { - return fmt.Errorf("client didn't get unavaible error") - } - return nil - })) -} - -func TestServer_DestroyPreventConnectAtTLS(t *testing.T) { - app := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}) - defer srv.Stop() - as, err := srv.Register(app, initConfig) - require.NoError(t, err) - as.Destroy() - cImpl := &StubClientImpl{} - c := newClientFromApplicationState(t, as, cImpl) - require.NoError(t, c.Start(context.Background())) - defer c.Stop() - - assert.NoError(t, waitFor(func() error { - if cImpl.Error() == nil { - return fmt.Errorf("client never got error trying to connect twice") - } - s, ok := status.FromError(cImpl.Error()) - if !ok { - return fmt.Errorf("client didn't get a status error") - } - if s.Code() != codes.Unavailable { - return fmt.Errorf("client didn't get unavaible error") - } - if !strings.Contains(s.Message(), "authentication handshake failed") { - return fmt.Errorf("client didn't get authentication handshake failed error") - } - return nil - })) -} - -func TestServer_UpdateConfig(t *testing.T) { - app := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}) - defer srv.Stop() - as, err := srv.Register(app, initConfig) - require.NoError(t, err) - cImpl := &StubClientImpl{} - c := newClientFromApplicationState(t, as, cImpl) - require.NoError(t, c.Start(context.Background())) - defer c.Stop() - - // clients should get initial check-ins then set as healthy - require.NoError(t, waitFor(func() error { - 
if cImpl.Config() != initConfig { - return fmt.Errorf("client never got initial config") - } - return nil - })) - err = c.Status(proto.StateObserved_HEALTHY, "Running", nil) - require.NoError(t, err) - assert.NoError(t, waitFor(func() error { - if app.Status() != proto.StateObserved_HEALTHY { - return fmt.Errorf("server never updated currect application state") - } - return nil - })) - - // push same config; should not increment config index - preIdx := as.expectedConfigIdx - require.NoError(t, as.UpdateConfig(initConfig)) - assert.Equal(t, preIdx, as.expectedConfigIdx) - - // push new config; should update the client - require.NoError(t, as.UpdateConfig(newConfig)) - assert.Equal(t, preIdx+1, as.expectedConfigIdx) - assert.NoError(t, waitFor(func() error { - if cImpl.Config() != newConfig { - return fmt.Errorf("client never got updated config") - } - return nil - })) -} - -func TestServer_UpdateConfigDisconnected(t *testing.T) { - app := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}) - defer srv.Stop() - as, err := srv.Register(app, initConfig) - require.NoError(t, err) - cImpl := &StubClientImpl{} - c := newClientFromApplicationState(t, as, cImpl) - require.NoError(t, c.Start(context.Background())) - defer c.Stop() - - // clients should get initial check-ins then set as healthy - require.NoError(t, waitFor(func() error { - if cImpl.Config() != initConfig { - return fmt.Errorf("client never got initial config") - } - return nil - })) - err = c.Status(proto.StateObserved_HEALTHY, "Running", nil) - require.NoError(t, err) - assert.NoError(t, waitFor(func() error { - if app.Status() != proto.StateObserved_HEALTHY { - return fmt.Errorf("server never updated currect application state") - } - return nil - })) - - // stop the client, then update the config - c.Stop() - require.NoError(t, as.UpdateConfig(newConfig)) - - // reconnect, client should get latest config - require.NoError(t, c.Start(context.Background())) - assert.NoError(t, waitFor(func() error { - if cImpl.Config() != newConfig { - return fmt.Errorf("client never got updated config") - } - return nil - })) -} - -func TestServer_UpdateConfigStopping(t *testing.T) { - app := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}) - defer srv.Stop() - as, err := srv.Register(app, initConfig) - require.NoError(t, err) - cImpl := &StubClientImpl{} - c := newClientFromApplicationState(t, as, cImpl) - require.NoError(t, c.Start(context.Background())) - defer c.Stop() - - // clients should get initial check-ins then set as healthy - require.NoError(t, waitFor(func() error { - if cImpl.Config() != initConfig { - return fmt.Errorf("client never got initial config") - } - return nil - })) - err = c.Status(proto.StateObserved_HEALTHY, "Running", nil) - require.NoError(t, err) - assert.NoError(t, waitFor(func() error { - if app.Status() != proto.StateObserved_HEALTHY { - return fmt.Errorf("server never updated currect application state") - } - return nil - })) - - // perform stop try to update config (which will error) - done := make(chan bool) - go func() { - _ = as.Stop(500 * time.Millisecond) - close(done) - }() - err = as.UpdateConfig(newConfig) - assert.Error(t, ErrApplicationStopping, err) - <-done -} - -func TestServer_Stop(t *testing.T) { - app := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}) - defer srv.Stop() - as, err := srv.Register(app, initConfig) - require.NoError(t, err) - cImpl := &StubClientImpl{} - c := newClientFromApplicationState(t, as, cImpl) - require.NoError(t, c.Start(context.Background())) 
- defer c.Stop() - - // clients should get initial check-ins then set as healthy - require.NoError(t, waitFor(func() error { - if cImpl.Config() != initConfig { - return fmt.Errorf("client never got initial config") - } - return nil - })) - err = c.Status(proto.StateObserved_HEALTHY, "Running", nil) - require.NoError(t, err) - assert.NoError(t, waitFor(func() error { - if app.Status() != proto.StateObserved_HEALTHY { - return fmt.Errorf("server never updated currect application state") - } - return nil - })) - - // send stop to the client - done := make(chan bool) - var stopErr error - go func() { - stopErr = as.Stop(time.Second * 5) - close(done) - }() - - // process of testing the flow - // 1. server sends stop - // 2. client sends configuring - // 3. server sends stop again - // 4. client sends stopping - // 5. client disconnects - require.NoError(t, waitFor(func() error { - if cImpl.Stop() == 0 { - return fmt.Errorf("client never got expected stop") - } - return nil - })) - err = c.Status(proto.StateObserved_CONFIGURING, "Configuring", nil) - require.NoError(t, err) - require.NoError(t, waitFor(func() error { - if cImpl.Stop() < 1 { - return fmt.Errorf("client never got expected stop again") - } - return nil - })) - err = c.Status(proto.StateObserved_STOPPING, "Stopping", nil) - require.NoError(t, err) - require.NoError(t, waitFor(func() error { - if app.Status() != proto.StateObserved_STOPPING { - return fmt.Errorf("server never updated to stopping") - } - return nil - })) - c.Stop() - <-done - - // no error on stop - assert.NoError(t, stopErr) -} - -func TestServer_StopJustDisconnect(t *testing.T) { - app := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}) - defer srv.Stop() - as, err := srv.Register(app, initConfig) - require.NoError(t, err) - cImpl := &StubClientImpl{} - c := newClientFromApplicationState(t, as, cImpl) - require.NoError(t, c.Start(context.Background())) - defer c.Stop() - - // clients should get initial check-ins then set as healthy - require.NoError(t, waitFor(func() error { - if cImpl.Config() != initConfig { - return fmt.Errorf("client never got initial config") - } - return nil - })) - err = c.Status(proto.StateObserved_HEALTHY, "Running", nil) - require.NoError(t, err) - assert.NoError(t, waitFor(func() error { - if app.Status() != proto.StateObserved_HEALTHY { - return fmt.Errorf("server never updated currect application state") - } - return nil - })) - - // send stop to the client - done := make(chan bool) - var stopErr error - go func() { - stopErr = as.Stop(time.Second * 5) - close(done) - }() - - // process of testing the flow - // 1. server sends stop - // 2. 
client disconnects - require.NoError(t, waitFor(func() error { - if cImpl.Stop() == 0 { - return fmt.Errorf("client never got expected stop") - } - return nil - })) - c.Stop() - <-done - - // no error on stop - assert.NoError(t, stopErr) -} - -func TestServer_StopTimeout(t *testing.T) { - app := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}) - defer srv.Stop() - as, err := srv.Register(app, initConfig) - require.NoError(t, err) - cImpl := &StubClientImpl{} - c := newClientFromApplicationState(t, as, cImpl) - require.NoError(t, c.Start(context.Background())) - defer c.Stop() - - // clients should get initial check-ins then set as healthy - require.NoError(t, waitFor(func() error { - if cImpl.Config() != initConfig { - return fmt.Errorf("client never got initial config") - } - return nil - })) - err = c.Status(proto.StateObserved_HEALTHY, "Running", nil) - require.NoError(t, err) - assert.NoError(t, waitFor(func() error { - if app.Status() != proto.StateObserved_HEALTHY { - return fmt.Errorf("server never updated currect application state") - } - return nil - })) - - // send stop to the client - done := make(chan bool) - var stopErr error - go func() { - stopErr = as.Stop(time.Millisecond) - close(done) - }() - - // don't actually stop the client - - // timeout error on stop - <-done - assert.Equal(t, ErrApplicationStopTimedOut, stopErr) -} - -func TestServer_WatchdogFailApp(t *testing.T) { - checkMinTimeout := 300 * time.Millisecond - app := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}, func(s *Server) { - s.watchdogCheckInterval = 100 * time.Millisecond - s.checkInMinTimeout = checkMinTimeout - }) - defer srv.Stop() - _, err := srv.Register(app, initConfig) - require.NoError(t, err) - assert.NoError(t, waitFor(func() error { - if app.Status() != proto.StateObserved_DEGRADED { - return fmt.Errorf("app status nevers set to degraded") - } - return nil - })) - assert.Equal(t, "Missed last check-in", app.Message()) - assert.NoError(t, waitFor(func() error { - if app.Status() != proto.StateObserved_FAILED { - return fmt.Errorf("app status nevers set to degraded") - } - return nil - })) - assert.Equal(t, "Missed two check-ins", app.Message()) -} - -func TestServer_PerformAction(t *testing.T) { - app := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}, func(s *Server) { - s.watchdogCheckInterval = 50 * time.Millisecond - }) - defer srv.Stop() - as, err := srv.Register(app, initConfig) - require.NoError(t, err) - cImpl := &StubClientImpl{} - c := newClientFromApplicationState(t, as, cImpl, &EchoAction{}, &SleepAction{}) - require.NoError(t, c.Start(context.Background())) - defer c.Stop() - - // successful action - resp, err := as.PerformAction("echo", map[string]interface{}{ - "echo": "hello world", - }, 5*time.Second) - require.NoError(t, err) - assert.Equal(t, map[string]interface{}{ - "echo": "hello world", - }, resp) - - // action error client-side - _, err = as.PerformAction("echo", map[string]interface{}{ - "bad_param": "hello world", - }, 5*time.Second) - require.Error(t, err) - - // very slow action that times out - _, err = as.PerformAction("sleep", map[string]interface{}{ - "sleep": time.Second, - }, 10*time.Millisecond) - require.Error(t, err) - assert.Equal(t, ErrActionTimedOut, err) - - // try slow action again with the client disconnected (should timeout the same) - c.Stop() - require.NoError(t, waitFor(func() error { - as.actionsLock.RLock() - defer as.actionsLock.RUnlock() - if as.actionsDone != nil { - return fmt.Errorf("client never 
disconnected the actions stream") - } - return nil - })) - _, err = as.PerformAction("sleep", map[string]interface{}{ - "sleep": time.Second, - }, 10*time.Millisecond) - require.Error(t, err) - assert.Equal(t, ErrActionTimedOut, err) - - // perform action, reconnect client, and then action should be performed - done := make(chan bool) - go func() { - _, err = as.PerformAction("sleep", map[string]interface{}{ - "sleep": 100 * time.Millisecond, - }, 5*time.Second) - close(done) - }() - require.NoError(t, c.Start(context.Background())) - <-done - require.NoError(t, err) - - // perform action, destroy application - done = make(chan bool) - go func() { - _, err = as.PerformAction("sleep", map[string]interface{}{ - "sleep": time.Second, - }, 5*time.Second) - close(done) - }() - <-time.After(100 * time.Millisecond) - as.Destroy() - <-done - require.Error(t, err) - assert.Equal(t, ErrActionCancelled, err) - - // perform action after destroy returns cancelled - _, err = as.PerformAction("sleep", map[string]interface{}{ - "sleep": time.Second, - }, 5*time.Second) - assert.Equal(t, ErrActionCancelled, err) -} - -func newErrorLogger(t *testing.T) *logger.Logger { - t.Helper() - - loggerCfg := logger.DefaultLoggingConfig() - loggerCfg.Level = logp.ErrorLevel - - log, err := logger.NewFromConfig("", loggerCfg, false) - require.NoError(t, err) - return log -} - -func createAndStartServer(t *testing.T, handler Handler, extraConfigs ...func(*Server)) *Server { - t.Helper() - srv, err := New(newErrorLogger(t), "localhost:0", handler, apmtest.DiscardTracer) - require.NoError(t, err) - for _, extra := range extraConfigs { - extra(srv) - } - require.NoError(t, srv.Start()) - return srv -} - -func newClientFromApplicationState(t *testing.T, as *ApplicationState, impl client.StateInterface, actions ...client.Action) client.Client { - t.Helper() - - var err error - var c client.Client - var wg sync.WaitGroup - r, w := io.Pipe() - wg.Add(1) - go func() { - c, err = client.NewFromReader(r, impl, actions...) 
- wg.Done() - }() - - require.NoError(t, as.WriteConnInfo(w)) - wg.Wait() - require.NoError(t, err) - return c -} - -type StubApp struct { - lock sync.RWMutex - status proto.StateObserved_Status - message string - payload map[string]interface{} -} - -func (a *StubApp) Status() proto.StateObserved_Status { - a.lock.RLock() - defer a.lock.RUnlock() - return a.status -} - -func (a *StubApp) Message() string { - a.lock.RLock() - defer a.lock.RUnlock() - return a.message -} - -type StubHandler struct{} - -func (h *StubHandler) OnStatusChange(as *ApplicationState, status proto.StateObserved_Status, message string, payload map[string]interface{}) { - stub, _ := as.app.(*StubApp) - stub.lock.Lock() - defer stub.lock.Unlock() - stub.status = status - stub.message = message - stub.payload = payload -} - -type StubClientImpl struct { - Lock sync.RWMutex - config string - stop int - error error -} - -func (c *StubClientImpl) Config() string { - c.Lock.RLock() - defer c.Lock.RUnlock() - return c.config -} - -func (c *StubClientImpl) Stop() int { - c.Lock.RLock() - defer c.Lock.RUnlock() - return c.stop -} - -func (c *StubClientImpl) Error() error { - c.Lock.RLock() - defer c.Lock.RUnlock() - return c.error -} - -func (c *StubClientImpl) OnConfig(config string) { - c.Lock.Lock() - defer c.Lock.Unlock() - c.config = config -} - -func (c *StubClientImpl) OnStop() { - c.Lock.Lock() - defer c.Lock.Unlock() - c.stop++ -} - -func (c *StubClientImpl) OnError(err error) { - c.Lock.Lock() - defer c.Lock.Unlock() - c.error = err -} - -type EchoAction struct{} - -func (*EchoAction) Name() string { - return "echo" -} - -func (*EchoAction) Execute(ctx context.Context, request map[string]interface{}) (map[string]interface{}, error) { - echoRaw, ok := request["echo"] - if !ok { - return nil, fmt.Errorf("missing required param of echo") - } - return map[string]interface{}{ - "echo": echoRaw, - }, nil -} - -type SleepAction struct{} - -func (*SleepAction) Name() string { - return "sleep" -} - -func (*SleepAction) Execute(ctx context.Context, request map[string]interface{}) (map[string]interface{}, error) { - sleepRaw, ok := request["sleep"] - if !ok { - return nil, fmt.Errorf("missing required param of slow") - } - sleep, ok := sleepRaw.(float64) - if !ok { - return nil, fmt.Errorf("sleep param must be a number") - } - timer := time.NewTimer(time.Duration(sleep)) - defer timer.Stop() - - select { - case <-ctx.Done(): - return nil, ctx.Err() - case <-timer.C: - } - - return map[string]interface{}{}, nil -} - -func waitFor(check func() error) error { - started := time.Now() - for { - err := check() - if err == nil { - return nil - } - if time.Since(started) >= 5*time.Second { - return fmt.Errorf("check timed out after 5 second: %w", err) - } - time.Sleep(10 * time.Millisecond) - } -} diff --git a/version/version.go b/version/version.go index 07f3cb8046a..6636201e4dc 100644 --- a/version/version.go +++ b/version/version.go @@ -4,4 +4,4 @@ package version -const defaultBeatVersion = "8.4.0" +const defaultBeatVersion = "8.3.0" From 03e10d966f613e2ea5d9d3bc78fc86d9665078f7 Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Wed, 20 Jul 2022 16:20:47 -0400 Subject: [PATCH 09/19] More work on switching to v2 runtime. 
--- .../handlers/handler_action_application.go | 3 +- .../actions/handlers/handler_action_cancel.go | 1 + .../handlers/handler_action_policy_change.go | 7 +- .../handler_action_policy_change_test.go | 3 +- .../handler_action_policy_reassign.go | 1 + .../handlers/handler_action_settings.go | 1 + .../handlers/handler_action_unenroll.go | 1 + .../handlers/handler_action_upgrade.go | 1 + .../actions/handlers/handler_default.go | 1 + .../actions/handlers/handler_unknown.go | 1 + internal/pkg/agent/application/application.go | 29 +- .../application/coordinator/coordinator.go | 12 +- .../application/dispatcher/dispatcher.go | 5 +- .../application/dispatcher/dispatcher_test.go | 5 +- .../application/fleet_server_bootstrap.go | 256 ++++-------------- .../gateway/fleet/fleet_gateway.go | 3 +- .../fleetserver/fleet_gateway_local.go | 116 -------- .../pkg/agent/application/managed_mode.go | 114 +++++--- .../agent/application/managed_mode_test.go | 3 +- internal/pkg/agent/application/once.go | 1 + .../pkg/agent/application/paths/common.go | 3 +- internal/pkg/agent/application/periodic.go | 3 +- .../pkg/agent/application/upgrade/upgrade.go | 3 +- internal/pkg/agent/cmd/diagnostics.go | 42 +-- internal/pkg/agent/cmd/inspect.go | 60 ++-- internal/pkg/agent/cmd/inspect_test.go | 2 + internal/pkg/agent/configuration/grpc.go | 4 + .../pkg/agent/control/cproto/control.pb.go | 5 +- .../agent/control/cproto/control_grpc.pb.go | 5 + internal/pkg/agent/control/server/server.go | 8 +- internal/pkg/agent/install/uninstall.go | 3 +- .../kubernetes_secrets_test.go | 3 +- .../pkg/fleetapi/acker/noop/noop_acker.go | 1 + pkg/component/load.go | 2 +- 34 files changed, 258 insertions(+), 450 deletions(-) delete mode 100644 internal/pkg/agent/application/gateway/fleetserver/fleet_gateway_local.go diff --git a/internal/pkg/agent/application/actions/handlers/handler_action_application.go b/internal/pkg/agent/application/actions/handlers/handler_action_application.go index b72d17a6847..cb05fde0558 100644 --- a/internal/pkg/agent/application/actions/handlers/handler_action_application.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_application.go @@ -7,11 +7,12 @@ package handlers import ( "context" "fmt" + "time" + "github.com/elastic/elastic-agent-client/v7/pkg/client" "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "github.com/elastic/elastic-agent/pkg/component" - "time" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" diff --git a/internal/pkg/agent/application/actions/handlers/handler_action_cancel.go b/internal/pkg/agent/application/actions/handlers/handler_action_cancel.go index 7944cdd8fd2..3744c7b3b44 100644 --- a/internal/pkg/agent/application/actions/handlers/handler_action_cancel.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_cancel.go @@ -7,6 +7,7 @@ package handlers import ( "context" "fmt" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" diff --git a/internal/pkg/agent/application/actions/handlers/handler_action_policy_change.go b/internal/pkg/agent/application/actions/handlers/handler_action_policy_change.go index 572b820c6ce..da00db64657 100644 --- a/internal/pkg/agent/application/actions/handlers/handler_action_policy_change.go +++ 
b/internal/pkg/agent/application/actions/handlers/handler_action_policy_change.go @@ -8,14 +8,15 @@ import ( "bytes" "context" "fmt" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/actions" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "io" "io/ioutil" "sort" "time" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/actions" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" + "gopkg.in/yaml.v2" "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" diff --git a/internal/pkg/agent/application/actions/handlers/handler_action_policy_change_test.go b/internal/pkg/agent/application/actions/handlers/handler_action_policy_change_test.go index 657611ca797..26dc8e916cc 100644 --- a/internal/pkg/agent/application/actions/handlers/handler_action_policy_change_test.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_policy_change_test.go @@ -6,10 +6,11 @@ package handlers import ( "context" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" "sync" "testing" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" "github.com/elastic/elastic-agent/internal/pkg/agent/storage" diff --git a/internal/pkg/agent/application/actions/handlers/handler_action_policy_reassign.go b/internal/pkg/agent/application/actions/handlers/handler_action_policy_reassign.go index 6f367700819..c233afddb34 100644 --- a/internal/pkg/agent/application/actions/handlers/handler_action_policy_reassign.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_policy_reassign.go @@ -6,6 +6,7 @@ package handlers import ( "context" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" diff --git a/internal/pkg/agent/application/actions/handlers/handler_action_settings.go b/internal/pkg/agent/application/actions/handlers/handler_action_settings.go index bd05205817e..1b3ee2227a3 100644 --- a/internal/pkg/agent/application/actions/handlers/handler_action_settings.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_settings.go @@ -7,6 +7,7 @@ package handlers import ( "context" "fmt" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" diff --git a/internal/pkg/agent/application/actions/handlers/handler_action_unenroll.go b/internal/pkg/agent/application/actions/handlers/handler_action_unenroll.go index 0753dea39e1..64c5b7615ba 100644 --- a/internal/pkg/agent/application/actions/handlers/handler_action_unenroll.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_unenroll.go @@ -7,6 +7,7 @@ package handlers import ( "context" "fmt" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" "github.com/elastic/elastic-agent/internal/pkg/config" "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" diff --git a/internal/pkg/agent/application/actions/handlers/handler_action_upgrade.go 
b/internal/pkg/agent/application/actions/handlers/handler_action_upgrade.go index 01462b47b5c..577b2682303 100644 --- a/internal/pkg/agent/application/actions/handlers/handler_action_upgrade.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_upgrade.go @@ -7,6 +7,7 @@ package handlers import ( "context" "fmt" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" diff --git a/internal/pkg/agent/application/actions/handlers/handler_default.go b/internal/pkg/agent/application/actions/handlers/handler_default.go index 2afd8b77a67..4de1e550bcb 100644 --- a/internal/pkg/agent/application/actions/handlers/handler_default.go +++ b/internal/pkg/agent/application/actions/handlers/handler_default.go @@ -6,6 +6,7 @@ package handlers import ( "context" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" diff --git a/internal/pkg/agent/application/actions/handlers/handler_unknown.go b/internal/pkg/agent/application/actions/handlers/handler_unknown.go index 5a62b60c42f..99a510f2525 100644 --- a/internal/pkg/agent/application/actions/handlers/handler_unknown.go +++ b/internal/pkg/agent/application/actions/handlers/handler_unknown.go @@ -6,6 +6,7 @@ package handlers import ( "context" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" diff --git a/internal/pkg/agent/application/application.go b/internal/pkg/agent/application/application.go index b5254896fc1..788e189cb60 100644 --- a/internal/pkg/agent/application/application.go +++ b/internal/pkg/agent/application/application.go @@ -10,9 +10,10 @@ import ( goruntime "runtime" "strconv" - "github.com/elastic/go-sysinfo" "go.elastic.co/apm" + "github.com/elastic/go-sysinfo" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" @@ -78,8 +79,14 @@ func New( upgrader := upgrade.NewUpgrader(log, cfg.Settings.DownloadConfig) + runtime, err := runtime.NewManager(log, cfg.Settings.GRPC.String(), tracer) + if err != nil { + return nil, fmt.Errorf("failed to initialize runtime manager: %w", err) + } + var configMgr coordinator.ConfigManager var managed *managedConfigManager + var compModifiers []coordinator.ComponentsModifier if configuration.IsStandalone(cfg.Fleet) { log.Info("Parsed configuration and determined agent is managed locally") @@ -94,34 +101,34 @@ func New( } } else if configuration.IsFleetServerBootstrap(cfg.Fleet) { log.Info("Parsed configuration and determined agent is in Fleet Server bootstrap mode") - // //return newFleetServerBootstrap(ctx, log, pathConfigFile, rawConfig, statusCtrl, agentInfo, tracer) - // return nil, errors.New("TODO: fleet-server bootstrap mode") + compModifiers = append(compModifiers, FleetServerComponentModifier) + configMgr, err = newFleetServerBootstrapManager(log) + if err != nil { + return nil, err + } } else { - log.Info("Parsed configuration and determined agent is managed by Fleet") - var store storage.Store store, cfg, err = mergeFleetConfig(rawConfig) if err != nil { return nil, err } - managed, err = newManagedConfigManager(log, agentInfo, cfg, store) + log.Info("Parsed configuration and determined agent is managed by 
Fleet") + + compModifiers = append(compModifiers, FleetServerComponentModifier) + managed, err = newManagedConfigManager(log, agentInfo, cfg, store, runtime) if err != nil { return nil, err } configMgr = managed } - runtime, err := runtime.NewManager(log, cfg.Settings.GRPC.String(), tracer) - if err != nil { - return nil, fmt.Errorf("failed to initialize runtime manager: %w", err) - } composable, err := composable.New(log, rawConfig) if err != nil { return nil, errors.New(err, "failed to initialize composable controller") } - coord := coordinator.New(log, specs, reexec, upgrader, runtime, configMgr, composable, caps) + coord := coordinator.New(log, specs, reexec, upgrader, runtime, configMgr, composable, caps, compModifiers...) if managed != nil { // the coordinator requires the config manager as well as in managed-mode the config manager requires the // coordinator, so it must be set here once the coordinator is created diff --git a/internal/pkg/agent/application/coordinator/coordinator.go b/internal/pkg/agent/application/coordinator/coordinator.go index 49141ab3524..dac48400179 100644 --- a/internal/pkg/agent/application/coordinator/coordinator.go +++ b/internal/pkg/agent/application/coordinator/coordinator.go @@ -1,11 +1,18 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + package coordinator import ( "context" "errors" "fmt" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi" + "go.elastic.co/apm" + "github.com/elastic/elastic-agent-client/v7/pkg/client" "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/application/reexec" @@ -16,7 +23,6 @@ import ( "github.com/elastic/elastic-agent/pkg/component" "github.com/elastic/elastic-agent/pkg/component/runtime" "github.com/elastic/elastic-agent/pkg/core/logger" - "go.elastic.co/apm" ) var ( @@ -104,7 +110,7 @@ type VarsManager interface { // ComponentsModifier is a function that takes the computed components model and modifies it before // passing it into the components runtime manager. -type ComponentsModifier func(comps []component.Component) ([]component.Component, error) +type ComponentsModifier func(comps []component.Component, policy map[string]interface{}) ([]component.Component, error) // State provides the current state of the coordinator along with all the current states of components and units. 
type State struct { @@ -477,7 +483,7 @@ func (c *Coordinator) process(ctx context.Context) (err error) { } for _, modifier := range c.modifiers { - comps, err = modifier(comps) + comps, err = modifier(comps, cfg) if err != nil { return fmt.Errorf("failed to modify components: %w", err) } diff --git a/internal/pkg/agent/application/dispatcher/dispatcher.go b/internal/pkg/agent/application/dispatcher/dispatcher.go index efe70a2846c..170204f184f 100644 --- a/internal/pkg/agent/application/dispatcher/dispatcher.go +++ b/internal/pkg/agent/application/dispatcher/dispatcher.go @@ -7,11 +7,12 @@ package dispatcher import ( "context" "fmt" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/actions" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "reflect" "strings" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/actions" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" + "go.elastic.co/apm" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" diff --git a/internal/pkg/agent/application/dispatcher/dispatcher_test.go b/internal/pkg/agent/application/dispatcher/dispatcher_test.go index 21d1cf1fd83..4c19779688a 100644 --- a/internal/pkg/agent/application/dispatcher/dispatcher_test.go +++ b/internal/pkg/agent/application/dispatcher/dispatcher_test.go @@ -9,11 +9,12 @@ import ( "testing" "time" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi" "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/noop" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" ) type mockHandler struct { diff --git a/internal/pkg/agent/application/fleet_server_bootstrap.go b/internal/pkg/agent/application/fleet_server_bootstrap.go index 069ce839161..2479f854884 100644 --- a/internal/pkg/agent/application/fleet_server_bootstrap.go +++ b/internal/pkg/agent/application/fleet_server_bootstrap.go @@ -4,234 +4,74 @@ package application -/* import ( "context" + "time" - "go.elastic.co/apm" - - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" - "github.com/elastic/elastic-agent/internal/pkg/sorted" - "github.com/elastic/go-sysinfo" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/filters" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/emitter/modifiers" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/router" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/stream" - "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/operation" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/internal/pkg/core/monitoring" - monitoringCfg 
"github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config" - "github.com/elastic/elastic-agent/internal/pkg/core/status" - reporting "github.com/elastic/elastic-agent/internal/pkg/reporter" - logreporter "github.com/elastic/elastic-agent/internal/pkg/reporter/log" + "github.com/elastic/elastic-agent/pkg/component" "github.com/elastic/elastic-agent/pkg/core/logger" - "github.com/elastic/elastic-agent/pkg/core/server" ) -// FleetServerBootstrap application, does just enough to get a Fleet Server up and running so enrollment -// can complete. -type FleetServerBootstrap struct { - bgContext context.Context - cancelCtxFn context.CancelFunc - log *logger.Logger - Config configuration.FleetAgentConfig - agentInfo *info.AgentInfo - router pipeline.Router - source source - srv *server.Server -} - -func newFleetServerBootstrap( - ctx context.Context, - log *logger.Logger, - pathConfigFile string, - rawConfig *config.Config, - statusCtrl status.Controller, - agentInfo *info.AgentInfo, - tracer *apm.Tracer, -) (*FleetServerBootstrap, error) { - cfg, err := configuration.NewFromConfig(rawConfig) - if err != nil { - return nil, err - } - - if log == nil { - log, err = logger.NewFromConfig("", cfg.Settings.LoggingConfig, false) - if err != nil { - return nil, err - } - } - - logR := logreporter.NewReporter(log) - - sysInfo, err := sysinfo.Host() - if err != nil { - return nil, errors.New(err, - "fail to get system information", - errors.TypeUnexpected) - } - - bootstrapApp := &FleetServerBootstrap{ - log: log, - agentInfo: agentInfo, - } - - bootstrapApp.bgContext, bootstrapApp.cancelCtxFn = context.WithCancel(ctx) - bootstrapApp.srv, err = server.NewFromConfig(log, cfg.Settings.GRPC, &operation.ApplicationStatusHandler{}, tracer) - if err != nil { - return nil, errors.New(err, "initialize GRPC listener") - } - - reporter := reporting.NewReporter(bootstrapApp.bgContext, log, bootstrapApp.agentInfo, logR) - - if cfg.Settings.MonitoringConfig != nil { - cfg.Settings.MonitoringConfig.Enabled = false - } else { - cfg.Settings.MonitoringConfig = &monitoringCfg.MonitoringConfig{Enabled: false} - } - monitor, err := monitoring.NewMonitor(cfg.Settings) - if err != nil { - return nil, errors.New(err, "failed to initialize monitoring") - } - - router, err := router.New(log, stream.Factory(bootstrapApp.bgContext, agentInfo, cfg.Settings, bootstrapApp.srv, reporter, monitor, statusCtrl)) - if err != nil { - return nil, errors.New(err, "fail to initialize pipeline router") - } - bootstrapApp.router = router - - emit, err := bootstrapEmitter( - bootstrapApp.bgContext, - log, - agentInfo, - router, - &pipeline.ConfigModifiers{ - Filters: []pipeline.FilterFunc{filters.StreamChecker, modifiers.InjectFleet(rawConfig, sysInfo.Info(), agentInfo)}, +// injectFleetServerInput is the base configuration that is used plus the FleetServerComponentModifier that adjusts +// the components before sending them to the runtime manager. +var injectFleetServerInput = config.MustNewConfigFrom(map[string]interface{}{ + "outputs": map[string]interface{}{ + "default": map[string]interface{}{ + "type": "elasticsearch", + "hosts": []string{"localhost:9200"}, }, - ) - if err != nil { - return nil, err - } - - loader := config.NewLoader(log, "") - discover := discoverer(pathConfigFile, cfg.Settings.Path) - bootstrapApp.source = newOnce(log, discover, loader, emit) - return bootstrapApp, nil -} - -// Routes returns a list of routes handled by server. 
-func (b *FleetServerBootstrap) Routes() *sorted.Set { - return b.router.Routes() -} - -// Start starts a managed elastic-agent. -func (b *FleetServerBootstrap) Start() error { - b.log.Info("Agent is starting") - defer b.log.Info("Agent is stopped") - - if err := b.srv.Start(); err != nil { - return err - } - if err := b.source.Start(); err != nil { - return err - } + }, + "inputs": []interface{}{ + map[string]interface{}{ + "type": "fleet-server", + }, + }, +}) - return nil +// FleetServerComponentModifier modifies the comps to inject extra information from the policy into +// the Fleet Server component and units needed to run Fleet Server correctly. +func FleetServerComponentModifier(comps []component.Component, policy map[string]interface{}) ([]component.Component, error) { + return comps, nil } -// Stop stops a local agent. -func (b *FleetServerBootstrap) Stop() error { - err := b.source.Stop() - b.cancelCtxFn() - b.router.Shutdown() - b.srv.Stop() - return err -} +type fleetServerBootstrapManager struct { + log *logger.Logger -// AgentInfo retrieves elastic-agent information. -func (b *FleetServerBootstrap) AgentInfo() *info.AgentInfo { - return b.agentInfo + ch chan coordinator.ConfigChange + errCh chan error } -func bootstrapEmitter(ctx context.Context, log *logger.Logger, agentInfo transpiler.AgentInfo, router pipeline.Router, modifiers *pipeline.ConfigModifiers) (pipeline.EmitterFunc, error) { - ch := make(chan *config.Config) - - go func() { - for { - var c *config.Config - select { - case <-ctx.Done(): - return - case c = <-ch: - } - - err := emit(ctx, log, agentInfo, router, modifiers, c) - if err != nil { - log.Error(err) - } - } - }() - - return func(ctx context.Context, c *config.Config) error { - span, _ := apm.StartSpan(ctx, "emit", "app.internal") - defer span.End() - ch <- c - return nil +func newFleetServerBootstrapManager( + log *logger.Logger, +) (*fleetServerBootstrapManager, error) { + return &fleetServerBootstrapManager{ + log: log, + ch: make(chan coordinator.ConfigChange), + errCh: make(chan error), }, nil } -func emit(ctx context.Context, log *logger.Logger, agentInfo transpiler.AgentInfo, router pipeline.Router, modifiers *pipeline.ConfigModifiers, c *config.Config) error { - if err := info.InjectAgentConfig(c); err != nil { - return err - } +func (m *fleetServerBootstrapManager) Run(ctx context.Context) error { + ctx, cancel := context.WithTimeout(ctx, 15*time.Second) + defer cancel() - // perform and verify ast translation - m, err := c.ToMapStr() - if err != nil { - return errors.New(err, "could not create the AST from the configuration", errors.TypeConfig) - } - ast, err := transpiler.NewAST(m) - if err != nil { - return errors.New(err, "could not create the AST from the configuration", errors.TypeConfig) - } - for _, filter := range modifiers.Filters { - if err := filter(log, ast); err != nil { - return errors.New(err, "failed to filter configuration", errors.TypeConfig) - } + m.log.Debugf("injecting fleet-server for bootstrap") + select { + case <-ctx.Done(): + return ctx.Err() + case m.ch <- &localConfigChange{injectFleetServerInput}: } - // overwrite the inputs to only have a single fleet-server input - transpiler.Insert(ast, transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("type", transpiler.NewStrVal("fleet-server")), - }), - }), "inputs") + <-ctx.Done() + return ctx.Err() +} - spec, ok := program.SupportedMap["fleet-server"] - if !ok { - return errors.New("missing required fleet-server program 
specification") - } - ok, err = program.DetectProgram(spec.Rules, spec.When, spec.Constraints, agentInfo, ast) - if err != nil { - return errors.New(err, "failed parsing the configuration") - } - if !ok { - return errors.New("bootstrap configuration is incorrect causing fleet-server to not be started") - } +func (m *fleetServerBootstrapManager) Errors() <-chan error { + return m.errCh +} - return router.Route(ctx, ast.HashStr(), map[pipeline.RoutingKey][]program.Program{ - pipeline.DefaultRK: { - { - Spec: spec, - Config: ast, - }, - }, - }) +func (m *fleetServerBootstrapManager) Watch() <-chan coordinator.ConfigChange { + return m.ch } -*/ diff --git a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go index c7de26f9609..8c146f72459 100644 --- a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go +++ b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go @@ -8,11 +8,12 @@ import ( "context" stderr "errors" "fmt" + "time" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" dispatcher2 "github.com/elastic/elastic-agent/internal/pkg/agent/application/dispatcher" agentclient "github.com/elastic/elastic-agent/internal/pkg/agent/control/client" "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" - "time" "github.com/elastic/elastic-agent/internal/pkg/fleetapi/client" diff --git a/internal/pkg/agent/application/gateway/fleetserver/fleet_gateway_local.go b/internal/pkg/agent/application/gateway/fleetserver/fleet_gateway_local.go deleted file mode 100644 index 763f003b25f..00000000000 --- a/internal/pkg/agent/application/gateway/fleetserver/fleet_gateway_local.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package fleetserver - -import ( - "context" - "time" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/gateway" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi/client" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -const gatewayWait = 2 * time.Second - -var injectFleetServerInput = map[string]interface{}{ - // outputs is replaced by the fleet-server.spec - "outputs": map[string]interface{}{ - "default": map[string]interface{}{ - "type": "elasticsearch", - "hosts": []string{"localhost:9200"}, - }, - }, - "inputs": []interface{}{ - map[string]interface{}{ - "type": "fleet-server", - }, - }, -} - -// fleetServerWrapper wraps the fleetGateway to ensure that a local Fleet Server is running before trying -// to communicate with the gateway, which is local to the Elastic Agent. -type fleetServerWrapper struct { - bgContext context.Context - log *logger.Logger - cfg *configuration.FleetAgentConfig - injectedCfg *config.Config - wrapped gateway.FleetGateway - emitter pipeline.EmitterFunc -} - -// New creates a new fleet server gateway wrapping another fleet gateway. 
-func New( - ctx context.Context, - log *logger.Logger, - cfg *configuration.FleetAgentConfig, - rawConfig *config.Config, - wrapped gateway.FleetGateway, - emitter pipeline.EmitterFunc, - injectServer bool) (gateway.FleetGateway, error) { - if cfg.Server == nil || !injectServer { - // not running a local Fleet Server - return wrapped, nil - } - - injectedCfg, err := injectFleetServer(rawConfig) - if err != nil { - return nil, errors.New(err, "failed to inject fleet-server input to start local Fleet Server", errors.TypeConfig) - } - - return &fleetServerWrapper{ - bgContext: ctx, - log: log, - cfg: cfg, - injectedCfg: injectedCfg, - wrapped: wrapped, - emitter: emitter, - }, nil -} - -// Start starts the gateway. -func (w *fleetServerWrapper) Start() error { - err := w.emitter(context.Background(), w.injectedCfg) - if err != nil { - return err - } - sleep(w.bgContext, gatewayWait) - return w.wrapped.Start() -} - -// SetClient sets the client for the wrapped gateway. -func (w *fleetServerWrapper) SetClient(c client.Sender) { - w.wrapped.SetClient(c) -} - -func injectFleetServer(rawConfig *config.Config) (*config.Config, error) { - cfg := map[string]interface{}{} - err := rawConfig.Unpack(cfg) - if err != nil { - return nil, err - } - cloned, err := config.NewConfigFrom(cfg) - if err != nil { - return nil, err - } - err = cloned.Merge(injectFleetServerInput) - if err != nil { - return nil, err - } - return cloned, nil -} - -func sleep(ctx context.Context, d time.Duration) { - t := time.NewTimer(d) - defer t.Stop() - select { - case <-ctx.Done(): - case <-t.C: - } -} diff --git a/internal/pkg/agent/application/managed_mode.go b/internal/pkg/agent/application/managed_mode.go index b68d010ada4..893b7541606 100644 --- a/internal/pkg/agent/application/managed_mode.go +++ b/internal/pkg/agent/application/managed_mode.go @@ -7,7 +7,10 @@ package application import ( "context" "fmt" - handlers2 "github.com/elastic/elastic-agent/internal/pkg/agent/application/actions/handlers" + "time" + + "github.com/elastic/elastic-agent-client/v7/pkg/client" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/actions/handlers" "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" "github.com/elastic/elastic-agent/internal/pkg/agent/application/dispatcher" fleetgateway "github.com/elastic/elastic-agent/internal/pkg/agent/application/gateway/fleet" @@ -21,9 +24,10 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/fleet" "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/lazy" "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/retrier" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi/client" + fleetclient "github.com/elastic/elastic-agent/internal/pkg/fleetapi/client" "github.com/elastic/elastic-agent/internal/pkg/queue" "github.com/elastic/elastic-agent/internal/pkg/remote" + "github.com/elastic/elastic-agent/pkg/component/runtime" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -35,6 +39,7 @@ type managedConfigManager struct { store storage.Store stateStore *store.StateStore actionQueue *queue.ActionQueue + runtime *runtime.Manager coord *coordinator.Coordinator ch chan coordinator.ConfigChange @@ -46,8 +51,9 @@ func newManagedConfigManager( agentInfo *info.AgentInfo, cfg *configuration.Configuration, storeSaver storage.Store, + runtime *runtime.Manager, ) (*managedConfigManager, error) { - 
client, err := client.NewAuthWithConfig(log, cfg.Fleet.AccessAPIKey, cfg.Fleet.Client) + client, err := fleetclient.NewAuthWithConfig(log, cfg.Fleet.AccessAPIKey, cfg.Fleet.Client) if err != nil { return nil, errors.New(err, "fail to create API client", @@ -74,6 +80,7 @@ func newManagedConfigManager( store: storeSaver, stateStore: stateStore, actionQueue: actionQueue, + runtime: runtime, ch: make(chan coordinator.ConfigChange), errCh: make(chan error), }, nil @@ -101,7 +108,7 @@ func (m *managedConfigManager) Run(ctx context.Context) error { defer gatewayCancel() // Create the actionDispatcher. - actionDispatcher, err := newManagedActionDispatcher(m, gatewayCancel) + actionDispatcher, policyChanger, err := newManagedActionDispatcher(m, gatewayCancel) if err != nil { return err } @@ -128,7 +135,7 @@ func (m *managedConfigManager) Run(ctx context.Context) error { }() actions := m.stateStore.Actions() - //stateRestored := false + stateRestored := false if len(actions) > 0 && !m.wasUnenrolled() { // TODO(ph) We will need an improvement on fleet, if there is an error while dispatching a // persisted action on disk we should be able to ask Fleet to get the latest configuration. @@ -136,7 +143,16 @@ func (m *managedConfigManager) Run(ctx context.Context) error { if err := store.ReplayActions(ctx, m.log, actionDispatcher, actionAcker, actions...); err != nil { m.log.Errorf("could not recover state, error %+v, skipping...", err) } - //stateRestored = true + stateRestored = true + } + + // In the case this is the first start and this Elastic Agent is running a Fleet Server; we need to ensure that + // the Fleet Server is running before the Fleet gateway is started. + if !stateRestored && m.cfg.Fleet.Server != nil { + err = m.initFleetServer(ctx) + if err != nil { + return fmt.Errorf("failed to initialize Fleet Server: %w", err) + } } gateway, err := fleetgateway.New( @@ -153,6 +169,12 @@ func (m *managedConfigManager) Run(ctx context.Context) error { return err } + // Not running a Fleet Server so the gateway and acker can be changed based on the configuration change. + if m.cfg.Fleet.Server == nil { + policyChanger.AddSetter(gateway) + policyChanger.AddSetter(ack) + } + // Proxy errors from the gateway to our own channel. go func() { for { @@ -178,22 +200,6 @@ func (m *managedConfigManager) Run(ctx context.Context) error { gatewayErrCh <- err }() - /* - gateway, err = localgateway.New(ctx, m.log, m.cfg.Fleet, rawConfig, gateway, emit, !stateRestored) - if err != nil { - return nil, err - } - // add the acker and gateway to setters, so the they can be updated - // when the hosts for Fleet Server are updated by the policy. 
- if cfg.Fleet.Server == nil { - // setters only set when not running a local Fleet Server - policyChanger.AddSetter(gateway) - policyChanger.AddSetter(acker) - } - - managedApplication.gateway = gateway - */ - <-ctx.Done() return <-gatewayErrCh } @@ -216,13 +222,53 @@ func (m *managedConfigManager) wasUnenrolled() bool { return false } -func newManagedActionDispatcher(m *managedConfigManager, canceller context.CancelFunc) (*dispatcher.ActionDispatcher, error) { - actionDispatcher, err := dispatcher.New(m.log, handlers2.NewDefault(m.log)) +func (m *managedConfigManager) initFleetServer(ctx context.Context) error { + ctx, cancel := context.WithTimeout(ctx, 15*time.Second) + defer cancel() + + m.log.Debugf("injecting basic fleet-server for first start") + select { + case <-ctx.Done(): + return ctx.Err() + case m.ch <- &localConfigChange{injectFleetServerInput}: + } + + m.log.Debugf("watching fleet-server-default component state") + sub := m.runtime.Subscribe(ctx, "fleet-server-default") + for { + select { + case <-ctx.Done(): + return ctx.Err() + case state := <-sub.Ch(): + if fleetServerRunning(state) { + m.log.With("state", state).Debugf("fleet-server-default component is running") + return nil + } + m.log.With("state", state).Debugf("fleet-server-default component is not running") + } + } +} + +func fleetServerRunning(state runtime.ComponentState) bool { + if state.State == client.UnitStateHealthy || state.State == client.UnitStateDegraded { + for key, unit := range state.Units { + if key.UnitType == client.UnitTypeInput && key.UnitID == "fleet-server-default-fleet-server" { + if unit.State == client.UnitStateHealthy || unit.State == client.UnitStateDegraded { + return true + } + } + } + } + return false +} + +func newManagedActionDispatcher(m *managedConfigManager, canceller context.CancelFunc) (*dispatcher.ActionDispatcher, *handlers.PolicyChange, error) { + actionDispatcher, err := dispatcher.New(m.log, handlers.NewDefault(m.log)) if err != nil { - return nil, err + return nil, nil, err } - policyChanger := handlers2.NewPolicyChange( + policyChanger := handlers.NewPolicyChange( m.log, m.agentInfo, m.cfg, @@ -237,12 +283,12 @@ func newManagedActionDispatcher(m *managedConfigManager, canceller context.Cance actionDispatcher.MustRegister( &fleetapi.ActionPolicyReassign{}, - handlers2.NewPolicyReassign(m.log), + handlers.NewPolicyReassign(m.log), ) actionDispatcher.MustRegister( &fleetapi.ActionUnenroll{}, - handlers2.NewUnenroll( + handlers.NewUnenroll( m.log, m.ch, []context.CancelFunc{canceller}, @@ -252,12 +298,12 @@ func newManagedActionDispatcher(m *managedConfigManager, canceller context.Cance actionDispatcher.MustRegister( &fleetapi.ActionUpgrade{}, - handlers2.NewUpgrade(m.log, m.coord), + handlers.NewUpgrade(m.log, m.coord), ) actionDispatcher.MustRegister( &fleetapi.ActionSettings{}, - handlers2.NewSettings( + handlers.NewSettings( m.log, m.agentInfo, m.coord, @@ -266,7 +312,7 @@ func newManagedActionDispatcher(m *managedConfigManager, canceller context.Cance actionDispatcher.MustRegister( &fleetapi.ActionCancel{}, - handlers2.NewCancel( + handlers.NewCancel( m.log, m.actionQueue, ), @@ -274,13 +320,13 @@ func newManagedActionDispatcher(m *managedConfigManager, canceller context.Cance actionDispatcher.MustRegister( &fleetapi.ActionApp{}, - handlers2.NewAppAction(m.log, m.coord), + handlers.NewAppAction(m.log, m.coord), ) actionDispatcher.MustRegister( &fleetapi.ActionUnknown{}, - handlers2.NewUnknown(m.log), + handlers.NewUnknown(m.log), ) - return actionDispatcher, nil + return 
actionDispatcher, policyChanger, nil } diff --git a/internal/pkg/agent/application/managed_mode_test.go b/internal/pkg/agent/application/managed_mode_test.go index 2ebf26cc1aa..206dd90079e 100644 --- a/internal/pkg/agent/application/managed_mode_test.go +++ b/internal/pkg/agent/application/managed_mode_test.go @@ -7,9 +7,10 @@ package application import ( "context" "encoding/json" + "testing" + handlers2 "github.com/elastic/elastic-agent/internal/pkg/agent/application/actions/handlers" "github.com/elastic/elastic-agent/internal/pkg/agent/application/dispatcher" - "testing" "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" noopacker "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/noop" diff --git a/internal/pkg/agent/application/once.go b/internal/pkg/agent/application/once.go index 2ef6940cff9..7326612950b 100644 --- a/internal/pkg/agent/application/once.go +++ b/internal/pkg/agent/application/once.go @@ -7,6 +7,7 @@ package application import ( "context" "fmt" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" diff --git a/internal/pkg/agent/application/paths/common.go b/internal/pkg/agent/application/paths/common.go index 315f515a13c..79b114144cc 100644 --- a/internal/pkg/agent/application/paths/common.go +++ b/internal/pkg/agent/application/paths/common.go @@ -128,11 +128,12 @@ func Data() string { return filepath.Join(Top(), "data") } +// Components returns the component directory for Agent func Components() string { return filepath.Join(Home(), "components") } -// Logs returns a the log directory for Agent +// Logs returns the log directory for Agent func Logs() string { return logsPath } diff --git a/internal/pkg/agent/application/periodic.go b/internal/pkg/agent/application/periodic.go index fcc2967c163..bb9f717a7af 100644 --- a/internal/pkg/agent/application/periodic.go +++ b/internal/pkg/agent/application/periodic.go @@ -6,10 +6,11 @@ package application import ( "context" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" "strings" "time" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" + "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/config" "github.com/elastic/elastic-agent/internal/pkg/filewatcher" diff --git a/internal/pkg/agent/application/upgrade/upgrade.go b/internal/pkg/agent/application/upgrade/upgrade.go index 4f1db73ea79..cb6f827d8e2 100644 --- a/internal/pkg/agent/application/upgrade/upgrade.go +++ b/internal/pkg/agent/application/upgrade/upgrade.go @@ -8,13 +8,14 @@ import ( "bytes" "context" "fmt" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "io/ioutil" "os" "path/filepath" "runtime" "strings" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" + "github.com/otiai10/copy" "go.elastic.co/apm" diff --git a/internal/pkg/agent/cmd/diagnostics.go b/internal/pkg/agent/cmd/diagnostics.go index ad3a77e1a4f..718e1c4596f 100644 --- a/internal/pkg/agent/cmd/diagnostics.go +++ b/internal/pkg/agent/cmd/diagnostics.go @@ -428,29 +428,31 @@ func gatherConfig() (AgentConfig, error) { } cfg.ConfigRendered = mapCFG - // Gather vars to render process config - isStandalone, err := isStandalone(renderedCFG) - if err != nil { - return AgentConfig{}, err - } + /* + // Gather vars to render process config + 
isStandalone, err := isStandalone(renderedCFG) + if err != nil { + return AgentConfig{}, err + } - log, err := newErrorLogger() - if err != nil { - return AgentConfig{}, err - } + log, err := newErrorLogger() + if err != nil { + return AgentConfig{}, err + } - // Get process config - uses same approach as inspect output command. - // Does not contact server process to request configs. - pMap, err := getProgramsFromConfig(log, agentInfo, renderedCFG, isStandalone) - if err != nil { - return AgentConfig{}, err - } - cfg.AppConfig = make(map[string]interface{}, 0) - for rk, programs := range pMap { - for _, p := range programs { - cfg.AppConfig[p.Identifier()+"_"+rk] = p.Configuration() + // Get process config - uses same approach as inspect output command. + // Does not contact server process to request configs. + pMap, err := getProgramsFromConfig(log, agentInfo, renderedCFG, isStandalone) + if err != nil { + return AgentConfig{}, err } - } + cfg.AppConfig = make(map[string]interface{}, 0) + for rk, programs := range pMap { + for _, p := range programs { + cfg.AppConfig[p.Identifier()+"_"+rk] = p.Configuration() + } + } + */ return cfg, nil } diff --git a/internal/pkg/agent/cmd/inspect.go b/internal/pkg/agent/cmd/inspect.go index a320efde43e..03ea093cab6 100644 --- a/internal/pkg/agent/cmd/inspect.go +++ b/internal/pkg/agent/cmd/inspect.go @@ -5,30 +5,11 @@ package cmd import ( - "context" - "fmt" - "os" - "github.com/spf13/cobra" - "gopkg.in/yaml.v2" - "github.com/elastic/elastic-agent-libs/logp" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/filters" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" - "github.com/elastic/elastic-agent/internal/pkg/capabilities" "github.com/elastic/elastic-agent/internal/pkg/cli" - "github.com/elastic/elastic-agent/internal/pkg/composable" "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/internal/pkg/config/operations" - "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/noop" - "github.com/elastic/elastic-agent/internal/pkg/sorted" - "github.com/elastic/elastic-agent/pkg/core/logger" - "github.com/elastic/go-sysinfo" ) func newInspectCommandWithArgs(s []string, streams *cli.IOStreams) *cobra.Command { @@ -38,10 +19,12 @@ func newInspectCommandWithArgs(s []string, streams *cli.IOStreams) *cobra.Comman Long: "Shows current configuration of the agent", Args: cobra.ExactArgs(0), Run: func(c *cobra.Command, args []string) { - if err := inspectConfig(paths.ConfigFile()); err != nil { - fmt.Fprintf(streams.Err, "Error: %v\n%s\n", err, troubleshootMessage()) - os.Exit(1) - } + /* + if err := inspectConfig(paths.ConfigFile()); err != nil { + fmt.Fprintf(streams.Err, "Error: %v\n%s\n", err, troubleshootMessage()) + os.Exit(1) + } + */ }, } @@ -57,19 +40,22 @@ func newInspectOutputCommandWithArgs(_ []string) *cobra.Command { Long: "Displays configuration generated for output.\nIf no output is specified list of output is displayed", Args: cobra.MaximumNArgs(2), RunE: func(c *cobra.Command, args 
[]string) error { - outName, _ := c.Flags().GetString("output") - program, _ := c.Flags().GetString("program") - cfgPath := paths.ConfigFile() - agentInfo, err := info.NewAgentInfo(false) - if err != nil { - return err - } - - if outName == "" { - return inspectOutputs(cfgPath, agentInfo) - } - - return inspectOutput(cfgPath, outName, program, agentInfo) + /* + outName, _ := c.Flags().GetString("output") + program, _ := c.Flags().GetString("program") + cfgPath := paths.ConfigFile() + agentInfo, err := info.NewAgentInfo(false) + if err != nil { + return err + } + + if outName == "" { + return inspectOutputs(cfgPath, agentInfo) + } + + return inspectOutput(cfgPath, outName, program, agentInfo) + */ + return nil }, } @@ -79,6 +65,7 @@ func newInspectOutputCommandWithArgs(_ []string) *cobra.Command { return cmd } +/* func inspectConfig(cfgPath string) error { err := tryContainerLoadPaths() if err != nil { @@ -386,6 +373,7 @@ func (w *waitForCompose) Watch() <-chan []*transpiler.Vars { func (w *waitForCompose) Wait() { <-w.done } +*/ func isStandalone(cfg *config.Config) (bool, error) { c, err := configuration.NewFromConfig(cfg) diff --git a/internal/pkg/agent/cmd/inspect_test.go b/internal/pkg/agent/cmd/inspect_test.go index 361f77d5904..3a5ffb35380 100644 --- a/internal/pkg/agent/cmd/inspect_test.go +++ b/internal/pkg/agent/cmd/inspect_test.go @@ -4,6 +4,7 @@ package cmd +/* import ( "testing" ) @@ -49,3 +50,4 @@ func TestGetFleetInput(t *testing.T) { }) } } +*/ diff --git a/internal/pkg/agent/configuration/grpc.go b/internal/pkg/agent/configuration/grpc.go index 24d095e068d..6624e6a0c08 100644 --- a/internal/pkg/agent/configuration/grpc.go +++ b/internal/pkg/agent/configuration/grpc.go @@ -1,3 +1,7 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + package configuration import "fmt" diff --git a/internal/pkg/agent/control/cproto/control.pb.go b/internal/pkg/agent/control/cproto/control.pb.go index 8b15dd2f5ac..7ada35a4fe0 100644 --- a/internal/pkg/agent/control/cproto/control.pb.go +++ b/internal/pkg/agent/control/cproto/control.pb.go @@ -11,10 +11,11 @@ package cproto import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) const ( diff --git a/internal/pkg/agent/control/cproto/control_grpc.pb.go b/internal/pkg/agent/control/cproto/control_grpc.pb.go index b3be5c72626..c9e97f7047a 100644 --- a/internal/pkg/agent/control/cproto/control_grpc.pb.go +++ b/internal/pkg/agent/control/cproto/control_grpc.pb.go @@ -1,3 +1,7 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: // - protoc-gen-go-grpc v1.2.0 @@ -8,6 +12,7 @@ package cproto import ( context "context" + grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" diff --git a/internal/pkg/agent/control/server/server.go b/internal/pkg/agent/control/server/server.go index 3b2de4adf41..620a5b7b024 100644 --- a/internal/pkg/agent/control/server/server.go +++ b/internal/pkg/agent/control/server/server.go @@ -8,12 +8,14 @@ import ( "context" "encoding/json" "fmt" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" + "net" + "sync" + "go.elastic.co/apm" "go.elastic.co/apm/module/apmgrpc" "google.golang.org/grpc" - "net" - "sync" + + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" "github.com/elastic/elastic-agent/internal/pkg/agent/control" "github.com/elastic/elastic-agent/internal/pkg/agent/control/cproto" diff --git a/internal/pkg/agent/install/uninstall.go b/internal/pkg/agent/install/uninstall.go index 35d00df59ef..b43db32892d 100644 --- a/internal/pkg/agent/install/uninstall.go +++ b/internal/pkg/agent/install/uninstall.go @@ -7,13 +7,14 @@ package install import ( "context" "fmt" - "github.com/kardianos/service" "os" "os/exec" "path/filepath" "runtime" "strings" + "github.com/kardianos/service" + "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" diff --git a/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets_test.go b/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets_test.go index f0b56bb82a6..388f33074bb 100644 --- a/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets_test.go +++ b/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets_test.go @@ -6,10 +6,11 @@ package kubernetessecrets import ( "context" - ctesting "github.com/elastic/elastic-agent/internal/pkg/composable/testing" "testing" "time" + ctesting "github.com/elastic/elastic-agent/internal/pkg/composable/testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/internal/pkg/fleetapi/acker/noop/noop_acker.go b/internal/pkg/fleetapi/acker/noop/noop_acker.go index 644210bd199..7c410d73bc0 100644 --- a/internal/pkg/fleetapi/acker/noop/noop_acker.go +++ b/internal/pkg/fleetapi/acker/noop/noop_acker.go @@ -6,6 +6,7 @@ package noop import ( "context" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" diff --git a/pkg/component/load.go b/pkg/component/load.go index 8a22d5d5f91..62a983f1f9d 100644 --- a/pkg/component/load.go +++ b/pkg/component/load.go @@ -149,7 +149,7 @@ func LoadRuntimeSpecs(dir string, platform PlatformDetail, opts ...LoadRuntimeOp // Inputs returns the list of supported inputs for this platform. func (r *RuntimeSpecs) Inputs() []string { inputs := make([]string, 0, len(r.inputSpecs)) - for inputType, _ := range r.inputSpecs { + for inputType := range r.inputSpecs { inputs = append(inputs, inputType) } return inputs From f30873b5d998025b11b4341d1cc46850aed74220 Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Wed, 20 Jul 2022 16:37:01 -0400 Subject: [PATCH 10/19] Cleanup some imports. 
--- .../handlers/handler_action_application.go | 6 +++--- .../actions/handlers/handler_action_cancel.go | 3 +-- .../handlers/handler_action_policy_change.go | 7 +++---- .../handler_action_policy_change_test.go | 8 +++----- .../handlers/handler_action_policy_reassign.go | 3 +-- .../actions/handlers/handler_action_settings.go | 3 +-- .../actions/handlers/handler_action_unenroll.go | 3 +-- .../actions/handlers/handler_action_upgrade.go | 3 +-- .../actions/handlers/handler_default.go | 3 +-- .../actions/handlers/handler_unknown.go | 3 +-- .../application/gateway/fleet/fleet_gateway.go | 16 +++++++--------- 11 files changed, 23 insertions(+), 35 deletions(-) diff --git a/internal/pkg/agent/application/actions/handlers/handler_action_application.go b/internal/pkg/agent/application/actions/handlers/handler_action_application.go index cb05fde0558..d36f8f1d33a 100644 --- a/internal/pkg/agent/application/actions/handlers/handler_action_application.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_application.go @@ -10,12 +10,12 @@ import ( "time" "github.com/elastic/elastic-agent-client/v7/pkg/client" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" - "github.com/elastic/elastic-agent/pkg/component" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" + "github.com/elastic/elastic-agent/pkg/component" "github.com/elastic/elastic-agent/pkg/core/logger" ) diff --git a/internal/pkg/agent/application/actions/handlers/handler_action_cancel.go b/internal/pkg/agent/application/actions/handlers/handler_action_cancel.go index 3744c7b3b44..bb48b2bd753 100644 --- a/internal/pkg/agent/application/actions/handlers/handler_action_cancel.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_cancel.go @@ -8,9 +8,8 @@ import ( "context" "fmt" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "github.com/elastic/elastic-agent/pkg/core/logger" ) diff --git a/internal/pkg/agent/application/actions/handlers/handler_action_policy_change.go b/internal/pkg/agent/application/actions/handlers/handler_action_policy_change.go index da00db64657..a3f4ff0b3ea 100644 --- a/internal/pkg/agent/application/actions/handlers/handler_action_policy_change.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_policy_change.go @@ -13,18 +13,17 @@ import ( "sort" "time" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/actions" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" - "gopkg.in/yaml.v2" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/actions" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" 
"github.com/elastic/elastic-agent/internal/pkg/agent/storage" "github.com/elastic/elastic-agent/internal/pkg/config" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "github.com/elastic/elastic-agent/internal/pkg/fleetapi/client" "github.com/elastic/elastic-agent/internal/pkg/remote" "github.com/elastic/elastic-agent/pkg/core/logger" diff --git a/internal/pkg/agent/application/actions/handlers/handler_action_policy_change_test.go b/internal/pkg/agent/application/actions/handlers/handler_action_policy_change_test.go index 26dc8e916cc..34114153875 100644 --- a/internal/pkg/agent/application/actions/handlers/handler_action_policy_change_test.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_policy_change_test.go @@ -9,15 +9,13 @@ import ( "sync" "testing" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" "github.com/elastic/elastic-agent/internal/pkg/agent/storage" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/elastic/elastic-agent/internal/pkg/config" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" noopacker "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/noop" diff --git a/internal/pkg/agent/application/actions/handlers/handler_action_policy_reassign.go b/internal/pkg/agent/application/actions/handlers/handler_action_policy_reassign.go index c233afddb34..2044052d48b 100644 --- a/internal/pkg/agent/application/actions/handlers/handler_action_policy_reassign.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_policy_reassign.go @@ -7,9 +7,8 @@ package handlers import ( "context" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "github.com/elastic/elastic-agent/pkg/core/logger" ) diff --git a/internal/pkg/agent/application/actions/handlers/handler_action_settings.go b/internal/pkg/agent/application/actions/handlers/handler_action_settings.go index 1b3ee2227a3..eed67a50682 100644 --- a/internal/pkg/agent/application/actions/handlers/handler_action_settings.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_settings.go @@ -9,12 +9,11 @@ import ( "fmt" "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/application/reexec" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "github.com/elastic/elastic-agent/pkg/core/logger" ) diff --git a/internal/pkg/agent/application/actions/handlers/handler_action_unenroll.go 
b/internal/pkg/agent/application/actions/handlers/handler_action_unenroll.go index 64c5b7615ba..045d52a4fcf 100644 --- a/internal/pkg/agent/application/actions/handlers/handler_action_unenroll.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_unenroll.go @@ -10,9 +10,8 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "github.com/elastic/elastic-agent/pkg/core/logger" ) diff --git a/internal/pkg/agent/application/actions/handlers/handler_action_upgrade.go b/internal/pkg/agent/application/actions/handlers/handler_action_upgrade.go index 577b2682303..1760c96d369 100644 --- a/internal/pkg/agent/application/actions/handlers/handler_action_upgrade.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_upgrade.go @@ -9,9 +9,8 @@ import ( "fmt" "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "github.com/elastic/elastic-agent/pkg/core/logger" ) diff --git a/internal/pkg/agent/application/actions/handlers/handler_default.go b/internal/pkg/agent/application/actions/handlers/handler_default.go index 4de1e550bcb..dd59861f584 100644 --- a/internal/pkg/agent/application/actions/handlers/handler_default.go +++ b/internal/pkg/agent/application/actions/handlers/handler_default.go @@ -7,9 +7,8 @@ package handlers import ( "context" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "github.com/elastic/elastic-agent/pkg/core/logger" ) diff --git a/internal/pkg/agent/application/actions/handlers/handler_unknown.go b/internal/pkg/agent/application/actions/handlers/handler_unknown.go index 99a510f2525..e0fdf4c81ab 100644 --- a/internal/pkg/agent/application/actions/handlers/handler_unknown.go +++ b/internal/pkg/agent/application/actions/handlers/handler_unknown.go @@ -7,9 +7,8 @@ package handlers import ( "context" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "github.com/elastic/elastic-agent/pkg/core/logger" ) diff --git a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go index 8c146f72459..d8c21a580d3 100644 --- a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go +++ b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go @@ -11,17 +11,15 @@ import ( "time" "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" - dispatcher2 "github.com/elastic/elastic-agent/internal/pkg/agent/application/dispatcher" - agentclient "github.com/elastic/elastic-agent/internal/pkg/agent/control/client" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" - - 
"github.com/elastic/elastic-agent/internal/pkg/fleetapi/client" - + "github.com/elastic/elastic-agent/internal/pkg/agent/application/dispatcher" "github.com/elastic/elastic-agent/internal/pkg/agent/application/gateway" "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" + agentclient "github.com/elastic/elastic-agent/internal/pkg/agent/control/client" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/core/backoff" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/client" "github.com/elastic/elastic-agent/internal/pkg/scheduler" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -72,7 +70,7 @@ type actionQueue interface { type fleetGateway struct { log *logger.Logger - dispatcher dispatcher2.Dispatcher + dispatcher dispatcher.Dispatcher client client.Sender scheduler scheduler.Scheduler settings *fleetGatewaySettings @@ -90,7 +88,7 @@ func New( log *logger.Logger, agentInfo agentInfo, client client.Sender, - d dispatcher2.Dispatcher, + d dispatcher.Dispatcher, acker acker.Acker, stateFetcher coordinator.StateFetcher, stateStore stateStore, @@ -117,7 +115,7 @@ func newFleetGatewayWithScheduler( settings *fleetGatewaySettings, agentInfo agentInfo, client client.Sender, - d dispatcher2.Dispatcher, + d dispatcher.Dispatcher, scheduler scheduler.Scheduler, acker acker.Acker, stateFetcher coordinator.StateFetcher, From 6c1d9f632c661c9ca0ceba8445788007aed69653 Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Wed, 20 Jul 2022 16:39:07 -0400 Subject: [PATCH 11/19] More import cleanups. --- internal/pkg/agent/application/dispatcher/dispatcher.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/internal/pkg/agent/application/dispatcher/dispatcher.go b/internal/pkg/agent/application/dispatcher/dispatcher.go index 170204f184f..8628cf5a59f 100644 --- a/internal/pkg/agent/application/dispatcher/dispatcher.go +++ b/internal/pkg/agent/application/dispatcher/dispatcher.go @@ -10,13 +10,12 @@ import ( "reflect" "strings" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/actions" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" - "go.elastic.co/apm" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/actions" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "github.com/elastic/elastic-agent/pkg/core/logger" ) From e0f4f7209ab3c588445a849dc9233090f1ee13f6 Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Wed, 20 Jul 2022 16:40:22 -0400 Subject: [PATCH 12/19] Add TODO to FleetServerComponentModifier. 
--- internal/pkg/agent/application/fleet_server_bootstrap.go | 1 + 1 file changed, 1 insertion(+) diff --git a/internal/pkg/agent/application/fleet_server_bootstrap.go b/internal/pkg/agent/application/fleet_server_bootstrap.go index 2479f854884..bfb801b9dde 100644 --- a/internal/pkg/agent/application/fleet_server_bootstrap.go +++ b/internal/pkg/agent/application/fleet_server_bootstrap.go @@ -33,6 +33,7 @@ var injectFleetServerInput = config.MustNewConfigFrom(map[string]interface{}{ // FleetServerComponentModifier modifies the comps to inject extra information from the policy into // the Fleet Server component and units needed to run Fleet Server correctly. func FleetServerComponentModifier(comps []component.Component, policy map[string]interface{}) ([]component.Component, error) { + // TODO(blakerouse): Need to add logic to update the Fleet Server component with extra information from the policy. return comps, nil } From b95cf8c5c7f7f0b6103ccd34b885ea3b31f5d682 Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Thu, 21 Jul 2022 10:34:54 -0400 Subject: [PATCH 13/19] More cleanup and removals. --- .gitignore | 7 - dev-tools/cmd/buildspec/buildspec.go | 131 ---- internal/pkg/agent/application/application.go | 33 +- .../application/upgrade}/artifact/artifact.go | 16 +- .../application/upgrade}/artifact/config.go | 0 .../artifact/download/composed/downloader.go | 8 +- .../download/composed/downloader_test.go | 12 +- .../artifact/download/composed/verifier.go | 8 +- .../download/composed/verifier_test.go | 14 +- .../upgrade}/artifact/download/downloader.go | 4 +- .../artifact/download/fs/downloader.go | 21 +- .../drop/beat-8.0.0-darwin-x86_64.tar.gz | 0 .../beat-8.0.0-darwin-x86_64.tar.gz.sha512 | 0 .../upgrade}/artifact/download/fs/verifier.go | 9 +- .../artifact/download/fs/verifier_test.go | 14 +- .../artifact/download/http/downloader.go | 23 +- .../artifact/download/http/downloader_test.go | 3 +- .../artifact/download/http/elastic_test.go | 26 +- .../artifact/download/http/headers_rtt.go | 0 .../download/http/headers_rtt_test.go | 0 .../artifact/download/http/verifier.go | 20 +- .../download/localremote/downloader.go | 12 +- .../artifact/download/localremote/verifier.go | 12 +- .../artifact/download/snapshot/downloader.go | 7 +- .../artifact/download/snapshot/verifier.go | 6 +- .../upgrade}/artifact/download/verifier.go | 5 +- .../application/upgrade/step_download.go | 41 +- .../pkg/agent/application/upgrade/upgrade.go | 8 +- internal/pkg/agent/cmd/container.go | 3 +- internal/pkg/agent/cmd/diagnostics.go | 1 + internal/pkg/agent/cmd/inspect.go | 2 + internal/pkg/agent/configuration/settings.go | 3 +- internal/pkg/agent/install/uninstall.go | 102 +-- internal/pkg/agent/operation/common_test.go | 177 ----- internal/pkg/agent/operation/monitoring.go | 679 ------------------ .../pkg/agent/operation/monitoring_test.go | 268 ------- internal/pkg/agent/operation/operation.go | 83 --- .../pkg/agent/operation/operation_config.go | 67 -- .../agent/operation/operation_retryable.go | 95 --- .../pkg/agent/operation/operation_start.go | 65 -- .../pkg/agent/operation/operation_stop.go | 50 -- internal/pkg/agent/operation/operator.go | 396 ---------- .../pkg/agent/operation/operator_handlers.go | 103 --- internal/pkg/agent/operation/operator_test.go | 489 ------------- .../tests/downloads/-1.0-darwin-x86_64.tar.gz | 0 .../configurable-1.0-darwin-x86_64.tar.gz | 0 .../configurable-1.0-darwin-x86_64/README.md | 1 - .../configurable-1.0-darwin-x86_64/main.go | 104 --- .../serviceable-1.0-darwin-x86_64/README.md 
| 1 - .../serviceable-1.0-darwin-x86_64/main.go | 142 ---- internal/pkg/agent/program/program.go | 307 -------- internal/pkg/agent/program/program_test.go | 614 ---------------- internal/pkg/agent/program/spec.go | 111 --- internal/pkg/agent/program/spec_test.go | 149 ---- internal/pkg/agent/program/supported.go | 41 -- .../testdata/audit_config-auditbeat.yml | 164 ----- .../agent/program/testdata/audit_config.yml | 104 --- .../agent/program/testdata/enabled_false.yml | 17 - .../program/testdata/enabled_output_false.yml | 17 - .../testdata/enabled_output_true-filebeat.yml | 38 - .../program/testdata/enabled_output_true.yml | 17 - .../testdata/enabled_true-filebeat.yml | 38 - .../agent/program/testdata/enabled_true.yml | 22 - .../agent/program/testdata/endpoint_arm.yml | 117 --- .../endpoint_basic-endpoint-security.yml | 113 --- .../agent/program/testdata/endpoint_basic.yml | 115 --- .../program/testdata/endpoint_no_fleet.yml | 102 --- .../testdata/endpoint_unknown_output.yml | 107 --- .../testdata/fleet_server-fleet-server.yml | 33 - .../agent/program/testdata/fleet_server.yml | 51 -- .../agent/program/testdata/journal_config.yml | 21 - .../logstash_config-endpoint-security.yml | 114 --- ...logstash_config-filebeat-elasticsearch.yml | 43 -- .../testdata/logstash_config-filebeat.yml | 39 - .../testdata/logstash_config-fleet-server.yml | 18 - .../testdata/logstash_config-heartbeat.yml | 29 - .../testdata/logstash_config-metricbeat.yml | 89 --- .../testdata/logstash_config-packetbeat.yml | 34 - .../program/testdata/logstash_config.yml | 212 ------ .../testdata/namespace-endpoint-security.yml | 114 --- .../program/testdata/namespace-filebeat.yml | 71 -- .../testdata/namespace-fleet-server.yml | 18 - .../program/testdata/namespace-heartbeat.yml | 30 - .../program/testdata/namespace-metricbeat.yml | 91 --- .../program/testdata/namespace-packetbeat.yml | 35 - .../pkg/agent/program/testdata/namespace.yml | 201 ------ .../single_config-endpoint-security.yml | 115 --- .../testdata/single_config-filebeat.yml | 71 -- .../testdata/single_config-fleet-server.yml | 18 - .../testdata/single_config-heartbeat.yml | 31 - .../testdata/single_config-metricbeat.yml | 91 --- .../testdata/single_config-packetbeat.yml | 36 - .../agent/program/testdata/single_config.yml | 202 ------ .../testdata/synthetics_config-heartbeat.yml | 66 -- .../program/testdata/synthetics_config.yml | 31 - .../testdata/usecases/enabled_output_true.yml | 17 - .../testdata/usecases/enabled_true.yml | 22 - .../testdata/usecases/endpoint_basic.yml | 115 --- .../testdata/usecases/fleet_server.yml | 51 -- .../enabled_output_true.filebeat.golden.yml | 38 - .../enabled_true.filebeat.golden.yml | 38 - ...ndpoint_basic.endpoint-security.golden.yml | 112 --- .../fleet_server.fleet-server.golden.yml | 33 - .../namespace.endpoint-security.golden.yml | 113 --- .../generated/namespace.filebeat.golden.yml | 70 -- .../namespace.fleet-server.golden.yml | 18 - .../generated/namespace.heartbeat.golden.yml | 30 - .../generated/namespace.metricbeat.golden.yml | 98 --- .../generated/namespace.packetbeat.golden.yml | 35 - ...single_config.endpoint-security.golden.yml | 114 --- .../single_config.filebeat.golden.yml | 73 -- .../single_config.fleet-server.golden.yml | 18 - .../single_config.heartbeat.golden.yml | 31 - .../single_config.metricbeat.golden.yml | 99 --- .../single_config.packetbeat.golden.yml | 36 - .../synthetics_config.heartbeat.golden.yml | 68 -- .../program/testdata/usecases/namespace.yml | 201 ------ .../testdata/usecases/single_config.yml | 204 
------ .../testdata/usecases/synthetics_config.yml | 31 - .../install/atomic/atomic_installer.go | 97 --- .../install/atomic/atomic_installer_test.go | 120 ---- .../install/awaitable/awaitable_installer.go | 57 -- .../pkg/artifact/install/dir/dir_checker.go | 26 - .../artifact/install/hooks/hooks_installer.go | 60 -- internal/pkg/artifact/install/installer.go | 84 --- .../pkg/artifact/install/tar/tar_installer.go | 141 ---- .../pkg/artifact/install/zip/zip_installer.go | 163 ----- .../uninstall/hooks/hooks_uninstaller.go | 35 - .../pkg/artifact/uninstall/uninstaller.go | 29 - internal/pkg/core/app/descriptor.go | 91 --- internal/pkg/core/app/execution_context.go | 43 -- internal/pkg/core/app/process_cred.go | 65 -- internal/pkg/core/app/process_cred_other.go | 13 - internal/pkg/core/app/spec.go | 25 - internal/pkg/core/app/tag.go | 24 - .../core/monitoring/beats/beats_monitor.go | 3 +- .../pkg/core/monitoring/server/process.go | 3 +- internal/pkg/core/plugin/common.go | 84 --- internal/pkg/core/plugin/common_test.go | 95 --- internal/pkg/core/plugin/process/app.go | 319 -------- internal/pkg/core/plugin/process/configure.go | 59 -- internal/pkg/core/plugin/process/start.go | 190 ----- internal/pkg/core/plugin/process/status.go | 124 ---- internal/pkg/core/plugin/process/stdlogger.go | 56 -- .../pkg/core/plugin/process/stdlogger_test.go | 70 -- .../pkg/core/plugin/process/watch_posix.go | 32 - .../pkg/core/plugin/process/watch_windows.go | 54 -- internal/pkg/core/plugin/service/app.go | 373 ---------- internal/pkg/core/state/state.go | 99 --- internal/pkg/core/status/reporter.go | 302 -------- internal/pkg/core/status/reporter_test.go | 101 --- internal/spec/apm-server.yml | 38 - internal/spec/auditbeat.yml | 98 --- internal/spec/cloudbeat.yml | 35 - internal/spec/endpoint.yml | 67 -- internal/spec/filebeat.yml | 115 --- internal/spec/fleet-server.yml | 69 -- internal/spec/heartbeat.yml | 24 - internal/spec/metricbeat.yml | 98 --- internal/spec/osquerybeat.yml | 39 - internal/spec/packetbeat.yml | 22 - magefile.go | 30 +- pkg/component/platforms.go | 30 + 163 files changed, 217 insertions(+), 12518 deletions(-) delete mode 100644 dev-tools/cmd/buildspec/buildspec.go rename internal/pkg/{ => agent/application/upgrade}/artifact/artifact.go (74%) rename internal/pkg/{ => agent/application/upgrade}/artifact/config.go (100%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/composed/downloader.go (79%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/composed/downloader_test.go (81%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/composed/verifier.go (83%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/composed/verifier_test.go (83%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/downloader.go (68%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/fs/downloader.go (75%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/fs/testdata/drop/beat-8.0.0-darwin-x86_64.tar.gz (100%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/fs/testdata/drop/beat-8.0.0-darwin-x86_64.tar.gz.sha512 (100%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/fs/verifier.go (88%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/fs/verifier_test.go (91%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/http/downloader.go (91%) rename internal/pkg/{ => 
agent/application/upgrade}/artifact/download/http/downloader_test.go (98%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/http/elastic_test.go (81%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/http/headers_rtt.go (100%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/http/headers_rtt_test.go (100%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/http/verifier.go (84%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/localremote/downloader.go (66%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/localremote/verifier.go (68%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/snapshot/downloader.go (91%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/snapshot/verifier.go (70%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/verifier.go (97%) delete mode 100644 internal/pkg/agent/operation/common_test.go delete mode 100644 internal/pkg/agent/operation/monitoring.go delete mode 100644 internal/pkg/agent/operation/monitoring_test.go delete mode 100644 internal/pkg/agent/operation/operation.go delete mode 100644 internal/pkg/agent/operation/operation_config.go delete mode 100644 internal/pkg/agent/operation/operation_retryable.go delete mode 100644 internal/pkg/agent/operation/operation_start.go delete mode 100644 internal/pkg/agent/operation/operation_stop.go delete mode 100644 internal/pkg/agent/operation/operator.go delete mode 100644 internal/pkg/agent/operation/operator_handlers.go delete mode 100644 internal/pkg/agent/operation/operator_test.go delete mode 100644 internal/pkg/agent/operation/tests/downloads/-1.0-darwin-x86_64.tar.gz delete mode 100644 internal/pkg/agent/operation/tests/downloads/configurable-1.0-darwin-x86_64.tar.gz delete mode 100644 internal/pkg/agent/operation/tests/scripts/configurable-1.0-darwin-x86_64/README.md delete mode 100644 internal/pkg/agent/operation/tests/scripts/configurable-1.0-darwin-x86_64/main.go delete mode 100644 internal/pkg/agent/operation/tests/scripts/serviceable-1.0-darwin-x86_64/README.md delete mode 100644 internal/pkg/agent/operation/tests/scripts/serviceable-1.0-darwin-x86_64/main.go delete mode 100644 internal/pkg/agent/program/program.go delete mode 100644 internal/pkg/agent/program/program_test.go delete mode 100644 internal/pkg/agent/program/spec.go delete mode 100644 internal/pkg/agent/program/spec_test.go delete mode 100644 internal/pkg/agent/program/supported.go delete mode 100644 internal/pkg/agent/program/testdata/audit_config-auditbeat.yml delete mode 100644 internal/pkg/agent/program/testdata/audit_config.yml delete mode 100644 internal/pkg/agent/program/testdata/enabled_false.yml delete mode 100644 internal/pkg/agent/program/testdata/enabled_output_false.yml delete mode 100644 internal/pkg/agent/program/testdata/enabled_output_true-filebeat.yml delete mode 100644 internal/pkg/agent/program/testdata/enabled_output_true.yml delete mode 100644 internal/pkg/agent/program/testdata/enabled_true-filebeat.yml delete mode 100644 internal/pkg/agent/program/testdata/enabled_true.yml delete mode 100644 internal/pkg/agent/program/testdata/endpoint_arm.yml delete mode 100644 internal/pkg/agent/program/testdata/endpoint_basic-endpoint-security.yml delete mode 100644 internal/pkg/agent/program/testdata/endpoint_basic.yml delete mode 100644 internal/pkg/agent/program/testdata/endpoint_no_fleet.yml delete mode 100644 
internal/pkg/agent/program/testdata/endpoint_unknown_output.yml delete mode 100644 internal/pkg/agent/program/testdata/fleet_server-fleet-server.yml delete mode 100644 internal/pkg/agent/program/testdata/fleet_server.yml delete mode 100644 internal/pkg/agent/program/testdata/journal_config.yml delete mode 100644 internal/pkg/agent/program/testdata/logstash_config-endpoint-security.yml delete mode 100644 internal/pkg/agent/program/testdata/logstash_config-filebeat-elasticsearch.yml delete mode 100644 internal/pkg/agent/program/testdata/logstash_config-filebeat.yml delete mode 100644 internal/pkg/agent/program/testdata/logstash_config-fleet-server.yml delete mode 100644 internal/pkg/agent/program/testdata/logstash_config-heartbeat.yml delete mode 100644 internal/pkg/agent/program/testdata/logstash_config-metricbeat.yml delete mode 100644 internal/pkg/agent/program/testdata/logstash_config-packetbeat.yml delete mode 100644 internal/pkg/agent/program/testdata/logstash_config.yml delete mode 100644 internal/pkg/agent/program/testdata/namespace-endpoint-security.yml delete mode 100644 internal/pkg/agent/program/testdata/namespace-filebeat.yml delete mode 100644 internal/pkg/agent/program/testdata/namespace-fleet-server.yml delete mode 100644 internal/pkg/agent/program/testdata/namespace-heartbeat.yml delete mode 100644 internal/pkg/agent/program/testdata/namespace-metricbeat.yml delete mode 100644 internal/pkg/agent/program/testdata/namespace-packetbeat.yml delete mode 100644 internal/pkg/agent/program/testdata/namespace.yml delete mode 100644 internal/pkg/agent/program/testdata/single_config-endpoint-security.yml delete mode 100644 internal/pkg/agent/program/testdata/single_config-filebeat.yml delete mode 100644 internal/pkg/agent/program/testdata/single_config-fleet-server.yml delete mode 100644 internal/pkg/agent/program/testdata/single_config-heartbeat.yml delete mode 100644 internal/pkg/agent/program/testdata/single_config-metricbeat.yml delete mode 100644 internal/pkg/agent/program/testdata/single_config-packetbeat.yml delete mode 100644 internal/pkg/agent/program/testdata/single_config.yml delete mode 100644 internal/pkg/agent/program/testdata/synthetics_config-heartbeat.yml delete mode 100644 internal/pkg/agent/program/testdata/synthetics_config.yml delete mode 100644 internal/pkg/agent/program/testdata/usecases/enabled_output_true.yml delete mode 100644 internal/pkg/agent/program/testdata/usecases/enabled_true.yml delete mode 100644 internal/pkg/agent/program/testdata/usecases/endpoint_basic.yml delete mode 100644 internal/pkg/agent/program/testdata/usecases/fleet_server.yml delete mode 100644 internal/pkg/agent/program/testdata/usecases/generated/enabled_output_true.filebeat.golden.yml delete mode 100644 internal/pkg/agent/program/testdata/usecases/generated/enabled_true.filebeat.golden.yml delete mode 100644 internal/pkg/agent/program/testdata/usecases/generated/endpoint_basic.endpoint-security.golden.yml delete mode 100644 internal/pkg/agent/program/testdata/usecases/generated/fleet_server.fleet-server.golden.yml delete mode 100644 internal/pkg/agent/program/testdata/usecases/generated/namespace.endpoint-security.golden.yml delete mode 100644 internal/pkg/agent/program/testdata/usecases/generated/namespace.filebeat.golden.yml delete mode 100644 internal/pkg/agent/program/testdata/usecases/generated/namespace.fleet-server.golden.yml delete mode 100644 internal/pkg/agent/program/testdata/usecases/generated/namespace.heartbeat.golden.yml delete mode 100644 
internal/pkg/agent/program/testdata/usecases/generated/namespace.metricbeat.golden.yml delete mode 100644 internal/pkg/agent/program/testdata/usecases/generated/namespace.packetbeat.golden.yml delete mode 100644 internal/pkg/agent/program/testdata/usecases/generated/single_config.endpoint-security.golden.yml delete mode 100644 internal/pkg/agent/program/testdata/usecases/generated/single_config.filebeat.golden.yml delete mode 100644 internal/pkg/agent/program/testdata/usecases/generated/single_config.fleet-server.golden.yml delete mode 100644 internal/pkg/agent/program/testdata/usecases/generated/single_config.heartbeat.golden.yml delete mode 100644 internal/pkg/agent/program/testdata/usecases/generated/single_config.metricbeat.golden.yml delete mode 100644 internal/pkg/agent/program/testdata/usecases/generated/single_config.packetbeat.golden.yml delete mode 100644 internal/pkg/agent/program/testdata/usecases/generated/synthetics_config.heartbeat.golden.yml delete mode 100644 internal/pkg/agent/program/testdata/usecases/namespace.yml delete mode 100644 internal/pkg/agent/program/testdata/usecases/single_config.yml delete mode 100644 internal/pkg/agent/program/testdata/usecases/synthetics_config.yml delete mode 100644 internal/pkg/artifact/install/atomic/atomic_installer.go delete mode 100644 internal/pkg/artifact/install/atomic/atomic_installer_test.go delete mode 100644 internal/pkg/artifact/install/awaitable/awaitable_installer.go delete mode 100644 internal/pkg/artifact/install/dir/dir_checker.go delete mode 100644 internal/pkg/artifact/install/hooks/hooks_installer.go delete mode 100644 internal/pkg/artifact/install/installer.go delete mode 100644 internal/pkg/artifact/install/tar/tar_installer.go delete mode 100644 internal/pkg/artifact/install/zip/zip_installer.go delete mode 100644 internal/pkg/artifact/uninstall/hooks/hooks_uninstaller.go delete mode 100644 internal/pkg/artifact/uninstall/uninstaller.go delete mode 100644 internal/pkg/core/app/descriptor.go delete mode 100644 internal/pkg/core/app/execution_context.go delete mode 100644 internal/pkg/core/app/process_cred.go delete mode 100644 internal/pkg/core/app/process_cred_other.go delete mode 100644 internal/pkg/core/app/spec.go delete mode 100644 internal/pkg/core/app/tag.go delete mode 100644 internal/pkg/core/plugin/common.go delete mode 100644 internal/pkg/core/plugin/common_test.go delete mode 100644 internal/pkg/core/plugin/process/app.go delete mode 100644 internal/pkg/core/plugin/process/configure.go delete mode 100644 internal/pkg/core/plugin/process/start.go delete mode 100644 internal/pkg/core/plugin/process/status.go delete mode 100644 internal/pkg/core/plugin/process/stdlogger.go delete mode 100644 internal/pkg/core/plugin/process/stdlogger_test.go delete mode 100644 internal/pkg/core/plugin/process/watch_posix.go delete mode 100644 internal/pkg/core/plugin/process/watch_windows.go delete mode 100644 internal/pkg/core/plugin/service/app.go delete mode 100644 internal/pkg/core/state/state.go delete mode 100644 internal/pkg/core/status/reporter.go delete mode 100644 internal/pkg/core/status/reporter_test.go delete mode 100644 internal/spec/apm-server.yml delete mode 100644 internal/spec/auditbeat.yml delete mode 100644 internal/spec/cloudbeat.yml delete mode 100644 internal/spec/endpoint.yml delete mode 100644 internal/spec/filebeat.yml delete mode 100644 internal/spec/fleet-server.yml delete mode 100644 internal/spec/heartbeat.yml delete mode 100644 internal/spec/metricbeat.yml delete mode 100644 
internal/spec/osquerybeat.yml delete mode 100644 internal/spec/packetbeat.yml diff --git a/.gitignore b/.gitignore index 7bfae9cc392..3939307f99c 100644 --- a/.gitignore +++ b/.gitignore @@ -54,13 +54,6 @@ elastic-agent.yml.* fleet.yml fleet.yml.lock fleet.yml.old -internal/pkg/agent/operation/tests/scripts/short--1.0.yml -internal/pkg/agent/operation/tests/scripts/configurable-1.0-darwin-x86/configurable -internal/pkg/agent/operation/tests/scripts/servicable-1.0-darwin-x86/configurable -internal/pkg/agent/operation/tests/scripts/configurable-1.0-darwin-x86_64/configurable -internal/pkg/agent/operation/tests/scripts/serviceable-1.0-darwin-x86_64/serviceable -internal/pkg/agent/operation/tests/scripts/configurable -internal/pkg/agent/operation/tests/scripts/serviceable internal/pkg/agent/application/fleet.yml internal/pkg/agent/transpiler/tests/exec-1.0-darwin-x86_64/exec pkg/component/fake/fake diff --git a/dev-tools/cmd/buildspec/buildspec.go b/dev-tools/cmd/buildspec/buildspec.go deleted file mode 100644 index ea16fbe6968..00000000000 --- a/dev-tools/cmd/buildspec/buildspec.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package main - -import ( - "bytes" - "flag" - "fmt" - "go/format" - "io/ioutil" - "os" - "text/template" - - lic "github.com/elastic/elastic-agent/dev-tools/licenses" - "github.com/elastic/elastic-agent/pkg/packer" -) - -var ( - flagSet *flag.FlagSet - input string - output string - license string -) - -func init() { - // NOTE: This uses its own flagSet because dev-tools/licenses sets flags. - flagSet = flag.NewFlagSet("buildspec", flag.ExitOnError) - flagSet.StringVar(&input, "in", "", "Source of input. \"-\" means reading from stdin") - flagSet.StringVar(&output, "out", "-", "Output path. \"-\" means writing to stdout") - flagSet.StringVar(&license, "license", "Elastic", "License header for generated file.") -} - -var tmpl = template.Must(template.New("specs").Parse(` -{{ .License }} -// Code generated by elastic-agent/internals/dev-tools/buildspec/buildspec.go - DO NOT EDIT. 
- -package program - -import ( - "strings" - - "github.com/elastic/elastic-agent/pkg/packer" -) - -var Supported []Spec -var SupportedMap map[string]Spec - -func init() { - // Packed Files - {{ range $i, $f := .Files -}} - // {{ $f }} - {{ end -}} - unpacked := packer.MustUnpack("{{ .Pack }}") - SupportedMap = make(map[string]Spec) - - for f, v := range unpacked { - s, err:= NewSpecFromBytes(v) - if err != nil { - panic("Cannot read spec from " + f + ": " + err.Error()) - } - Supported = append(Supported, s) - SupportedMap[strings.ToLower(s.Cmd)] = s - } -} -`)) - -func main() { - if err := flagSet.Parse(os.Args[1:]); err != nil { - fmt.Fprintf(os.Stderr, "error: %v", err) - os.Exit(1) - } - - if len(input) == 0 { - fmt.Fprintln(os.Stderr, "Invalid input source") - os.Exit(1) - } - - l, err := lic.Find(license) - if err != nil { - fmt.Fprintf(os.Stderr, "problem to retrieve the license, error: %+v", err) - os.Exit(1) - } - - data, err := gen(l) - if err != nil { - fmt.Fprintf(os.Stderr, "Error while generating the file, err: %+v\n", err) - os.Exit(1) - } - - if output == "-" { - os.Stdout.Write(data) - return - } else { - if err = ioutil.WriteFile(output, data, 0o600); err != nil { - fmt.Fprintf(os.Stderr, "Error writing data to file %q: %v\n", output, data) - os.Exit(1) - } - } - - return -} - -func gen(l string) ([]byte, error) { - pack, files, err := packer.Pack(input) - if err != nil { - return nil, err - } - - var buf bytes.Buffer - err = tmpl.Execute(&buf, struct { - Pack string - Files []string - License string - }{ - Pack: pack, - Files: files, - License: l, - }) - if err != nil { - return nil, err - } - - formatted, err := format.Source(buf.Bytes()) - if err != nil { - return nil, err - } - - return formatted, nil -} diff --git a/internal/pkg/agent/application/application.go b/internal/pkg/agent/application/application.go index 788e189cb60..7aabbba127c 100644 --- a/internal/pkg/agent/application/application.go +++ b/internal/pkg/agent/application/application.go @@ -7,13 +7,9 @@ package application import ( "fmt" "path/filepath" - goruntime "runtime" - "strconv" "go.elastic.co/apm" - "github.com/elastic/go-sysinfo" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" @@ -35,18 +31,15 @@ type discoverFunc func() ([]string, error) // ErrNoConfiguration is returned when no configuration are found. var ErrNoConfiguration = errors.New("no configuration found", errors.TypeConfig) -// PlatformModifier can modify the platform details before the runtime specifications are loaded. -type PlatformModifier func(detail component.PlatformDetail) component.PlatformDetail - // New creates a new Agent and bootstrap the required subsystem. func New( log *logger.Logger, agentInfo *info.AgentInfo, reexec coordinator.ReExecManager, tracer *apm.Tracer, - modifiers ...PlatformModifier, + modifiers ...component.PlatformModifier, ) (*coordinator.Coordinator, error) { - platform, err := getPlatformDetail(modifiers...) + platform, err := component.LoadPlatformDetail(modifiers...) 
if err != nil { return nil, fmt.Errorf("failed to gather system information: %w", err) } @@ -137,28 +130,6 @@ func New( return coord, nil } -func getPlatformDetail(modifiers ...PlatformModifier) (component.PlatformDetail, error) { - info, err := sysinfo.Host() - if err != nil { - return component.PlatformDetail{}, err - } - os := info.Info().OS - detail := component.PlatformDetail{ - Platform: component.Platform{ - OS: goruntime.GOOS, - Arch: goruntime.GOARCH, - GOOS: goruntime.GOOS, - }, - Family: os.Family, - Major: strconv.Itoa(os.Major), - Minor: strconv.Itoa(os.Minor), - } - for _, modifier := range modifiers { - detail = modifier(detail) - } - return detail, nil -} - func mergeFleetConfig(rawConfig *config.Config) (storage.Store, *configuration.Configuration, error) { path := paths.AgentConfigFile() store := storage.NewEncryptedDiskStore(path) diff --git a/internal/pkg/artifact/artifact.go b/internal/pkg/agent/application/upgrade/artifact/artifact.go similarity index 74% rename from internal/pkg/artifact/artifact.go rename to internal/pkg/agent/application/upgrade/artifact/artifact.go index ebe84b95db7..c0e8c84a9d8 100644 --- a/internal/pkg/artifact/artifact.go +++ b/internal/pkg/agent/application/upgrade/artifact/artifact.go @@ -9,7 +9,6 @@ import ( "path/filepath" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" ) var packageArchMap = map[string]string{ @@ -24,20 +23,27 @@ var packageArchMap = map[string]string{ "darwin-binary-universal": "darwin-universal.tar.gz", } +// Artifact provides info for fetching from artifact store. +type Artifact struct { + Name string + Cmd string + Artifact string +} + // GetArtifactName constructs a path to a downloaded artifact -func GetArtifactName(spec program.Spec, version, operatingSystem, arch string) (string, error) { +func GetArtifactName(a Artifact, version, operatingSystem, arch string) (string, error) { key := fmt.Sprintf("%s-binary-%s", operatingSystem, arch) suffix, found := packageArchMap[key] if !found { return "", errors.New(fmt.Sprintf("'%s' is not a valid combination for a package", key), errors.TypeConfig) } - return fmt.Sprintf("%s-%s-%s", spec.CommandName(), version, suffix), nil + return fmt.Sprintf("%s-%s-%s", a.Cmd, version, suffix), nil } // GetArtifactPath returns a full path of artifact for a program in specific version -func GetArtifactPath(spec program.Spec, version, operatingSystem, arch, targetDir string) (string, error) { - artifactName, err := GetArtifactName(spec, version, operatingSystem, arch) +func GetArtifactPath(a Artifact, version, operatingSystem, arch, targetDir string) (string, error) { + artifactName, err := GetArtifactName(a, version, operatingSystem, arch) if err != nil { return "", err } diff --git a/internal/pkg/artifact/config.go b/internal/pkg/agent/application/upgrade/artifact/config.go similarity index 100% rename from internal/pkg/artifact/config.go rename to internal/pkg/agent/application/upgrade/artifact/config.go diff --git a/internal/pkg/artifact/download/composed/downloader.go b/internal/pkg/agent/application/upgrade/artifact/download/composed/downloader.go similarity index 79% rename from internal/pkg/artifact/download/composed/downloader.go rename to internal/pkg/agent/application/upgrade/artifact/download/composed/downloader.go index 0b8504172f3..84e353ff661 100644 --- a/internal/pkg/artifact/download/composed/downloader.go +++ 
b/internal/pkg/agent/application/upgrade/artifact/download/composed/downloader.go @@ -10,8 +10,8 @@ import ( "github.com/hashicorp/go-multierror" "go.elastic.co/apm" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/artifact/download" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download" ) // Downloader is a downloader with a predefined set of downloaders. @@ -34,13 +34,13 @@ func NewDownloader(downloaders ...download.Downloader) *Downloader { // Download fetches the package from configured source. // Returns absolute path to downloaded package and an error. -func (e *Downloader) Download(ctx context.Context, spec program.Spec, version string) (string, error) { +func (e *Downloader) Download(ctx context.Context, a artifact.Artifact, version string) (string, error) { var err error span, ctx := apm.StartSpan(ctx, "download", "app.internal") defer span.End() for _, d := range e.dd { - s, e := d.Download(ctx, spec, version) + s, e := d.Download(ctx, a, version) if e == nil { return s, nil } diff --git a/internal/pkg/artifact/download/composed/downloader_test.go b/internal/pkg/agent/application/upgrade/artifact/download/composed/downloader_test.go similarity index 81% rename from internal/pkg/artifact/download/composed/downloader_test.go rename to internal/pkg/agent/application/upgrade/artifact/download/composed/downloader_test.go index 4964c090ef9..5d97ed167a9 100644 --- a/internal/pkg/artifact/download/composed/downloader_test.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/composed/downloader_test.go @@ -9,17 +9,17 @@ import ( "errors" "testing" - "github.com/stretchr/testify/assert" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/artifact/download" + "github.com/stretchr/testify/assert" ) type FailingDownloader struct { called bool } -func (d *FailingDownloader) Download(ctx context.Context, _ program.Spec, _ string) (string, error) { +func (d *FailingDownloader) Download(ctx context.Context, _ artifact.Artifact, _ string) (string, error) { d.called = true return "", errors.New("failing") } @@ -30,7 +30,7 @@ type SuccDownloader struct { called bool } -func (d *SuccDownloader) Download(ctx context.Context, _ program.Spec, _ string) (string, error) { +func (d *SuccDownloader) Download(ctx context.Context, _ artifact.Artifact, _ string) (string, error) { d.called = true return "succ", nil } @@ -59,7 +59,7 @@ func TestComposed(t *testing.T) { for _, tc := range testCases { d := NewDownloader(tc.downloaders[0], tc.downloaders[1]) - r, _ := d.Download(context.TODO(), program.Spec{Name: "a"}, "b") + r, _ := d.Download(context.TODO(), artifact.Artifact{Name: "a"}, "b") assert.Equal(t, tc.expectedResult, r == "succ") diff --git a/internal/pkg/artifact/download/composed/verifier.go b/internal/pkg/agent/application/upgrade/artifact/download/composed/verifier.go similarity index 83% rename from internal/pkg/artifact/download/composed/verifier.go rename to internal/pkg/agent/application/upgrade/artifact/download/composed/verifier.go index 9fb60d20007..26c714d8c52 100644 --- 
a/internal/pkg/artifact/download/composed/verifier.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/composed/verifier.go @@ -9,8 +9,8 @@ import ( "github.com/hashicorp/go-multierror" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/artifact/download" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download" ) // Verifier is a verifier with a predefined set of verifiers. @@ -32,13 +32,13 @@ func NewVerifier(verifiers ...download.Verifier) *Verifier { } // Verify checks the package from configured source. -func (e *Verifier) Verify(spec program.Spec, version string) error { +func (e *Verifier) Verify(a artifact.Artifact, version string) error { var err error var checksumMismatchErr *download.ChecksumMismatchError var invalidSignatureErr *download.InvalidSignatureError for _, v := range e.vv { - e := v.Verify(spec, version) + e := v.Verify(a, version) if e == nil { // Success return nil diff --git a/internal/pkg/artifact/download/composed/verifier_test.go b/internal/pkg/agent/application/upgrade/artifact/download/composed/verifier_test.go similarity index 83% rename from internal/pkg/artifact/download/composed/verifier_test.go rename to internal/pkg/agent/application/upgrade/artifact/download/composed/verifier_test.go index a6b4b3a603e..110717627dc 100644 --- a/internal/pkg/artifact/download/composed/verifier_test.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/composed/verifier_test.go @@ -8,17 +8,17 @@ import ( "errors" "testing" - "github.com/stretchr/testify/assert" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/artifact/download" + "github.com/stretchr/testify/assert" ) type ErrorVerifier struct { called bool } -func (d *ErrorVerifier) Verify(spec program.Spec, version string) error { +func (d *ErrorVerifier) Verify(a artifact.Artifact, version string) error { d.called = true return errors.New("failing") } @@ -29,7 +29,7 @@ type FailVerifier struct { called bool } -func (d *FailVerifier) Verify(spec program.Spec, version string) error { +func (d *FailVerifier) Verify(a artifact.Artifact, version string) error { d.called = true return &download.InvalidSignatureError{} } @@ -40,7 +40,7 @@ type SuccVerifier struct { called bool } -func (d *SuccVerifier) Verify(spec program.Spec, version string) error { +func (d *SuccVerifier) Verify(a artifact.Artifact, version string) error { d.called = true return nil } @@ -74,7 +74,7 @@ func TestVerifier(t *testing.T) { for _, tc := range testCases { d := NewVerifier(tc.verifiers[0], tc.verifiers[1], tc.verifiers[2]) - err := d.Verify(program.Spec{Name: "a", Cmd: "a", Artifact: "a/a"}, "b") + err := d.Verify(artifact.Artifact{Name: "a", Cmd: "a", Artifact: "a/a"}, "b") assert.Equal(t, tc.expectedResult, err == nil) diff --git a/internal/pkg/artifact/download/downloader.go b/internal/pkg/agent/application/upgrade/artifact/download/downloader.go similarity index 68% rename from internal/pkg/artifact/download/downloader.go rename to internal/pkg/agent/application/upgrade/artifact/download/downloader.go index 
f3f134ba588..19e102ab3c9 100644 --- a/internal/pkg/artifact/download/downloader.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/downloader.go @@ -7,10 +7,10 @@ package download import ( "context" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" ) // Downloader is an interface allowing download of an artifact type Downloader interface { - Download(ctx context.Context, spec program.Spec, version string) (string, error) + Download(ctx context.Context, a artifact.Artifact, version string) (string, error) } diff --git a/internal/pkg/artifact/download/fs/downloader.go b/internal/pkg/agent/application/upgrade/artifact/download/fs/downloader.go similarity index 75% rename from internal/pkg/artifact/download/fs/downloader.go rename to internal/pkg/agent/application/upgrade/artifact/download/fs/downloader.go index 9d518a1c7fb..46e85defc31 100644 --- a/internal/pkg/artifact/download/fs/downloader.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/fs/downloader.go @@ -14,9 +14,8 @@ import ( "go.elastic.co/apm" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/artifact" ) const ( @@ -39,7 +38,7 @@ func NewDownloader(config *artifact.Config) *Downloader { // Download fetches the package from configured source. // Returns absolute path to downloaded package and an error. -func (e *Downloader) Download(ctx context.Context, spec program.Spec, version string) (_ string, err error) { +func (e *Downloader) Download(ctx context.Context, a artifact.Artifact, version string) (_ string, err error) { span, ctx := apm.StartSpan(ctx, "download", "app.internal") defer span.End() downloadedFiles := make([]string, 0, 2) @@ -53,24 +52,24 @@ func (e *Downloader) Download(ctx context.Context, spec program.Spec, version st }() // download from source to dest - path, err := e.download(e.config.OS(), spec, version) + path, err := e.download(e.config.OS(), a, version) downloadedFiles = append(downloadedFiles, path) if err != nil { return "", err } - hashPath, err := e.downloadHash(e.config.OS(), spec, version) + hashPath, err := e.downloadHash(e.config.OS(), a, version) downloadedFiles = append(downloadedFiles, hashPath) return path, err } -func (e *Downloader) download(operatingSystem string, spec program.Spec, version string) (string, error) { - filename, err := artifact.GetArtifactName(spec, version, operatingSystem, e.config.Arch()) +func (e *Downloader) download(operatingSystem string, a artifact.Artifact, version string) (string, error) { + filename, err := artifact.GetArtifactName(a, version, operatingSystem, e.config.Arch()) if err != nil { return "", errors.New(err, "generating package name failed") } - fullPath, err := artifact.GetArtifactPath(spec, version, operatingSystem, e.config.Arch(), e.config.TargetDirectory) + fullPath, err := artifact.GetArtifactPath(a, version, operatingSystem, e.config.Arch(), e.config.TargetDirectory) if err != nil { return "", errors.New(err, "generating package path failed") } @@ -78,13 +77,13 @@ func (e *Downloader) download(operatingSystem string, spec program.Spec, version return e.downloadFile(filename, fullPath) } 
-func (e *Downloader) downloadHash(operatingSystem string, spec program.Spec, version string) (string, error) { - filename, err := artifact.GetArtifactName(spec, version, operatingSystem, e.config.Arch()) +func (e *Downloader) downloadHash(operatingSystem string, a artifact.Artifact, version string) (string, error) { + filename, err := artifact.GetArtifactName(a, version, operatingSystem, e.config.Arch()) if err != nil { return "", errors.New(err, "generating package name failed") } - fullPath, err := artifact.GetArtifactPath(spec, version, operatingSystem, e.config.Arch(), e.config.TargetDirectory) + fullPath, err := artifact.GetArtifactPath(a, version, operatingSystem, e.config.Arch(), e.config.TargetDirectory) if err != nil { return "", errors.New(err, "generating package path failed") } diff --git a/internal/pkg/artifact/download/fs/testdata/drop/beat-8.0.0-darwin-x86_64.tar.gz b/internal/pkg/agent/application/upgrade/artifact/download/fs/testdata/drop/beat-8.0.0-darwin-x86_64.tar.gz similarity index 100% rename from internal/pkg/artifact/download/fs/testdata/drop/beat-8.0.0-darwin-x86_64.tar.gz rename to internal/pkg/agent/application/upgrade/artifact/download/fs/testdata/drop/beat-8.0.0-darwin-x86_64.tar.gz diff --git a/internal/pkg/artifact/download/fs/testdata/drop/beat-8.0.0-darwin-x86_64.tar.gz.sha512 b/internal/pkg/agent/application/upgrade/artifact/download/fs/testdata/drop/beat-8.0.0-darwin-x86_64.tar.gz.sha512 similarity index 100% rename from internal/pkg/artifact/download/fs/testdata/drop/beat-8.0.0-darwin-x86_64.tar.gz.sha512 rename to internal/pkg/agent/application/upgrade/artifact/download/fs/testdata/drop/beat-8.0.0-darwin-x86_64.tar.gz.sha512 diff --git a/internal/pkg/artifact/download/fs/verifier.go b/internal/pkg/agent/application/upgrade/artifact/download/fs/verifier.go similarity index 88% rename from internal/pkg/artifact/download/fs/verifier.go rename to internal/pkg/agent/application/upgrade/artifact/download/fs/verifier.go index 441f7f68f26..4913d1731d2 100644 --- a/internal/pkg/artifact/download/fs/verifier.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/fs/verifier.go @@ -10,10 +10,9 @@ import ( "os" "path/filepath" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/artifact" - "github.com/elastic/elastic-agent/internal/pkg/artifact/download" ) const ( @@ -47,8 +46,8 @@ func NewVerifier(config *artifact.Config, allowEmptyPgp bool, pgp []byte) (*Veri // Verify checks downloaded package on preconfigured // location against a key stored on elastic.co website. 
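For context on the relocated fs downloader above, the caller side now hands over an artifact.Artifact instead of a program.Spec. A minimal sketch, not part of the patch (the fetchFromDrop wrapper, the example package name, and the version string are illustrative; the types and constructors are the ones shown in this diff):

package fsdownload // hypothetical example package

import (
	"context"

	"github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact"
	"github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/fs"
)

// fetchFromDrop sketches how the relocated fs downloader is driven: the
// Artifact value carries what used to come from program.Spec, and Download
// fetches the package plus its .sha512 sidecar into cfg.TargetDirectory,
// returning the package path.
func fetchFromDrop(ctx context.Context, cfg *artifact.Config) (string, error) {
	d := fs.NewDownloader(cfg)
	fb := artifact.Artifact{Name: "Filebeat", Cmd: "filebeat", Artifact: "beat/filebeat"}
	return d.Download(ctx, fb, "8.0.0") // version is illustrative
}
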
-func (v *Verifier) Verify(spec program.Spec, version string) error { - filename, err := artifact.GetArtifactName(spec, version, v.config.OS(), v.config.Arch()) +func (v *Verifier) Verify(a artifact.Artifact, version string) error { + filename, err := artifact.GetArtifactName(a, version, v.config.OS(), v.config.Arch()) if err != nil { return errors.New(err, "retrieving package name") } diff --git a/internal/pkg/artifact/download/fs/verifier_test.go b/internal/pkg/agent/application/upgrade/artifact/download/fs/verifier_test.go similarity index 91% rename from internal/pkg/artifact/download/fs/verifier_test.go rename to internal/pkg/agent/application/upgrade/artifact/download/fs/verifier_test.go index a758f90f300..59f0fcb2b57 100644 --- a/internal/pkg/artifact/download/fs/verifier_test.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/fs/verifier_test.go @@ -18,9 +18,9 @@ import ( "github.com/stretchr/testify/require" "github.com/elastic/elastic-agent-libs/transport/httpcommon" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/artifact" - "github.com/elastic/elastic-agent/internal/pkg/artifact/download" + + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download" "github.com/elastic/elastic-agent/internal/pkg/release" ) @@ -29,7 +29,7 @@ const ( ) var ( - beatSpec = program.Spec{Name: "Filebeat", Cmd: "filebeat", Artifact: "beat/filebeat"} + beatSpec = artifact.Artifact{Name: "Filebeat", Cmd: "filebeat", Artifact: "beat/filebeat"} ) func TestFetchVerify(t *testing.T) { @@ -38,7 +38,7 @@ func TestFetchVerify(t *testing.T) { installPath := filepath.Join("testdata", "install") targetPath := filepath.Join("testdata", "download") ctx := context.Background() - s := program.Spec{Name: "Beat", Cmd: "beat", Artifact: "beats/filebeat"} + s := artifact.Artifact{Name: "Beat", Cmd: "beat", Artifact: "beats/filebeat"} version := "8.0.0" targetFilePath := filepath.Join(targetPath, "beat-8.0.0-darwin-x86_64.tar.gz") @@ -216,8 +216,8 @@ func TestVerify(t *testing.T) { os.RemoveAll(config.DropPath) } -func prepareTestCase(beatSpec program.Spec, version string, cfg *artifact.Config) error { - filename, err := artifact.GetArtifactName(beatSpec, version, cfg.OperatingSystem, cfg.Architecture) +func prepareTestCase(a artifact.Artifact, version string, cfg *artifact.Config) error { + filename, err := artifact.GetArtifactName(a, version, cfg.OperatingSystem, cfg.Architecture) if err != nil { return err } diff --git a/internal/pkg/artifact/download/http/downloader.go b/internal/pkg/agent/application/upgrade/artifact/download/http/downloader.go similarity index 91% rename from internal/pkg/artifact/download/http/downloader.go rename to internal/pkg/agent/application/upgrade/artifact/download/http/downloader.go index 5f68816c58a..2f9a7748660 100644 --- a/internal/pkg/artifact/download/http/downloader.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/http/downloader.go @@ -21,9 +21,8 @@ import ( "github.com/elastic/elastic-agent-libs/atomic" "github.com/elastic/elastic-agent-libs/transport/httpcommon" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - 
"github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/artifact" "github.com/elastic/elastic-agent/internal/pkg/release" ) @@ -75,8 +74,8 @@ func NewDownloaderWithClient(log progressLogger, config *artifact.Config, client // Download fetches the package from configured source. // Returns absolute path to downloaded package and an error. -func (e *Downloader) Download(ctx context.Context, spec program.Spec, version string) (_ string, err error) { - remoteArtifact := spec.Artifact +func (e *Downloader) Download(ctx context.Context, a artifact.Artifact, version string) (_ string, err error) { + remoteArtifact := a.Artifact downloadedFiles := make([]string, 0, 2) defer func() { if err != nil { @@ -87,13 +86,13 @@ func (e *Downloader) Download(ctx context.Context, spec program.Spec, version st }() // download from source to dest - path, err := e.download(ctx, remoteArtifact, e.config.OS(), spec, version) + path, err := e.download(ctx, remoteArtifact, e.config.OS(), a, version) downloadedFiles = append(downloadedFiles, path) if err != nil { return "", err } - hashPath, err := e.downloadHash(ctx, remoteArtifact, e.config.OS(), spec, version) + hashPath, err := e.downloadHash(ctx, remoteArtifact, e.config.OS(), a, version) downloadedFiles = append(downloadedFiles, hashPath) return path, err } @@ -115,13 +114,13 @@ func (e *Downloader) composeURI(artifactName, packageName string) (string, error return uri.String(), nil } -func (e *Downloader) download(ctx context.Context, remoteArtifact string, operatingSystem string, spec program.Spec, version string) (string, error) { - filename, err := artifact.GetArtifactName(spec, version, operatingSystem, e.config.Arch()) +func (e *Downloader) download(ctx context.Context, remoteArtifact string, operatingSystem string, a artifact.Artifact, version string) (string, error) { + filename, err := artifact.GetArtifactName(a, version, operatingSystem, e.config.Arch()) if err != nil { return "", errors.New(err, "generating package name failed") } - fullPath, err := artifact.GetArtifactPath(spec, version, operatingSystem, e.config.Arch(), e.config.TargetDirectory) + fullPath, err := artifact.GetArtifactPath(a, version, operatingSystem, e.config.Arch(), e.config.TargetDirectory) if err != nil { return "", errors.New(err, "generating package path failed") } @@ -129,13 +128,13 @@ func (e *Downloader) download(ctx context.Context, remoteArtifact string, operat return e.downloadFile(ctx, remoteArtifact, filename, fullPath) } -func (e *Downloader) downloadHash(ctx context.Context, remoteArtifact string, operatingSystem string, spec program.Spec, version string) (string, error) { - filename, err := artifact.GetArtifactName(spec, version, operatingSystem, e.config.Arch()) +func (e *Downloader) downloadHash(ctx context.Context, remoteArtifact string, operatingSystem string, a artifact.Artifact, version string) (string, error) { + filename, err := artifact.GetArtifactName(a, version, operatingSystem, e.config.Arch()) if err != nil { return "", errors.New(err, "generating package name failed") } - fullPath, err := artifact.GetArtifactPath(spec, version, operatingSystem, e.config.Arch(), e.config.TargetDirectory) + fullPath, err := artifact.GetArtifactPath(a, version, operatingSystem, e.config.Arch(), e.config.TargetDirectory) if err != nil { return "", errors.New(err, "generating package path failed") } diff --git a/internal/pkg/artifact/download/http/downloader_test.go 
b/internal/pkg/agent/application/upgrade/artifact/download/http/downloader_test.go similarity index 98% rename from internal/pkg/artifact/download/http/downloader_test.go rename to internal/pkg/agent/application/upgrade/artifact/download/http/downloader_test.go index 6fa2777c02f..5a6762b40fc 100644 --- a/internal/pkg/artifact/download/http/downloader_test.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/http/downloader_test.go @@ -16,12 +16,13 @@ import ( "testing" "time" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" + "github.com/docker/go-units" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/elastic/elastic-agent-libs/transport/httpcommon" - "github.com/elastic/elastic-agent/internal/pkg/artifact" ) func TestDownloadBodyError(t *testing.T) { diff --git a/internal/pkg/artifact/download/http/elastic_test.go b/internal/pkg/agent/application/upgrade/artifact/download/http/elastic_test.go similarity index 81% rename from internal/pkg/artifact/download/http/elastic_test.go rename to internal/pkg/agent/application/upgrade/artifact/download/http/elastic_test.go index e76bc92fd06..66bdad9dcd4 100644 --- a/internal/pkg/artifact/download/http/elastic_test.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/http/elastic_test.go @@ -21,8 +21,8 @@ import ( "github.com/stretchr/testify/require" "github.com/elastic/elastic-agent-libs/transport/httpcommon" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/artifact" + + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -33,7 +33,7 @@ const ( ) var ( - beatSpec = program.Spec{ + beatSpec = artifact.Artifact{ Name: "filebeat", Cmd: "filebeat", Artifact: "beats/filebeat", @@ -165,16 +165,16 @@ func getRandomTestCases() []testCase { func getElasticCoClient() http.Client { correctValues := map[string]struct{}{ - fmt.Sprintf("%s-%s-%s", beatSpec.CommandName(), version, "i386.deb"): {}, - fmt.Sprintf("%s-%s-%s", beatSpec.CommandName(), version, "amd64.deb"): {}, - fmt.Sprintf("%s-%s-%s", beatSpec.CommandName(), version, "i686.rpm"): {}, - fmt.Sprintf("%s-%s-%s", beatSpec.CommandName(), version, "x86_64.rpm"): {}, - fmt.Sprintf("%s-%s-%s", beatSpec.CommandName(), version, "linux-x86.tar.gz"): {}, - fmt.Sprintf("%s-%s-%s", beatSpec.CommandName(), version, "linux-arm64.tar.gz"): {}, - fmt.Sprintf("%s-%s-%s", beatSpec.CommandName(), version, "linux-x86_64.tar.gz"): {}, - fmt.Sprintf("%s-%s-%s", beatSpec.CommandName(), version, "windows-x86.zip"): {}, - fmt.Sprintf("%s-%s-%s", beatSpec.CommandName(), version, "windows-x86_64.zip"): {}, - fmt.Sprintf("%s-%s-%s", beatSpec.CommandName(), version, "darwin-x86_64.tar.gz"): {}, + fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "i386.deb"): {}, + fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "amd64.deb"): {}, + fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "i686.rpm"): {}, + fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "x86_64.rpm"): {}, + fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "linux-x86.tar.gz"): {}, + fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "linux-arm64.tar.gz"): {}, + fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "linux-x86_64.tar.gz"): {}, + fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "windows-x86.zip"): {}, + fmt.Sprintf("%s-%s-%s", 
beatSpec.Cmd, version, "windows-x86_64.zip"): {}, + fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "darwin-x86_64.tar.gz"): {}, } handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { diff --git a/internal/pkg/artifact/download/http/headers_rtt.go b/internal/pkg/agent/application/upgrade/artifact/download/http/headers_rtt.go similarity index 100% rename from internal/pkg/artifact/download/http/headers_rtt.go rename to internal/pkg/agent/application/upgrade/artifact/download/http/headers_rtt.go diff --git a/internal/pkg/artifact/download/http/headers_rtt_test.go b/internal/pkg/agent/application/upgrade/artifact/download/http/headers_rtt_test.go similarity index 100% rename from internal/pkg/artifact/download/http/headers_rtt_test.go rename to internal/pkg/agent/application/upgrade/artifact/download/http/headers_rtt_test.go diff --git a/internal/pkg/artifact/download/http/verifier.go b/internal/pkg/agent/application/upgrade/artifact/download/http/verifier.go similarity index 84% rename from internal/pkg/artifact/download/http/verifier.go rename to internal/pkg/agent/application/upgrade/artifact/download/http/verifier.go index 1fe855fa2af..4568c0f2cdd 100644 --- a/internal/pkg/artifact/download/http/verifier.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/http/verifier.go @@ -14,10 +14,10 @@ import ( "strings" "github.com/elastic/elastic-agent-libs/transport/httpcommon" + + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/artifact" - "github.com/elastic/elastic-agent/internal/pkg/artifact/download" ) const ( @@ -62,8 +62,8 @@ func NewVerifier(config *artifact.Config, allowEmptyPgp bool, pgp []byte) (*Veri // Verify checks downloaded package on preconfigured // location against a key stored on elastic.co website. 
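The elastic.co client test above enumerates the download names now derived from Artifact.Cmd rather than Spec.CommandName(). A standalone sketch of that composition (the local Artifact struct mirrors the one introduced in this patch; the version and suffix values are illustrative, and GetArtifactName performs the same formatting via packageArchMap):

package main

import "fmt"

// Artifact mirrors the struct this patch introduces; only the fields involved
// in name composition matter for this sketch.
type Artifact struct {
	Name     string
	Cmd      string
	Artifact string
}

func main() {
	fb := Artifact{Name: "filebeat", Cmd: "filebeat", Artifact: "beats/filebeat"}
	version := "8.0.0" // illustrative; the test uses its own version constant

	// GetArtifactName yields "<Cmd>-<version>-<platform suffix>", with the
	// suffix looked up in packageArchMap; the test lists a few suffixes
	// explicitly, for example:
	for _, suffix := range []string{"linux-x86_64.tar.gz", "windows-x86_64.zip", "darwin-x86_64.tar.gz"} {
		fmt.Printf("%s-%s-%s\n", fb.Cmd, version, suffix)
	}
}
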
-func (v *Verifier) Verify(spec program.Spec, version string) error { - fullPath, err := artifact.GetArtifactPath(spec, version, v.config.OS(), v.config.Arch(), v.config.TargetDirectory) +func (v *Verifier) Verify(a artifact.Artifact, version string) error { + fullPath, err := artifact.GetArtifactPath(a, version, v.config.OS(), v.config.Arch(), v.config.TargetDirectory) if err != nil { return errors.New(err, "retrieving package path") } @@ -77,7 +77,7 @@ func (v *Verifier) Verify(spec program.Spec, version string) error { return err } - if err = v.verifyAsc(spec, version); err != nil { + if err = v.verifyAsc(a, version); err != nil { var invalidSignatureErr *download.InvalidSignatureError if errors.As(err, &invalidSignatureErr) { os.Remove(fullPath + ".asc") @@ -88,23 +88,23 @@ func (v *Verifier) Verify(spec program.Spec, version string) error { return nil } -func (v *Verifier) verifyAsc(spec program.Spec, version string) error { +func (v *Verifier) verifyAsc(a artifact.Artifact, version string) error { if len(v.pgpBytes) == 0 { // no pgp available skip verification process return nil } - filename, err := artifact.GetArtifactName(spec, version, v.config.OS(), v.config.Arch()) + filename, err := artifact.GetArtifactName(a, version, v.config.OS(), v.config.Arch()) if err != nil { return errors.New(err, "retrieving package name") } - fullPath, err := artifact.GetArtifactPath(spec, version, v.config.OS(), v.config.Arch(), v.config.TargetDirectory) + fullPath, err := artifact.GetArtifactPath(a, version, v.config.OS(), v.config.Arch(), v.config.TargetDirectory) if err != nil { return errors.New(err, "retrieving package path") } - ascURI, err := v.composeURI(filename, spec.Artifact) + ascURI, err := v.composeURI(filename, a.Artifact) if err != nil { return errors.New(err, "composing URI for fetching asc file", errors.TypeNetwork) } diff --git a/internal/pkg/artifact/download/localremote/downloader.go b/internal/pkg/agent/application/upgrade/artifact/download/localremote/downloader.go similarity index 66% rename from internal/pkg/artifact/download/localremote/downloader.go rename to internal/pkg/agent/application/upgrade/artifact/download/localremote/downloader.go index d3877e9430f..72f4c1534cf 100644 --- a/internal/pkg/artifact/download/localremote/downloader.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/localremote/downloader.go @@ -5,12 +5,12 @@ package localremote import ( - "github.com/elastic/elastic-agent/internal/pkg/artifact" - "github.com/elastic/elastic-agent/internal/pkg/artifact/download" - "github.com/elastic/elastic-agent/internal/pkg/artifact/download/composed" - "github.com/elastic/elastic-agent/internal/pkg/artifact/download/fs" - "github.com/elastic/elastic-agent/internal/pkg/artifact/download/http" - "github.com/elastic/elastic-agent/internal/pkg/artifact/download/snapshot" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/composed" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/fs" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/http" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/snapshot" 
"github.com/elastic/elastic-agent/internal/pkg/release" "github.com/elastic/elastic-agent/pkg/core/logger" ) diff --git a/internal/pkg/artifact/download/localremote/verifier.go b/internal/pkg/agent/application/upgrade/artifact/download/localremote/verifier.go similarity index 68% rename from internal/pkg/artifact/download/localremote/verifier.go rename to internal/pkg/agent/application/upgrade/artifact/download/localremote/verifier.go index 9327e44539b..970ea342744 100644 --- a/internal/pkg/artifact/download/localremote/verifier.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/localremote/verifier.go @@ -5,12 +5,12 @@ package localremote import ( - "github.com/elastic/elastic-agent/internal/pkg/artifact" - "github.com/elastic/elastic-agent/internal/pkg/artifact/download" - "github.com/elastic/elastic-agent/internal/pkg/artifact/download/composed" - "github.com/elastic/elastic-agent/internal/pkg/artifact/download/fs" - "github.com/elastic/elastic-agent/internal/pkg/artifact/download/http" - "github.com/elastic/elastic-agent/internal/pkg/artifact/download/snapshot" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/composed" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/fs" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/http" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/snapshot" "github.com/elastic/elastic-agent/internal/pkg/release" "github.com/elastic/elastic-agent/pkg/core/logger" ) diff --git a/internal/pkg/artifact/download/snapshot/downloader.go b/internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader.go similarity index 91% rename from internal/pkg/artifact/download/snapshot/downloader.go rename to internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader.go index 2fbe027ae4b..b858fca0fc3 100644 --- a/internal/pkg/artifact/download/snapshot/downloader.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader.go @@ -9,10 +9,11 @@ import ( "fmt" "strings" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/http" + "github.com/elastic/elastic-agent-libs/transport/httpcommon" - "github.com/elastic/elastic-agent/internal/pkg/artifact" - "github.com/elastic/elastic-agent/internal/pkg/artifact/download" - "github.com/elastic/elastic-agent/internal/pkg/artifact/download/http" "github.com/elastic/elastic-agent/internal/pkg/release" "github.com/elastic/elastic-agent/pkg/core/logger" ) diff --git a/internal/pkg/artifact/download/snapshot/verifier.go b/internal/pkg/agent/application/upgrade/artifact/download/snapshot/verifier.go similarity index 70% rename from internal/pkg/artifact/download/snapshot/verifier.go rename to internal/pkg/agent/application/upgrade/artifact/download/snapshot/verifier.go index e4e4e667be7..31ad26a0474 100644 --- 
a/internal/pkg/artifact/download/snapshot/verifier.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/snapshot/verifier.go @@ -5,9 +5,9 @@ package snapshot import ( - "github.com/elastic/elastic-agent/internal/pkg/artifact" - "github.com/elastic/elastic-agent/internal/pkg/artifact/download" - "github.com/elastic/elastic-agent/internal/pkg/artifact/download/http" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/http" ) // NewVerifier creates a downloader which first checks local directory diff --git a/internal/pkg/artifact/download/verifier.go b/internal/pkg/agent/application/upgrade/artifact/download/verifier.go similarity index 97% rename from internal/pkg/artifact/download/verifier.go rename to internal/pkg/agent/application/upgrade/artifact/download/verifier.go index 2501cde9fe8..bca1d72f93a 100644 --- a/internal/pkg/artifact/download/verifier.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/verifier.go @@ -15,10 +15,11 @@ import ( "path/filepath" "strings" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" + "golang.org/x/crypto/openpgp" //nolint:staticcheck // crypto/openpgp is only receiving security updates. "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" ) // ChecksumMismatchError indicates the expected checksum for a file does not @@ -54,7 +55,7 @@ type Verifier interface { // *download.ChecksumMismatchError. And if the GPG signature is invalid then // Verify returns a *download.InvalidSignatureError. Use errors.As() to // check error types. 
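A short sketch of the caller-side check that this doc comment describes, using the new Artifact-based signature (the classifyVerify helper and its return strings are illustrative; the Verifier interface, the error types, and the Artifact fields come from this patch):

package verifydemo // hypothetical example package

import (
	"errors"

	"github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact"
	"github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download"
)

// classifyVerify shows the errors.As pattern the Verifier doc comment asks
// callers to use against the relocated download package.
func classifyVerify(v download.Verifier, a artifact.Artifact, version string) string {
	err := v.Verify(a, version)
	if err == nil {
		return "verified"
	}
	var mismatch *download.ChecksumMismatchError
	var badSig *download.InvalidSignatureError
	switch {
	case errors.As(err, &mismatch):
		return "checksum mismatch" // .sha512 sidecar does not match the package
	case errors.As(err, &badSig):
		return "invalid signature" // .asc GPG signature failed to validate
	default:
		return "other failure" // e.g. missing file or network error
	}
}
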
- Verify(spec program.Spec, version string) error + Verify(a artifact.Artifact, version string) error } // VerifySHA512Hash checks that a sidecar file containing a sha512 checksum diff --git a/internal/pkg/agent/application/upgrade/step_download.go b/internal/pkg/agent/application/upgrade/step_download.go index 27e4b9c9e9c..76cad7a416a 100644 --- a/internal/pkg/agent/application/upgrade/step_download.go +++ b/internal/pkg/agent/application/upgrade/step_download.go @@ -8,16 +8,17 @@ import ( "context" "strings" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" + download2 "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download" + composed2 "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/composed" + fs2 "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/fs" + http2 "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/http" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/localremote" + snapshot2 "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/snapshot" + "go.elastic.co/apm" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/artifact" - "github.com/elastic/elastic-agent/internal/pkg/artifact/download" - "github.com/elastic/elastic-agent/internal/pkg/artifact/download/composed" - "github.com/elastic/elastic-agent/internal/pkg/artifact/download/fs" - "github.com/elastic/elastic-agent/internal/pkg/artifact/download/http" - downloader "github.com/elastic/elastic-agent/internal/pkg/artifact/download/localremote" - "github.com/elastic/elastic-agent/internal/pkg/artifact/download/snapshot" "github.com/elastic/elastic-agent/internal/pkg/release" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -50,57 +51,57 @@ func (u *Upgrader) downloadArtifact(ctx context.Context, version, sourceURI stri return "", errors.New(err, "initiating fetcher") } - path, err := fetcher.Download(ctx, agentSpec, version) + path, err := fetcher.Download(ctx, agentArtifact, version) if err != nil { return "", errors.New(err, "failed upgrade of agent binary") } - if err := verifier.Verify(agentSpec, version); err != nil { + if err := verifier.Verify(agentArtifact, version); err != nil { return "", errors.New(err, "failed verification of agent binary") } return path, nil } -func newDownloader(version string, log *logger.Logger, settings *artifact.Config) (download.Downloader, error) { +func newDownloader(version string, log *logger.Logger, settings *artifact.Config) (download2.Downloader, error) { if !strings.HasSuffix(version, "-SNAPSHOT") { - return downloader.NewDownloader(log, settings) + return localremote.NewDownloader(log, settings) } // try snapshot repo before official - snapDownloader, err := snapshot.NewDownloader(log, settings, version) + snapDownloader, err := snapshot2.NewDownloader(log, settings, version) if err != nil { return nil, err } - httpDownloader, err := http.NewDownloader(log, settings) + httpDownloader, err := http2.NewDownloader(log, settings) if err != nil { return nil, err } - return composed.NewDownloader(fs.NewDownloader(settings), snapDownloader, httpDownloader), nil + return composed2.NewDownloader(fs2.NewDownloader(settings), 
snapDownloader, httpDownloader), nil } -func newVerifier(version string, log *logger.Logger, settings *artifact.Config) (download.Verifier, error) { +func newVerifier(version string, log *logger.Logger, settings *artifact.Config) (download2.Verifier, error) { allowEmptyPgp, pgp := release.PGP() if !strings.HasSuffix(version, "-SNAPSHOT") { - return downloader.NewVerifier(log, settings, allowEmptyPgp, pgp) + return localremote.NewVerifier(log, settings, allowEmptyPgp, pgp) } - fsVerifier, err := fs.NewVerifier(settings, allowEmptyPgp, pgp) + fsVerifier, err := fs2.NewVerifier(settings, allowEmptyPgp, pgp) if err != nil { return nil, err } - snapshotVerifier, err := snapshot.NewVerifier(settings, allowEmptyPgp, pgp, version) + snapshotVerifier, err := snapshot2.NewVerifier(settings, allowEmptyPgp, pgp, version) if err != nil { return nil, err } - remoteVerifier, err := http.NewVerifier(settings, allowEmptyPgp, pgp) + remoteVerifier, err := http2.NewVerifier(settings, allowEmptyPgp, pgp) if err != nil { return nil, err } - return composed.NewVerifier(fsVerifier, snapshotVerifier, remoteVerifier), nil + return composed2.NewVerifier(fsVerifier, snapshotVerifier, remoteVerifier), nil } diff --git a/internal/pkg/agent/application/upgrade/upgrade.go b/internal/pkg/agent/application/upgrade/upgrade.go index cb6f827d8e2..444927a6052 100644 --- a/internal/pkg/agent/application/upgrade/upgrade.go +++ b/internal/pkg/agent/application/upgrade/upgrade.go @@ -14,8 +14,6 @@ import ( "runtime" "strings" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" - "github.com/otiai10/copy" "go.elastic.co/apm" @@ -23,11 +21,11 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/application/reexec" "github.com/elastic/elastic-agent/internal/pkg/agent/application/secret" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" "github.com/elastic/elastic-agent/internal/pkg/agent/storage" - "github.com/elastic/elastic-agent/internal/pkg/artifact" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "github.com/elastic/elastic-agent/internal/pkg/release" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -40,7 +38,7 @@ const ( ) var ( - agentSpec = program.Spec{ + agentArtifact = artifact.Artifact{ Name: "elastic-agent", Cmd: agentName, Artifact: "beats/" + agentName, diff --git a/internal/pkg/agent/cmd/container.go b/internal/pkg/agent/cmd/container.go index ff1b40936d1..b3a086439e3 100644 --- a/internal/pkg/agent/cmd/container.go +++ b/internal/pkg/agent/cmd/container.go @@ -22,6 +22,8 @@ import ( "syscall" "time" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" + "github.com/spf13/cobra" "gopkg.in/yaml.v2" @@ -32,7 +34,6 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/artifact" "github.com/elastic/elastic-agent/internal/pkg/artifact/install/tar" 
"github.com/elastic/elastic-agent/internal/pkg/cli" "github.com/elastic/elastic-agent/internal/pkg/config" diff --git a/internal/pkg/agent/cmd/diagnostics.go b/internal/pkg/agent/cmd/diagnostics.go index 718e1c4596f..a1412c147e1 100644 --- a/internal/pkg/agent/cmd/diagnostics.go +++ b/internal/pkg/agent/cmd/diagnostics.go @@ -428,6 +428,7 @@ func gatherConfig() (AgentConfig, error) { } cfg.ConfigRendered = mapCFG + // TODO(blakerouse): Fix diagnostic command for Elastic Agent v2 /* // Gather vars to render process config isStandalone, err := isStandalone(renderedCFG) diff --git a/internal/pkg/agent/cmd/inspect.go b/internal/pkg/agent/cmd/inspect.go index 03ea093cab6..0c51bb40460 100644 --- a/internal/pkg/agent/cmd/inspect.go +++ b/internal/pkg/agent/cmd/inspect.go @@ -19,6 +19,7 @@ func newInspectCommandWithArgs(s []string, streams *cli.IOStreams) *cobra.Comman Long: "Shows current configuration of the agent", Args: cobra.ExactArgs(0), Run: func(c *cobra.Command, args []string) { + // TODO(blakerouse): Fix inspect command for Elastic Agent v2 /* if err := inspectConfig(paths.ConfigFile()); err != nil { fmt.Fprintf(streams.Err, "Error: %v\n%s\n", err, troubleshootMessage()) @@ -40,6 +41,7 @@ func newInspectOutputCommandWithArgs(_ []string) *cobra.Command { Long: "Displays configuration generated for output.\nIf no output is specified list of output is displayed", Args: cobra.MaximumNArgs(2), RunE: func(c *cobra.Command, args []string) error { + // TODO(blakerouse): Fix inspect command for Elastic Agent v2 /* outName, _ := c.Flags().GetString("output") program, _ := c.Flags().GetString("program") diff --git a/internal/pkg/agent/configuration/settings.go b/internal/pkg/agent/configuration/settings.go index 7c2c422a65b..8f515d21138 100644 --- a/internal/pkg/agent/configuration/settings.go +++ b/internal/pkg/agent/configuration/settings.go @@ -7,7 +7,8 @@ package configuration import ( "path/filepath" - "github.com/elastic/elastic-agent/internal/pkg/artifact" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" + monitoringCfg "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config" "github.com/elastic/elastic-agent/internal/pkg/core/retry" "github.com/elastic/elastic-agent/pkg/core/logger" diff --git a/internal/pkg/agent/install/uninstall.go b/internal/pkg/agent/install/uninstall.go index b43db32892d..090c5e06d59 100644 --- a/internal/pkg/agent/install/uninstall.go +++ b/internal/pkg/agent/install/uninstall.go @@ -16,27 +16,17 @@ import ( "github.com/kardianos/service" "github.com/elastic/elastic-agent-libs/logp" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" - "github.com/elastic/elastic-agent/internal/pkg/artifact" - "github.com/elastic/elastic-agent/internal/pkg/artifact/uninstall" "github.com/elastic/elastic-agent/internal/pkg/capabilities" "github.com/elastic/elastic-agent/internal/pkg/composable" "github.com/elastic/elastic-agent/internal/pkg/config" "github.com/elastic/elastic-agent/internal/pkg/config/operations" - "github.com/elastic/elastic-agent/internal/pkg/core/app" - 
"github.com/elastic/elastic-agent/internal/pkg/release" + "github.com/elastic/elastic-agent/pkg/component" "github.com/elastic/elastic-agent/pkg/core/logger" ) -const ( - inputsKey = "inputs" - outputsKey = "outputs" -) - // Uninstall uninstalls persistently Elastic Agent on the system. func Uninstall(cfgFile string) error { // uninstall the current service @@ -56,7 +46,7 @@ func Uninstall(cfgFile string) error { } _ = svc.Uninstall() - if err := uninstallPrograms(context.Background(), cfgFile); err != nil { + if err := uninstallComponents(context.Background(), cfgFile); err != nil { return err } @@ -121,99 +111,71 @@ func delayedRemoval(path string) { } -func uninstallPrograms(ctx context.Context, cfgFile string) error { +func uninstallComponents(ctx context.Context, cfgFile string) error { log, err := logger.NewWithLogpLevel("", logp.ErrorLevel, false) if err != nil { return err } - cfg, err := operations.LoadFullAgentConfig(cfgFile, false) + platform, err := component.LoadPlatformDetail() if err != nil { - return err + return fmt.Errorf("failed to gather system information: %w", err) } - cfg, err = applyDynamics(ctx, log, cfg) + specs, err := component.LoadRuntimeSpecs(paths.Components(), platform) if err != nil { - return err + return fmt.Errorf("failed to detect inputs and outputs: %w", err) } - pp, err := programsFromConfig(cfg) + cfg, err := operations.LoadFullAgentConfig(cfgFile, false) if err != nil { return err } - // nothing to remove - if len(pp) == 0 { - return nil + cfg, err = applyDynamics(ctx, log, cfg) + if err != nil { + return err } - uninstaller, err := uninstall.NewUninstaller() + comps, err := serviceComponentsFromConfig(specs, cfg) if err != nil { return err } - currentVersion := release.Version() - if release.Snapshot() { - currentVersion = fmt.Sprintf("%s-SNAPSHOT", currentVersion) + // nothing to remove + if len(comps) == 0 { + return nil } - artifactConfig := artifact.DefaultConfig() - for _, p := range pp { - descriptor := app.NewDescriptor(p.Spec, currentVersion, artifactConfig, nil) - if err := uninstaller.Uninstall(ctx, p.Spec, currentVersion, descriptor.Directory()); err != nil { - os.Stderr.WriteString(fmt.Sprintf("failed to uninstall '%s': %v\n", p.Spec.Name, err)) + // remove each service component + for _, comp := range comps { + if err := uninstallComponent(ctx, comp); err != nil { + os.Stderr.WriteString(fmt.Sprintf("failed to uninstall component %q: %s\n", comp.ID, err)) } } return nil } -func programsFromConfig(cfg *config.Config) ([]program.Program, error) { +func uninstallComponent(_ context.Context, _ component.Component) error { + // TODO(blakerouse): Perform uninstall of service component; once the service runtime is written. 
+ return errors.New("failed to uninstall component; not implemented") +} + +func serviceComponentsFromConfig(specs component.RuntimeSpecs, cfg *config.Config) ([]component.Component, error) { mm, err := cfg.ToMapStr() if err != nil { return nil, errors.New("failed to create a map from config", err) } - - // if no input is defined nothing to remove - if _, found := mm[inputsKey]; !found { - return nil, nil - } - - // if no output is defined nothing to remove - if _, found := mm[outputsKey]; !found { - return nil, nil - } - - ast, err := transpiler.NewAST(mm) - if err != nil { - return nil, errors.New("failed to create a ast from config", err) - } - - agentInfo, err := info.NewAgentInfo(false) - if err != nil { - return nil, errors.New("failed to get an agent info", err) - } - - ppMap, err := program.Programs(agentInfo, ast) - if err != nil { - return nil, errors.New("failed to get programs from config", err) - } - - var pp []program.Program - check := make(map[string]bool) - - for _, v := range ppMap { - for _, p := range v { - if _, found := check[p.Spec.CommandName()]; found { - continue - } - - pp = append(pp, p) - check[p.Spec.CommandName()] = true + allComps, err := specs.ToComponents(mm) if err != nil { // surface a spec/config resolution failure instead of silently treating it as nothing to remove return nil, errors.New("failed to resolve components from config", err) } var serviceComps []component.Component for _, comp := range allComps { if comp.Err == nil && comp.Spec.Spec.Service != nil { // non-error and service based component serviceComps = append(serviceComps, comp) } } - - return pp, nil + return serviceComps, nil }
- -package operation - -import ( - "context" - "os" - "path/filepath" - "runtime" - "testing" - "time" - - "go.elastic.co/apm/apmtest" - - "github.com/elastic/elastic-agent-libs/logp" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/agent/stateresolver" - "github.com/elastic/elastic-agent/internal/pkg/artifact" - "github.com/elastic/elastic-agent/internal/pkg/artifact/download" - "github.com/elastic/elastic-agent/internal/pkg/artifact/install" - "github.com/elastic/elastic-agent/internal/pkg/artifact/uninstall" - "github.com/elastic/elastic-agent/internal/pkg/core/app" - "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/noop" - "github.com/elastic/elastic-agent/internal/pkg/core/retry" - "github.com/elastic/elastic-agent/internal/pkg/core/status" - "github.com/elastic/elastic-agent/pkg/core/logger" - "github.com/elastic/elastic-agent/pkg/core/process" - "github.com/elastic/elastic-agent/pkg/core/server" -) - -var downloadPath = getAbsPath("tests/downloads") -var installPath = getAbsPath("tests/scripts") - -func getTestOperator(t *testing.T, downloadPath string, installPath string, p *app.Descriptor) *Operator { - operatorCfg := &configuration.SettingsConfig{ - RetryConfig: &retry.Config{ - Enabled: true, - RetriesCount: 2, - Delay: 3 * time.Second, - MaxDelay: 10 * time.Second, - }, - ProcessConfig: &process.Config{ - FailureTimeout: 1, // restart instantly - }, - DownloadConfig: &artifact.Config{ - TargetDirectory: downloadPath, - InstallPath: installPath, - }, - LoggingConfig: logger.DefaultLoggingConfig(), - } - - l := getLogger() - agentInfo, _ := info.NewAgentInfo(true) - - installer := &DummyInstallerChecker{} - uninstaller := &DummyUninstaller{} - fetcher := &DummyDownloader{} - verifier := &DummyVerifier{} - - stateResolver, err := stateresolver.NewStateResolver(l) - if err != nil { - t.Fatal(err) - } - srv, err := server.New(l, "localhost:0", &ApplicationStatusHandler{}, apmtest.DiscardTracer) - if err != nil { - t.Fatal(err) - } - err = srv.Start() - if err != nil { - t.Fatal(err) - } - - operator, err := NewOperator(context.Background(), l, agentInfo, "p1", operatorCfg, fetcher, verifier, installer, uninstaller, stateResolver, srv, nil, noop.NewMonitor(), status.NewController(l)) - if err != nil { - t.Fatal(err) - } - - operator.config.DownloadConfig.OperatingSystem = "darwin" - operator.config.DownloadConfig.Architecture = "64" - - // make the download path so the `operation_verify` can ensure the path exists - downloadConfig := operator.config.DownloadConfig - fullPath, err := artifact.GetArtifactPath(p.Spec(), p.Version(), downloadConfig.OS(), downloadConfig.Arch(), downloadConfig.TargetDirectory) - if err != nil { - t.Fatal(err) - } - createFile(t, fullPath) - - return operator -} - -func getLogger() *logger.Logger { - loggerCfg := logger.DefaultLoggingConfig() - loggerCfg.Level = logp.ErrorLevel - l, _ := logger.NewFromConfig("", loggerCfg, false) - return l -} - -func getProgram(binary, version string) *app.Descriptor { - spec := program.SupportedMap[binary] - downloadCfg := &artifact.Config{ - InstallPath: installPath, - OperatingSystem: "darwin", - Architecture: "64", - } - return app.NewDescriptorWithPath(installPath, spec, 
version, downloadCfg, nil) -} - -func getAbsPath(path string) string { - _, filename, _, _ := runtime.Caller(0) - return filepath.Join(filepath.Dir(filename), path) -} - -func createFile(t *testing.T, path string) { - _, err := os.Stat(path) - if os.IsNotExist(err) { - file, err := os.Create(path) - if err != nil { - t.Fatal(err) - } - defer file.Close() - } -} - -func waitFor(t *testing.T, check func() error) { - started := time.Now() - for { - err := check() - if err == nil { - return - } - if time.Since(started) >= 15*time.Second { - t.Fatalf("check timed out after 15 second: %s", err) - } - time.Sleep(10 * time.Millisecond) - } -} - -type DummyDownloader struct{} - -func (*DummyDownloader) Download(_ context.Context, _ program.Spec, _ string) (string, error) { - return "", nil -} - -var _ download.Downloader = &DummyDownloader{} - -type DummyVerifier struct{} - -func (*DummyVerifier) Verify(_ program.Spec, _ string) error { - return nil -} - -var _ download.Verifier = &DummyVerifier{} - -type DummyInstallerChecker struct{} - -func (*DummyInstallerChecker) Check(_ context.Context, _ program.Spec, _, _ string) error { - return nil -} - -func (*DummyInstallerChecker) Install(_ context.Context, _ program.Spec, _, _ string) error { - return nil -} - -var _ install.InstallerChecker = &DummyInstallerChecker{} - -type DummyUninstaller struct{} - -func (*DummyUninstaller) Uninstall(_ context.Context, _ program.Spec, _, _ string) error { - return nil -} - -var _ uninstall.Uninstaller = &DummyUninstaller{} diff --git a/internal/pkg/agent/operation/monitoring.go b/internal/pkg/agent/operation/monitoring.go deleted file mode 100644 index 11e080d7fd3..00000000000 --- a/internal/pkg/agent/operation/monitoring.go +++ /dev/null @@ -1,679 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package operation - -import ( - "fmt" - "path/filepath" - "strings" - - "github.com/hashicorp/go-multierror" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" - "github.com/elastic/elastic-agent/internal/pkg/agent/configrequest" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/core/app" - "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/beats" -) - -const ( - monitoringName = "FLEET_MONITORING" - outputKey = "output" - logsProcessName = "filebeat" - metricsProcessName = "metricbeat" - artifactPrefix = "beats" - agentName = "elastic-agent" -) - -func (o *Operator) handleStartSidecar(s configrequest.Step) (result error) { - // if monitoring is disabled and running stop it - if !o.monitor.IsMonitoringEnabled() { - if o.isMonitoring != 0 { - o.logger.Info("operator.handleStartSidecar: monitoring is running and disabled, proceeding to stop") - return o.handleStopSidecar(s) - } - - o.logger.Info("operator.handleStartSidecar: monitoring is not running and disabled, no action taken") - return nil - } - - for _, step := range o.getMonitoringSteps(s) { - p, cfg, err := getProgramFromStepWithTags(step, o.config.DownloadConfig, monitoringTags()) - if err != nil { - return errors.New(err, - errors.TypeApplication, - errors.M(errors.MetaKeyAppName, step.ProgramSpec.CommandName()), - "operator.handleStartSidecar failed to create program") - } - - // best effort on starting monitoring, if no hosts provided stop and spare resources - if step.ID == configrequest.StepRemove { - if err := o.stop(p); err != nil { - result = multierror.Append(err, err) - } else { - o.markStopMonitoring(step.ProgramSpec.CommandName()) - } - } else { - if err := o.start(p, cfg); err != nil { - result = multierror.Append(err, err) - } else { - o.markStartMonitoring(step.ProgramSpec.CommandName()) - } - } - } - - return result -} - -func (o *Operator) handleStopSidecar(s configrequest.Step) (result error) { - for _, step := range o.generateMonitoringSteps(s.Version, "", nil) { - p, _, err := getProgramFromStepWithTags(step, o.config.DownloadConfig, monitoringTags()) - if err != nil { - return errors.New(err, - errors.TypeApplication, - errors.M(errors.MetaKeyAppName, step.ProgramSpec.CommandName()), - "operator.handleStopSidecar failed to create program") - } - - o.logger.Debugf("stopping program %v", p) - if err := o.stop(p); err != nil { - result = multierror.Append(err, err) - } else { - o.markStopMonitoring(step.ProgramSpec.CommandName()) - } - } - - return result -} - -func monitoringTags() map[app.Tag]string { - return map[app.Tag]string{ - app.TagSidecar: "true", - } -} - -func (o *Operator) getMonitoringSteps(step configrequest.Step) []configrequest.Step { - // get output - config, err := getConfigFromStep(step) - if err != nil { - o.logger.Error("operator.getMonitoringSteps: getting config from step failed: %v", err) - return nil - } - - outputIface, found := config[outputKey] - if !found { - o.logger.Errorf("operator.getMonitoringSteps: monitoring configuration not found for sidecar of type %s", step.ProgramSpec.CommandName()) - return nil - } - - outputMap, ok := outputIface.(map[string]interface{}) - if !ok { - o.logger.Error("operator.getMonitoringSteps: monitoring config is not a map") - return nil - } - - if len(outputMap) == 0 { - o.logger.Errorf("operator.getMonitoringSteps: monitoring is 
missing an output configuration for sidecar of type: %s", step.ProgramSpec.CommandName()) - return nil - } - - // Guards against parser issues upstream, this should not be possible but - // since we are folding all the child options as a map we should make sure we have - //a unique output. - if len(outputMap) > 1 { - o.logger.Errorf("operator.getMonitoringSteps: monitoring has too many outputs configuration for sidecar of type: %s", step.ProgramSpec.CommandName()) - return nil - } - - // Aggregate output configuration independently of the received output key. - output := make(map[string]interface{}) - - for _, v := range outputMap { - child, ok := v.(map[string]interface{}) - if !ok { - o.logger.Error("operator.getMonitoringSteps: monitoring config is not a map") - return nil - } - for c, j := range child { - output[c] = j - } - } - - t, ok := output["type"] - if !ok { - o.logger.Errorf("operator.getMonitoringSteps: unknown monitoring output for sidecar of type: %s", step.ProgramSpec.CommandName()) - return nil - } - - outputType, ok := t.(string) - if !ok { - o.logger.Errorf("operator.getMonitoringSteps: unexpected monitoring output type: %+v for sidecar of type: %s", t, step.ProgramSpec.CommandName()) - return nil - } - - return o.generateMonitoringSteps(step.Version, outputType, output) -} - -func (o *Operator) generateMonitoringSteps(version, outputType string, output interface{}) []configrequest.Step { - var steps []configrequest.Step - watchLogs := o.monitor.WatchLogs() - watchMetrics := o.monitor.WatchMetrics() - monitoringNamespace := o.monitor.MonitoringNamespace() - - // generate only when monitoring is running (for config refresh) or - // state changes (turning on/off) - if watchLogs != o.isMonitoringLogs() || watchLogs { - fbConfig, any := o.getMonitoringFilebeatConfig(outputType, output, monitoringNamespace) - stepID := configrequest.StepRun - if !watchLogs || !any { - stepID = configrequest.StepRemove - } - filebeatStep := configrequest.Step{ - ID: stepID, - Version: version, - ProgramSpec: loadSpecFromSupported(logsProcessName), - Meta: map[string]interface{}{ - configrequest.MetaConfigKey: fbConfig, - }, - } - - steps = append(steps, filebeatStep) - } - if watchMetrics != o.isMonitoringMetrics() || watchMetrics { - mbConfig, any := o.getMonitoringMetricbeatConfig(outputType, output, monitoringNamespace) - stepID := configrequest.StepRun - if !watchMetrics || !any { - stepID = configrequest.StepRemove - } - - metricbeatStep := configrequest.Step{ - ID: stepID, - Version: version, - ProgramSpec: loadSpecFromSupported(metricsProcessName), - Meta: map[string]interface{}{ - configrequest.MetaConfigKey: mbConfig, - }, - } - - steps = append(steps, metricbeatStep) - } - - return steps -} - -func loadSpecFromSupported(processName string) program.Spec { - if loadedSpec, found := program.SupportedMap[strings.ToLower(processName)]; found { - return loadedSpec - } - - return program.Spec{ - Name: processName, - Cmd: processName, - Artifact: fmt.Sprintf("%s/%s", artifactPrefix, processName), - } -} - -func (o *Operator) getMonitoringFilebeatConfig(outputType string, output interface{}, monitoringNamespace string) (map[string]interface{}, bool) { - inputs := []interface{}{ - map[string]interface{}{ - "type": "filestream", - "close": map[string]interface{}{ - "on_state_change": map[string]interface{}{ - "inactive": "5m", - }, - }, - "parsers": []map[string]interface{}{ - { - "ndjson": map[string]interface{}{ - "overwrite_keys": true, - "message_key": "message", - }, - }, - }, - "paths": 
[]string{ - filepath.Join(paths.Home(), "logs", "elastic-agent-*.ndjson"), - filepath.Join(paths.Home(), "logs", "elastic-agent-watcher-*.ndjson"), - }, - "index": fmt.Sprintf("logs-elastic_agent-%s", monitoringNamespace), - "processors": []map[string]interface{}{ - { - "add_fields": map[string]interface{}{ - "target": "data_stream", - "fields": map[string]interface{}{ - "type": "logs", - "dataset": "elastic_agent", - "namespace": monitoringNamespace, - }, - }, - }, - { - "add_fields": map[string]interface{}{ - "target": "event", - "fields": map[string]interface{}{ - "dataset": "elastic_agent", - }, - }, - }, - { - "add_fields": map[string]interface{}{ - "target": "elastic_agent", - "fields": map[string]interface{}{ - "id": o.agentInfo.AgentID(), - "version": o.agentInfo.Version(), - "snapshot": o.agentInfo.Snapshot(), - }, - }, - }, - { - "add_fields": map[string]interface{}{ - "target": "agent", - "fields": map[string]interface{}{ - "id": o.agentInfo.AgentID(), - }, - }, - }, - { - "drop_fields": map[string]interface{}{ - "fields": []string{ - "ecs.version", //coming from logger, already added by libbeat - }, - "ignore_missing": true, - }, - }, - }, - }, - } - logPaths := o.getLogFilePaths() - if len(logPaths) > 0 { - for name, paths := range logPaths { - inputs = append(inputs, map[string]interface{}{ - "type": "filestream", - "close": map[string]interface{}{ - "on_state_change": map[string]interface{}{ - "inactive": "5m", - }, - }, - "parsers": []map[string]interface{}{ - { - "ndjson": map[string]interface{}{ - "overwrite_keys": true, - "message_key": "message", - }, - }, - }, - "paths": paths, - "index": fmt.Sprintf("logs-elastic_agent.%s-%s", name, monitoringNamespace), - "processors": []map[string]interface{}{ - { - "add_fields": map[string]interface{}{ - "target": "data_stream", - "fields": map[string]interface{}{ - "type": "logs", - "dataset": fmt.Sprintf("elastic_agent.%s", name), - "namespace": monitoringNamespace, - }, - }, - }, - { - "add_fields": map[string]interface{}{ - "target": "event", - "fields": map[string]interface{}{ - "dataset": fmt.Sprintf("elastic_agent.%s", name), - }, - }, - }, - { - "add_fields": map[string]interface{}{ - "target": "elastic_agent", - "fields": map[string]interface{}{ - "id": o.agentInfo.AgentID(), - "version": o.agentInfo.Version(), - "snapshot": o.agentInfo.Snapshot(), - }, - }, - }, - { - "add_fields": map[string]interface{}{ - "target": "agent", - "fields": map[string]interface{}{ - "id": o.agentInfo.AgentID(), - }, - }, - }, - { - "drop_fields": map[string]interface{}{ - "fields": []string{ - "ecs.version", //coming from logger, already added by libbeat - }, - "ignore_missing": true, - }, - }, - }, - }) - } - } - - result := map[string]interface{}{ - "filebeat": map[string]interface{}{ - "inputs": inputs, - }, - "output": map[string]interface{}{ - outputType: output, - }, - } - - return result, true -} - -func (o *Operator) getMonitoringMetricbeatConfig(outputType string, output interface{}, monitoringNamespace string) (map[string]interface{}, bool) { - hosts := o.getMetricbeatEndpoints() - if len(hosts) == 0 { - return nil, false - } - var modules []interface{} - fixedAgentName := strings.ReplaceAll(agentName, "-", "_") - - for name, endpoints := range hosts { - modules = append(modules, map[string]interface{}{ - "module": "beat", - "metricsets": []string{"stats", "state"}, - "period": "10s", - "hosts": endpoints, - "index": fmt.Sprintf("metrics-elastic_agent.%s-%s", name, monitoringNamespace), - "processors": []map[string]interface{}{ - { 
- "add_fields": map[string]interface{}{ - "target": "data_stream", - "fields": map[string]interface{}{ - "type": "metrics", - "dataset": fmt.Sprintf("elastic_agent.%s", name), - "namespace": monitoringNamespace, - }, - }, - }, - { - "add_fields": map[string]interface{}{ - "target": "event", - "fields": map[string]interface{}{ - "dataset": fmt.Sprintf("elastic_agent.%s", name), - }, - }, - }, - { - "add_fields": map[string]interface{}{ - "target": "elastic_agent", - "fields": map[string]interface{}{ - "id": o.agentInfo.AgentID(), - "version": o.agentInfo.Version(), - "snapshot": o.agentInfo.Snapshot(), - }, - }, - }, - { - "add_fields": map[string]interface{}{ - "target": "agent", - "fields": map[string]interface{}{ - "id": o.agentInfo.AgentID(), - }, - }, - }, - }, - }, map[string]interface{}{ - "module": "http", - "metricsets": []string{"json"}, - "namespace": "agent", - "period": "10s", - "path": "/stats", - "hosts": endpoints, - "index": fmt.Sprintf("metrics-elastic_agent.%s-%s", fixedAgentName, monitoringNamespace), - "processors": []map[string]interface{}{ - { - "add_fields": map[string]interface{}{ - "target": "data_stream", - "fields": map[string]interface{}{ - "type": "metrics", - "dataset": fmt.Sprintf("elastic_agent.%s", fixedAgentName), - "namespace": monitoringNamespace, - }, - }, - }, - { - "add_fields": map[string]interface{}{ - "target": "event", - "fields": map[string]interface{}{ - "dataset": fmt.Sprintf("elastic_agent.%s", fixedAgentName), - }, - }, - }, - { - "add_fields": map[string]interface{}{ - "target": "elastic_agent", - "fields": map[string]interface{}{ - "id": o.agentInfo.AgentID(), - "version": o.agentInfo.Version(), - "snapshot": o.agentInfo.Snapshot(), - "process": name, - }, - }, - }, - { - "add_fields": map[string]interface{}{ - "target": "agent", - "fields": map[string]interface{}{ - "id": o.agentInfo.AgentID(), - }, - }, - }, - { - "copy_fields": map[string]interface{}{ - "fields": normalizeHTTPCopyRules(name), - "ignore_missing": true, - }, - }, - { - "drop_fields": map[string]interface{}{ - "fields": []string{ - "http", - }, - "ignore_missing": true, - }, - }, - }, - }) - } - - modules = append(modules, map[string]interface{}{ - "module": "http", - "metricsets": []string{"json"}, - "namespace": "agent", - "period": "10s", - "path": "/stats", - "hosts": []string{beats.AgentPrefixedMonitoringEndpoint(o.config.DownloadConfig.OS(), o.config.MonitoringConfig.HTTP)}, - "index": fmt.Sprintf("metrics-elastic_agent.%s-%s", fixedAgentName, monitoringNamespace), - "processors": []map[string]interface{}{ - { - "add_fields": map[string]interface{}{ - "target": "data_stream", - "fields": map[string]interface{}{ - "type": "metrics", - "dataset": fmt.Sprintf("elastic_agent.%s", fixedAgentName), - "namespace": monitoringNamespace, - }, - }, - }, - { - "add_fields": map[string]interface{}{ - "target": "event", - "fields": map[string]interface{}{ - "dataset": fmt.Sprintf("elastic_agent.%s", fixedAgentName), - }, - }, - }, - { - "add_fields": map[string]interface{}{ - "target": "elastic_agent", - "fields": map[string]interface{}{ - "id": o.agentInfo.AgentID(), - "version": o.agentInfo.Version(), - "snapshot": o.agentInfo.Snapshot(), - "process": "elastic-agent", - }, - }, - }, - { - "add_fields": map[string]interface{}{ - "target": "agent", - "fields": map[string]interface{}{ - "id": o.agentInfo.AgentID(), - }, - }, - }, - { - "copy_fields": map[string]interface{}{ - "fields": normalizeHTTPCopyRules(fixedAgentName), - "ignore_missing": true, - }, - }, - { - "drop_fields": 
map[string]interface{}{ - "fields": []string{ - "http", - }, - "ignore_missing": true, - }, - }, - }, - }) - - result := map[string]interface{}{ - "metricbeat": map[string]interface{}{ - "modules": modules, - }, - "output": map[string]interface{}{ - outputType: output, - }, - } - - return result, true -} - -func (o *Operator) getLogFilePaths() map[string][]string { - paths := map[string][]string{} - - o.appsLock.Lock() - defer o.appsLock.Unlock() - - for _, a := range o.apps { - logPath := a.Monitor().LogPath(a.Spec(), o.pipelineID) - if logPath != "" { - paths[strings.ReplaceAll(a.Name(), "-", "_")] = []string{ - logPath, - fmt.Sprintf("%s*", logPath), - } - } - } - - return paths -} - -func (o *Operator) getMetricbeatEndpoints() map[string][]string { - endpoints := map[string][]string{} - - o.appsLock.Lock() - defer o.appsLock.Unlock() - - for _, a := range o.apps { - metricEndpoint := a.Monitor().MetricsPathPrefixed(a.Spec(), o.pipelineID) - if metricEndpoint != "" { - safeName := strings.ReplaceAll(a.Name(), "-", "_") - // prevent duplicates - var found bool - for _, ep := range endpoints[safeName] { - if ep == metricEndpoint { - found = true - break - } - } - - if !found { - endpoints[safeName] = append(endpoints[safeName], metricEndpoint) - } - } - } - - return endpoints -} - -func (o *Operator) markStopMonitoring(process string) { - switch process { - case logsProcessName: - o.isMonitoring ^= isMonitoringLogsFlag - case metricsProcessName: - o.isMonitoring ^= isMonitoringMetricsFlag - } -} - -func (o *Operator) markStartMonitoring(process string) { - switch process { - case logsProcessName: - o.isMonitoring |= isMonitoringLogsFlag - case metricsProcessName: - o.isMonitoring |= isMonitoringMetricsFlag - } -} - -func (o *Operator) isMonitoringLogs() bool { - return (o.isMonitoring & isMonitoringLogsFlag) != 0 -} - -func (o *Operator) isMonitoringMetrics() bool { - return (o.isMonitoring & isMonitoringMetricsFlag) != 0 -} - -func normalizeHTTPCopyRules(name string) []map[string]interface{} { - fromToMap := []map[string]interface{}{ - // I should be able to see the CPU Usage on the running machine. Am using too much CPU? - { - "from": "http.agent.beat.cpu", - "to": "system.process.cpu", - }, - // I should be able to see the Memory usage of Elastic Agent. Is the Elastic Agent using too much memory? - { - "from": "http.agent.beat.memstats.memory_sys", - "to": "system.process.memory.size", - }, - // I should be able to see the system memory. Am I running out of memory? - // TODO: with APM agent: total and free - - // I should be able to see Disk usage on the running machine. Am I running out of disk space? - // TODO: with APM agent - - // I should be able to see fd usage. Am I keep too many files open? - { - "from": "http.agent.beat.handles", - "to": "system.process.fd", - }, - // Cgroup reporting - { - "from": "http.agent.beat.cgroup", - "to": "system.process.cgroup", - }, - } - - spec, found := program.SupportedMap[name] - if !found { - return fromToMap - } - - for _, exportedMetric := range spec.ExportedMetrics { - fromToMap = append(fromToMap, map[string]interface{}{ - "from": fmt.Sprintf("http.agent.%s", exportedMetric), - "to": exportedMetric, - }) - } - - return fromToMap -} diff --git a/internal/pkg/agent/operation/monitoring_test.go b/internal/pkg/agent/operation/monitoring_test.go deleted file mode 100644 index c64ede2a8f1..00000000000 --- a/internal/pkg/agent/operation/monitoring_test.go +++ /dev/null @@ -1,268 +0,0 @@ -// Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package operation - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/require" - "go.elastic.co/apm/apmtest" - - "github.com/elastic/elastic-agent/internal/pkg/testutils" - - "github.com/elastic/elastic-agent-client/v7/pkg/proto" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/configrequest" - "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/agent/stateresolver" - "github.com/elastic/elastic-agent/internal/pkg/artifact" - "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/internal/pkg/core/app" - "github.com/elastic/elastic-agent/internal/pkg/core/monitoring" - monitoringConfig "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config" - "github.com/elastic/elastic-agent/internal/pkg/core/retry" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/internal/pkg/core/status" - "github.com/elastic/elastic-agent/pkg/core/process" - "github.com/elastic/elastic-agent/pkg/core/server" -) - -func TestExportedMetrics(t *testing.T) { - programName := "testing" - expectedMetricsName := "metric_name" - program.SupportedMap[programName] = program.Spec{ - ExportedMetrics: []string{expectedMetricsName}, - } - - exportedMetrics := normalizeHTTPCopyRules(programName) - - exportedMetricFound := false - for _, kv := range exportedMetrics { - from, found := kv["from"] - if !found { - continue - } - to, found := kv["to"] - if !found { - continue - } - - if to != expectedMetricsName { - continue - } - if from != fmt.Sprintf("http.agent.%s", expectedMetricsName) { - continue - } - exportedMetricFound = true - break - } - - require.True(t, exportedMetricFound, "exported metric not found") - delete(program.SupportedMap, programName) -} - -func TestGenerateSteps(t *testing.T) { - testutils.InitStorage(t) - - const sampleOutput = "sample-output" - const outputType = "logstash" - - type testCase struct { - Name string - Config *monitoringConfig.MonitoringConfig - ExpectedSteps int - FilebeatStep bool - MetricbeatStep bool - } - - testCases := []testCase{ - {"NO monitoring", &monitoringConfig.MonitoringConfig{MonitorLogs: false, MonitorMetrics: false}, 0, false, false}, - {"FB monitoring", &monitoringConfig.MonitoringConfig{MonitorLogs: true, MonitorMetrics: false}, 1, true, false}, - {"MB monitoring", &monitoringConfig.MonitoringConfig{MonitorLogs: false, MonitorMetrics: true}, 1, false, true}, - {"ALL monitoring", &monitoringConfig.MonitoringConfig{MonitorLogs: true, MonitorMetrics: true}, 2, true, true}, - } - - for _, tc := range testCases { - t.Run(tc.Name, func(t *testing.T) { - m := &testMonitor{monitorLogs: tc.Config.MonitorLogs, monitorMetrics: tc.Config.MonitorMetrics} - operator := getMonitorableTestOperator(t, "tests/scripts", m, tc.Config) - steps := operator.generateMonitoringSteps("8.0", outputType, sampleOutput) - if actualSteps := len(steps); actualSteps != tc.ExpectedSteps { - t.Fatalf("invalid number of steps, 
expected %v, got %v", tc.ExpectedSteps, actualSteps) - } - - var fbFound, mbFound bool - for _, s := range steps { - // Filebeat step check - if s.ProgramSpec.CommandName() == "filebeat" { - fbFound = true - checkStep(t, "filebeat", outputType, sampleOutput, s) - } - - // Metricbeat step check - if s.ProgramSpec.CommandName() == "metricbeat" { - mbFound = true - checkStep(t, "metricbeat", outputType, sampleOutput, s) - } - } - - if tc.FilebeatStep != fbFound { - t.Fatalf("Steps for filebeat do not match. Was expected: %v, Was found: %v", tc.FilebeatStep, fbFound) - } - - if tc.MetricbeatStep != mbFound { - t.Fatalf("Steps for metricbeat do not match. Was expected: %v, Was found: %v", tc.MetricbeatStep, mbFound) - } - }) - } -} - -func checkStep(t *testing.T, stepName string, outputType string, expectedOutput interface{}, s configrequest.Step) { - if meta := s.Meta[configrequest.MetaConfigKey]; meta != nil { - mapstr, ok := meta.(map[string]interface{}) - if !ok { - t.Fatalf("no meta config for %s step", stepName) - } - - esOut, ok := mapstr["output"].(map[string]interface{}) - if !ok { - t.Fatalf("output not found for %s step", stepName) - } - - if actualOutput := esOut[outputType]; actualOutput != expectedOutput { - t.Fatalf("output for %s step does not match. expected: %v, got %v", stepName, expectedOutput, actualOutput) - } - } -} - -func getMonitorableTestOperator(t *testing.T, installPath string, m monitoring.Monitor, mcfg *monitoringConfig.MonitoringConfig) *Operator { - cfg := &configuration.SettingsConfig{ - RetryConfig: &retry.Config{ - Enabled: true, - RetriesCount: 2, - Delay: 3 * time.Second, - MaxDelay: 10 * time.Second, - }, - ProcessConfig: &process.Config{}, - DownloadConfig: &artifact.Config{ - InstallPath: installPath, - OperatingSystem: "darwin", - }, - MonitoringConfig: mcfg, - } - - l := getLogger() - agentInfo, _ := info.NewAgentInfo(true) - - installer := &DummyInstallerChecker{} - uninstaller := &DummyUninstaller{} - fetcher := &DummyDownloader{} - verifier := &DummyVerifier{} - - stateResolver, err := stateresolver.NewStateResolver(l) - if err != nil { - t.Fatal(err) - } - srv, err := server.New(l, "localhost:0", &ApplicationStatusHandler{}, apmtest.DiscardTracer) - if err != nil { - t.Fatal(err) - } - - ctx := context.Background() - operator, err := NewOperator(ctx, l, agentInfo, "p1", cfg, fetcher, verifier, installer, uninstaller, stateResolver, srv, nil, m, status.NewController(l)) - if err != nil { - t.Fatal(err) - } - - operator.apps["dummy"] = &testMonitorableApp{monitor: m} - - return operator -} - -type testMonitorableApp struct { - monitor monitoring.Monitor -} - -func (*testMonitorableApp) Name() string { return "" } -func (*testMonitorableApp) Started() bool { return false } -func (*testMonitorableApp) Start(_ context.Context, _ app.Taggable, cfg map[string]interface{}) error { - return nil -} -func (*testMonitorableApp) Stop() {} -func (*testMonitorableApp) Shutdown() {} -func (*testMonitorableApp) Configure(_ context.Context, config map[string]interface{}) error { - return nil -} -func (*testMonitorableApp) Spec() program.Spec { return program.Spec{} } -func (*testMonitorableApp) State() state.State { return state.State{} } -func (*testMonitorableApp) SetState(_ state.Status, _ string, _ map[string]interface{}) {} -func (a *testMonitorableApp) Monitor() monitoring.Monitor { return a.monitor } -func (a *testMonitorableApp) OnStatusChange(_ *server.ApplicationState, _ proto.StateObserved_Status, _ string, _ map[string]interface{}) { -} - -type 
testMonitor struct { - monitorLogs bool - monitorMetrics bool -} - -// EnrichArgs enriches arguments provided to application, in order to enable -// monitoring -func (b *testMonitor) EnrichArgs(_ program.Spec, _ string, args []string, _ bool) []string { - return args -} - -// Cleanup cleans up all drops. -func (b *testMonitor) Cleanup(program.Spec, string) error { return nil } - -// Close closes the monitor. -func (b *testMonitor) Close() {} - -// Prepare executes steps in order for monitoring to work correctly -func (b *testMonitor) Prepare(program.Spec, string, int, int) error { return nil } - -const testPath = "path" - -// LogPath describes a path where application stores logs. Empty if -// application is not monitorable -func (b *testMonitor) LogPath(program.Spec, string) string { - if !b.monitorLogs { - return "" - } - return testPath -} - -// MetricsPath describes a location where application exposes metrics -// collectable by metricbeat. -func (b *testMonitor) MetricsPath(program.Spec, string) string { - if !b.monitorMetrics { - return "" - } - return testPath -} - -// MetricsPathPrefixed return metrics path prefixed with http+ prefix. -func (b *testMonitor) MetricsPathPrefixed(program.Spec, string) string { - return "http+path" -} - -// Reload reloads state based on configuration. -func (b *testMonitor) Reload(cfg *config.Config) error { return nil } - -// IsMonitoringEnabled returns true if monitoring is configured. -func (b *testMonitor) IsMonitoringEnabled() bool { return b.monitorLogs || b.monitorMetrics } - -// MonitoringNamespace returns monitoring namespace configured. -func (b *testMonitor) MonitoringNamespace() string { return "default" } - -// WatchLogs return true if monitoring is configured and monitoring logs is enabled. -func (b *testMonitor) WatchLogs() bool { return b.monitorLogs } - -// WatchMetrics return true if monitoring is configured and monitoring metrics is enabled. -func (b *testMonitor) WatchMetrics() bool { return b.monitorMetrics } diff --git a/internal/pkg/agent/operation/operation.go b/internal/pkg/agent/operation/operation.go deleted file mode 100644 index 0419058cc44..00000000000 --- a/internal/pkg/agent/operation/operation.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package operation - -import ( - "context" - - "github.com/elastic/elastic-agent-client/v7/pkg/proto" - - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/core/app" - "github.com/elastic/elastic-agent/internal/pkg/core/monitoring" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/pkg/core/server" -) - -// operation is an operation definition -// each operation needs to implement this interface in order -// to ease up rollbacks -type operation interface { - // Name is human readable name which identifies an operation - Name() string - // Check checks whether operation needs to be run - // In case prerequisites (such as invalid cert or tweaked binary) are not met, it returns error - // examples: - // - Start does not need to run if process is running - // - Fetch does not need to run if package is already present - Check(ctx context.Context, application Application) (bool, error) - // Run runs the operation - Run(ctx context.Context, application Application) error -} - -// Application is an application capable of being started, stopped and configured. -type Application interface { - Name() string - Started() bool - Start(ctx context.Context, p app.Taggable, cfg map[string]interface{}) error - Stop() - Shutdown() - Configure(ctx context.Context, config map[string]interface{}) error - Monitor() monitoring.Monitor - State() state.State - Spec() program.Spec - SetState(status state.Status, msg string, payload map[string]interface{}) - OnStatusChange(s *server.ApplicationState, status proto.StateObserved_Status, msg string, payload map[string]interface{}) -} - -// Descriptor defines a program which needs to be run. -// Is passed around operator operations. -type Descriptor interface { - Spec() program.Spec - ServicePort() int - BinaryName() string - Version() string - ID() string - Directory() string - Tags() map[app.Tag]string -} - -// ApplicationStatusHandler expects that only Application is registered in the server and updates the -// current state of the application from the OnStatusChange callback from inside the server. -// -// In the case that an application is reported as failed by the server it will then restart the application, unless -// it expects that the application should be stopping. -type ApplicationStatusHandler struct{} - -// OnStatusChange is the handler called by the GRPC server code. -// -// It updates the status of the application and handles restarting the application is needed. -func (*ApplicationStatusHandler) OnStatusChange(s *server.ApplicationState, status proto.StateObserved_Status, msg string, payload map[string]interface{}) { - if state.IsStateFiltered(msg, payload) { - return - } - app, ok := s.App().(Application) - - if !ok { - panic(errors.New("only Application can be registered when using the ApplicationStatusHandler", errors.TypeUnexpected)) - } - app.OnStatusChange(s, status, msg, payload) -} diff --git a/internal/pkg/agent/operation/operation_config.go b/internal/pkg/agent/operation/operation_config.go deleted file mode 100644 index fec587f3540..00000000000 --- a/internal/pkg/agent/operation/operation_config.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. 
Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package operation - -import ( - "context" - - "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/core/plugin/process" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -var ( - // ErrClientNotFound is an error when client is not found - ErrClientNotFound = errors.New("client not found, check if process is running") - // ErrClientNotConfigurable happens when stored client does not implement Config func - ErrClientNotConfigurable = errors.New("client does not provide configuration") -) - -// Configures running process by sending a configuration to its -// grpc endpoint -type operationConfig struct { - logger *logger.Logger - operatorConfig *configuration.SettingsConfig - cfg map[string]interface{} -} - -func newOperationConfig( - logger *logger.Logger, - operatorConfig *configuration.SettingsConfig, - cfg map[string]interface{}) *operationConfig { - return &operationConfig{ - logger: logger, - operatorConfig: operatorConfig, - cfg: cfg, - } -} - -// Name is human readable name identifying an operation -func (o *operationConfig) Name() string { - return "operation-config" -} - -// Check checks whether config needs to be run. -// -// Always returns true. -func (o *operationConfig) Check(_ context.Context, _ Application) (bool, error) { return true, nil } - -// Run runs the operation -func (o *operationConfig) Run(ctx context.Context, application Application) (err error) { - defer func() { - if err != nil { - // application failed to apply config but is running. - s := state.Degraded - if errors.Is(err, process.ErrAppNotRunning) { - s = state.Failed - } - - application.SetState(s, err.Error(), nil) - } - }() - return application.Configure(ctx, o.cfg) -} diff --git a/internal/pkg/agent/operation/operation_retryable.go b/internal/pkg/agent/operation/operation_retryable.go deleted file mode 100644 index b30fd68563c..00000000000 --- a/internal/pkg/agent/operation/operation_retryable.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package operation - -import ( - "context" - "fmt" - "strings" - - "github.com/elastic/elastic-agent/internal/pkg/core/retry" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -// retryableOperations consists of multiple operations which are -// retryable as a whole. 
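The doc comment above describes an all-or-nothing retry: if any operation in the block fails, the whole sequence is re-run. Below is a minimal standalone sketch of that idea using only the Go standard library; the step and runAll names are illustrative and are not taken from this code base.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// step is a single named unit of work; a failure of any step causes the
// whole sequence to be retried from the beginning.
type step struct {
	name string
	run  func(ctx context.Context) error
}

// runAll executes the steps in order, retrying the entire block up to
// attempts times with a fixed delay between passes.
func runAll(ctx context.Context, steps []step, attempts int, delay time.Duration) error {
	var lastErr error
	for i := 0; i < attempts; i++ {
		lastErr = nil
		for _, s := range steps {
			if err := ctx.Err(); err != nil {
				return err
			}
			if err := s.run(ctx); err != nil {
				lastErr = fmt.Errorf("step %s: %w", s.name, err)
				break // abort this pass; the whole block is retried
			}
		}
		if lastErr == nil {
			return nil
		}
		time.Sleep(delay)
	}
	return lastErr
}

func main() {
	calls := 0
	steps := []step{
		{name: "start", run: func(context.Context) error { return nil }},
		{name: "config", run: func(context.Context) error {
			calls++
			if calls < 2 {
				return errors.New("transient failure")
			}
			return nil
		}},
	}
	fmt.Println(runAll(context.Background(), steps, 3, 10*time.Millisecond))
}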
-// if nth operation fails all preceding are retried as well -type retryableOperations struct { - logger *logger.Logger - operations []operation - retryConfig *retry.Config -} - -func newRetryableOperations( - logger *logger.Logger, - retryConfig *retry.Config, - operations ...operation) *retryableOperations { - - return &retryableOperations{ - logger: logger, - retryConfig: retryConfig, - operations: operations, - } -} - -// Name is human readable name identifying an operation -func (o *retryableOperations) Name() string { - names := make([]string, 0, len(o.operations)) - for _, op := range o.operations { - names = append(names, op.Name()) - } - return fmt.Sprintf("retryable block: %s", strings.Join(names, " ")) -} - -// Check checks whether operation needs to be run -// examples: -// - Start does not need to run if process is running -// - Fetch does not need to run if package is already present -func (o *retryableOperations) Check(ctx context.Context, application Application) (bool, error) { - for _, op := range o.operations { - // finish early if at least one operation needs to be run or errored out - if run, err := op.Check(ctx, application); err != nil || run { - return run, err - } - } - - return false, nil -} - -// Run runs the operation -func (o *retryableOperations) Run(ctx context.Context, application Application) (err error) { - return retry.Do(ctx, o.retryConfig, o.runOnce(application)) -} - -// Run runs the operation -func (o *retryableOperations) runOnce(application Application) func(context.Context) error { - return func(ctx context.Context) error { - for _, op := range o.operations { - if ctx.Err() != nil { - return ctx.Err() - } - - shouldRun, err := op.Check(ctx, application) - if err != nil { - return err - } - - if !shouldRun { - continue - } - - o.logger.Debugf("running operation '%s' of the block '%s'", op.Name(), o.Name()) - if err := op.Run(ctx, application); err != nil { - o.logger.Errorf("operation %s failed, err: %v", op.Name(), err) - return err - } - } - - return nil - } -} - -// check interface -var _ operation = &retryableOperations{} diff --git a/internal/pkg/agent/operation/operation_start.go b/internal/pkg/agent/operation/operation_start.go deleted file mode 100644 index 38ee7167766..00000000000 --- a/internal/pkg/agent/operation/operation_start.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package operation - -import ( - "context" - - "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -// operationStart start installed process -// skips if process is already running -type operationStart struct { - logger *logger.Logger - program Descriptor - operatorConfig *configuration.SettingsConfig - cfg map[string]interface{} -} - -func newOperationStart( - logger *logger.Logger, - program Descriptor, - operatorConfig *configuration.SettingsConfig, - cfg map[string]interface{}) *operationStart { - // TODO: make configurable - - return &operationStart{ - logger: logger, - program: program, - operatorConfig: operatorConfig, - cfg: cfg, - } -} - -// Name is human readable name identifying an operation -func (o *operationStart) Name() string { - return "operation-start" -} - -// Check checks whether application needs to be started. -// -// Only starts the application when in stopped state, any other state -// and the application is handled by the life cycle inside of the `Application` -// implementation. -func (o *operationStart) Check(_ context.Context, application Application) (bool, error) { - if application.Started() { - return false, nil - } - return true, nil -} - -// Run runs the operation -func (o *operationStart) Run(ctx context.Context, application Application) (err error) { - defer func() { - if err != nil { - application.SetState(state.Failed, err.Error(), nil) - } - }() - - return application.Start(ctx, o.program, o.cfg) -} diff --git a/internal/pkg/agent/operation/operation_stop.go b/internal/pkg/agent/operation/operation_stop.go deleted file mode 100644 index cb33010e1af..00000000000 --- a/internal/pkg/agent/operation/operation_stop.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package operation - -import ( - "context" - - "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -// operationStop stops the running process -// skips if process is already skipped -type operationStop struct { - logger *logger.Logger - operatorConfig *configuration.SettingsConfig -} - -func newOperationStop( - logger *logger.Logger, - operatorConfig *configuration.SettingsConfig) *operationStop { - return &operationStop{ - logger: logger, - operatorConfig: operatorConfig, - } -} - -// Name is human readable name identifying an operation -func (o *operationStop) Name() string { - return "operation-stop" -} - -// Check checks whether application needs to be stopped. -// -// If the application state is not stopped then stop should be performed. 
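The stop operation follows the same Check-then-Run contract as the other operations in this package: Check reports whether the step still needs to happen, Run performs it. A simplified sketch of that contract and of the skip-if-not-needed loop around it follows; the real interface also receives the Application being acted on, and the types below are illustrative only.

package main

import (
	"context"
	"fmt"
)

// operation mirrors the Check-then-Run contract: Check decides whether the
// step is still needed, Run carries it out.
type operation interface {
	Name() string
	Check(ctx context.Context) (bool, error)
	Run(ctx context.Context) error
}

// runFlow walks the operations in order, skipping any whose Check returns
// false, so already-stopped applications are left alone.
func runFlow(ctx context.Context, ops []operation) error {
	for _, op := range ops {
		needed, err := op.Check(ctx)
		if err != nil {
			return err
		}
		if !needed {
			fmt.Printf("skipping %s\n", op.Name())
			continue
		}
		if err := op.Run(ctx); err != nil {
			return fmt.Errorf("%s: %w", op.Name(), err)
		}
	}
	return nil
}

// stopOp is a toy operation: it only runs when the app is not yet stopped.
type stopOp struct{ stopped *bool }

func (stopOp) Name() string                          { return "operation-stop" }
func (o stopOp) Check(context.Context) (bool, error) { return !*o.stopped, nil }
func (o stopOp) Run(context.Context) error           { *o.stopped = true; return nil }

func main() {
	stopped := false
	_ = runFlow(context.Background(), []operation{stopOp{&stopped}})
	_ = runFlow(context.Background(), []operation{stopOp{&stopped}}) // second call is skipped
	fmt.Println("stopped:", stopped)
}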
-func (o *operationStop) Check(_ context.Context, application Application) (bool, error) { - if application.State().Status != state.Stopped { - return true, nil - } - return false, nil -} - -// Run runs the operation -func (o *operationStop) Run(ctx context.Context, application Application) (err error) { - application.Stop() - return nil -} diff --git a/internal/pkg/agent/operation/operator.go b/internal/pkg/agent/operation/operator.go deleted file mode 100644 index 50bf725e193..00000000000 --- a/internal/pkg/agent/operation/operator.go +++ /dev/null @@ -1,396 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package operation - -import ( - "context" - "fmt" - "os" - "strings" - "sync" - "time" - - "go.elastic.co/apm" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/configrequest" - "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/agent/stateresolver" - "github.com/elastic/elastic-agent/internal/pkg/artifact/download" - "github.com/elastic/elastic-agent/internal/pkg/artifact/install" - "github.com/elastic/elastic-agent/internal/pkg/artifact/uninstall" - "github.com/elastic/elastic-agent/internal/pkg/core/app" - "github.com/elastic/elastic-agent/internal/pkg/core/monitoring" - "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/noop" - "github.com/elastic/elastic-agent/internal/pkg/core/plugin/process" - "github.com/elastic/elastic-agent/internal/pkg/core/plugin/service" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/internal/pkg/core/status" - "github.com/elastic/elastic-agent/pkg/core/logger" - "github.com/elastic/elastic-agent/pkg/core/server" -) - -const ( - isMonitoringMetricsFlag = 1 << 0 - isMonitoringLogsFlag = 1 << 1 -) - -type waiter interface { - Wait() -} - -// Operator runs Start/Stop/Update operations -// it is responsible for detecting reconnect to existing processes -// based on backed up configuration -// Enables running sidecars for processes. -// TODO: implement retry strategies -type Operator struct { - bgContext context.Context - pipelineID string - logger *logger.Logger - agentInfo *info.AgentInfo - config *configuration.SettingsConfig - handlers map[string]handleFunc - stateResolver *stateresolver.StateResolver - srv *server.Server - reporter state.Reporter - monitor monitoring.Monitor - isMonitoring int - - apps map[string]Application - appsLock sync.Mutex - - installer install.InstallerChecker - uninstaller uninstall.Uninstaller - downloader download.Downloader - verifier download.Verifier - statusController status.Controller - statusReporter status.Reporter -} - -// NewOperator creates a new operator, this operator holds -// a collection of running processes, back it up -// Based on backed up collection it prepares clients, watchers... 
on init -func NewOperator( - ctx context.Context, - logger *logger.Logger, - agentInfo *info.AgentInfo, - pipelineID string, - config *configuration.SettingsConfig, - fetcher download.Downloader, - verifier download.Verifier, - installer install.InstallerChecker, - uninstaller uninstall.Uninstaller, - stateResolver *stateresolver.StateResolver, - srv *server.Server, - reporter state.Reporter, - monitor monitoring.Monitor, - statusController status.Controller) (*Operator, error) { - if config.DownloadConfig == nil { - return nil, fmt.Errorf("artifacts configuration not provided") - } - - operator := &Operator{ - bgContext: ctx, - config: config, - pipelineID: pipelineID, - logger: logger, - agentInfo: agentInfo, - downloader: fetcher, - verifier: verifier, - installer: installer, - uninstaller: uninstaller, - stateResolver: stateResolver, - srv: srv, - apps: make(map[string]Application), - reporter: reporter, - monitor: monitor, - statusController: statusController, - statusReporter: statusController.RegisterComponent("operator-" + pipelineID), - } - - operator.initHandlerMap() - - os.MkdirAll(config.DownloadConfig.TargetDirectory, 0755) - os.MkdirAll(config.DownloadConfig.InstallPath, 0755) - - return operator, nil -} - -// State describes the current state of the system. -// Reports all known applications and theirs states. Whether they are running -// or not, and if they are information about process is also present. -func (o *Operator) State() map[string]state.State { - result := make(map[string]state.State) - - o.appsLock.Lock() - defer o.appsLock.Unlock() - - for k, v := range o.apps { - result[k] = v.State() - } - - return result -} - -// Specs returns all program specifications -func (o *Operator) Specs() map[string]program.Spec { - r := make(map[string]program.Spec) - - o.appsLock.Lock() - defer o.appsLock.Unlock() - - for _, app := range o.apps { - // use app.Name() instead of the (map) key so we can easy find the "_monitoring" processes - r[app.Name()] = app.Spec() - } - - return r -} - -// Close stops all programs handled by operator and clears state -func (o *Operator) Close() error { - o.monitor.Close() - o.statusReporter.Unregister() - - return o.HandleConfig(context.Background(), configrequest.New("", time.Now(), nil)) -} - -// HandleConfig handles configuration for a pipeline and performs actions to achieve this configuration. 
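HandleConfig's main loop is, at its core, a dispatch over resolved steps: each step ID is looked up in a handler map, and an unknown ID is treated as a configuration error. A pared-down sketch of that dispatch is shown below, with illustrative step IDs and without the status reporting and error wrapping of the real code.

package main

import "fmt"

// step carries the minimum needed for dispatch: an ID such as "run" or
// "remove" and the program it applies to (field names are illustrative).
type step struct {
	ID      string
	Program string
}

type handleFunc func(step) error

func main() {
	handlers := map[string]handleFunc{
		"run":    func(s step) error { fmt.Println("starting", s.Program); return nil },
		"remove": func(s step) error { fmt.Println("stopping", s.Program); return nil },
	}

	steps := []step{{ID: "run", Program: "filebeat"}, {ID: "remove", Program: "metricbeat"}}
	for _, s := range steps {
		h, ok := handlers[s.ID]
		if !ok {
			fmt.Printf("received unexpected event %q\n", s.ID)
			continue
		}
		if err := h(s); err != nil {
			fmt.Printf("failed to execute step %s: %v\n", s.ID, err)
		}
	}
}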
-func (o *Operator) HandleConfig(ctx context.Context, cfg configrequest.Request) (err error) { - span, ctx := apm.StartSpan(ctx, "route", "app.internal") - defer func() { - if !errors.Is(err, context.Canceled) { - apm.CaptureError(ctx, err).Send() - } - span.End() - }() - - _, stateID, steps, ack, err := o.stateResolver.Resolve(cfg) - if err != nil { - if !errors.Is(err, context.Canceled) { - // error is not filtered and should be reported - o.statusReporter.Update(state.Failed, err.Error(), nil) - err = errors.New(err, errors.TypeConfig, fmt.Sprintf("operator: failed to resolve configuration %s, error: %v", cfg, err)) - } - - return err - } - o.statusController.UpdateStateID(stateID) - - for _, step := range steps { - if !strings.EqualFold(step.ProgramSpec.CommandName(), monitoringName) { - if _, isSupported := program.SupportedMap[step.ProgramSpec.CommandName()]; !isSupported { - // mark failed, new config cannot be run - msg := fmt.Sprintf("program '%s' is not supported", step.ProgramSpec.CommandName()) - o.statusReporter.Update(state.Failed, msg, nil) - return errors.New(msg, - errors.TypeApplication, - errors.M(errors.MetaKeyAppName, step.ProgramSpec.CommandName())) - } - } - - handler, found := o.handlers[step.ID] - if !found { - msg := fmt.Sprintf("operator: received unexpected event '%s'", step.ID) - o.statusReporter.Update(state.Failed, msg, nil) - return errors.New(msg, errors.TypeConfig) - } - - if err := handler(step); err != nil { - msg := fmt.Sprintf("operator: failed to execute step %s, error: %v", step.ID, err) - o.statusReporter.Update(state.Failed, msg, nil) - return errors.New(err, errors.TypeConfig, msg) - } - } - - // Ack the resolver should state for next call. - o.statusReporter.Update(state.Healthy, "", nil) - ack() - - return nil -} - -// Shutdown handles shutting down the running apps for Agent shutdown. 
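Shutdown fans out one goroutine per running application and waits on a WaitGroup, logging how long each application took to stop. The same pattern in isolation is sketched below, with a placeholder app type standing in for the real Application interface.

package main

import (
	"fmt"
	"sync"
	"time"
)

// app is a stand-in for anything with a blocking Shutdown method.
type app struct {
	name  string
	delay time.Duration
}

func (a app) Shutdown() { time.Sleep(a.delay) }

func main() {
	apps := []app{{"filebeat", 20 * time.Millisecond}, {"metricbeat", 30 * time.Millisecond}}

	started := time.Now()
	var wg sync.WaitGroup
	wg.Add(len(apps))
	for _, a := range apps {
		go func(a app) {
			defer wg.Done()
			s := time.Now()
			a.Shutdown()
			fmt.Printf("took %s to shutdown %s\n", time.Since(s), a.name)
		}(a)
	}
	wg.Wait()
	fmt.Printf("took %s to shutdown %d apps\n", time.Since(started), len(apps))
}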
-func (o *Operator) Shutdown() { - // wait for installer and downloader - if awaitable, ok := o.installer.(waiter); ok { - o.logger.Infof("waiting for installer of pipeline '%s' to finish", o.pipelineID) - awaitable.Wait() - o.logger.Debugf("pipeline installer '%s' done", o.pipelineID) - } - - o.appsLock.Lock() - defer o.appsLock.Unlock() - - wg := sync.WaitGroup{} - wg.Add(len(o.apps)) - - started := time.Now() - - for _, a := range o.apps { - go func(a Application) { - started := time.Now() - a.Shutdown() - wg.Done() - o.logger.Debugf("took %s to shutdown %s", - time.Now().Sub(started), a.Name()) - }(a) - } - wg.Wait() - o.logger.Debugf("took %s to shutdown %d apps", - time.Now().Sub(started), len(o.apps)) -} - -// Start starts a new process based on a configuration -// specific configuration of new process is passed -func (o *Operator) start(p Descriptor, cfg map[string]interface{}) (err error) { - flow := []operation{ - newOperationStart(o.logger, p, o.config, cfg), - newOperationConfig(o.logger, o.config, cfg), - } - - return o.runFlow(p, flow) -} - -// Stop stops the running process, if process is already stopped it does not return an error -func (o *Operator) stop(p Descriptor) (err error) { - flow := []operation{ - newOperationStop(o.logger, o.config), - } - - return o.runFlow(p, flow) -} - -// PushConfig tries to push config to a running process -func (o *Operator) pushConfig(p Descriptor, cfg map[string]interface{}) error { - flow := []operation{ - newOperationConfig(o.logger, o.config, cfg), - } - - return o.runFlow(p, flow) -} - -func (o *Operator) runFlow(p Descriptor, operations []operation) error { - if len(operations) == 0 { - o.logger.Infof("operator received event with no operations for program '%s'", p.ID()) - return nil - } - - app, err := o.getApp(p) - if err != nil { - return err - } - - for _, op := range operations { - if err := o.bgContext.Err(); err != nil { - return err - } - - shouldRun, err := op.Check(o.bgContext, app) - if err != nil { - return err - } - - if !shouldRun { - o.logger.Infof("operation '%s' skipped for %s.%s", op.Name(), p.BinaryName(), p.Version()) - continue - } - - o.logger.Debugf("running operation '%s' for %s.%s", op.Name(), p.BinaryName(), p.Version()) - if err := op.Run(o.bgContext, app); err != nil { - return err - } - } - - // when application is stopped remove from the operator - if app.State().Status == state.Stopped { - o.deleteApp(p) - } - - return nil -} - -func (o *Operator) getApp(p Descriptor) (Application, error) { - o.appsLock.Lock() - defer o.appsLock.Unlock() - - id := p.ID() - - o.logger.Debugf("operator is looking for %s in app collection: %v", p.ID(), o.apps) - if a, ok := o.apps[id]; ok { - return a, nil - } - - desc, ok := p.(*app.Descriptor) - if !ok { - return nil, fmt.Errorf("descriptor is not an app.Descriptor") - } - - // TODO: (michal) join args into more compact options version - var a Application - var err error - - monitor := o.monitor - appName := p.BinaryName() - if app.IsSidecar(p) { - // make watchers unmonitorable - monitor = noop.NewMonitor() - appName += "_monitoring" - } - - if p.ServicePort() == 0 { - // Applications without service ports defined are ran as through the process application type. 
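The branch that follows chooses between the two application runtimes based solely on whether the program spec declares a service port: no port means the program is launched and supervised as a child process, a port means it runs as a service that connects back over that port. That selection, reduced to a standalone sketch with made-up descriptor values:

package main

import "fmt"

// descriptor is a stand-in carrying only the field that drives the choice.
type descriptor struct {
	name        string
	servicePort int
}

// runtimeKind reflects the branch taken in getApp: port 0 selects the
// process application type, anything else selects the service type.
func runtimeKind(d descriptor) string {
	if d.servicePort == 0 {
		return "process"
	}
	return fmt.Sprintf("service (port %d)", d.servicePort)
}

func main() {
	fmt.Println(runtimeKind(descriptor{name: "filebeat"}))
	fmt.Println(runtimeKind(descriptor{name: "serviceable", servicePort: 7123}))
}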
- a, err = process.NewApplication( - o.bgContext, - p.ID(), - appName, - o.pipelineID, - o.config.LoggingConfig.Level.String(), - desc, - o.srv, - o.config, - o.logger, - o.reporter, - monitor, - o.statusController) - } else { - // Service port is defined application is ran with service application type, with it fetching - // the connection credentials through the defined service port. - a, err = service.NewApplication( - o.bgContext, - p.ID(), - appName, - o.pipelineID, - o.config.LoggingConfig.Level.String(), - p.ServicePort(), - desc, - o.srv, - o.config, - o.logger, - o.reporter, - monitor, - o.statusController) - } - - if err != nil { - return nil, err - } - - o.apps[id] = a - return a, nil -} - -func (o *Operator) deleteApp(p Descriptor) { - o.appsLock.Lock() - defer o.appsLock.Unlock() - - id := p.ID() - - o.logger.Debugf("operator is removing %s from app collection: %v", p.ID(), o.apps) - delete(o.apps, id) -} diff --git a/internal/pkg/agent/operation/operator_handlers.go b/internal/pkg/agent/operation/operator_handlers.go deleted file mode 100644 index 36f10b3d70e..00000000000 --- a/internal/pkg/agent/operation/operator_handlers.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package operation - -import ( - "fmt" - "strings" - - "github.com/elastic/elastic-agent/internal/pkg/agent/configrequest" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/artifact" - "github.com/elastic/elastic-agent/internal/pkg/core/app" - "github.com/elastic/elastic-agent/internal/pkg/release" -) - -type handleFunc func(step configrequest.Step) error - -func (o *Operator) initHandlerMap() { - hm := make(map[string]handleFunc) - - hm[configrequest.StepRun] = o.handleRun - hm[configrequest.StepRemove] = o.handleRemove - - o.handlers = hm -} - -func (o *Operator) handleRun(step configrequest.Step) error { - if strings.EqualFold(step.ProgramSpec.CommandName(), monitoringName) { - return o.handleStartSidecar(step) - } - - p, cfg, err := getProgramFromStep(step, o.config.DownloadConfig) - if err != nil { - return errors.New(err, - "operator.handleStart failed to create program", - errors.TypeApplication, - errors.M(errors.MetaKeyAppName, step.ProgramSpec.CommandName())) - } - - return o.start(p, cfg) -} - -func (o *Operator) handleRemove(step configrequest.Step) error { - o.logger.Debugf("stopping process %s: %s", step.ProgramSpec.CommandName(), step.ID) - if strings.EqualFold(step.ProgramSpec.CommandName(), monitoringName) { - return o.handleStopSidecar(step) - } - - p, _, err := getProgramFromStep(step, o.config.DownloadConfig) - if err != nil { - return errors.New(err, - "operator.handleRemove failed to stop program", - errors.TypeApplication, - errors.M(errors.MetaKeyAppName, step.ProgramSpec.CommandName())) - } - - return o.stop(p) -} - -func getProgramFromStep(step configrequest.Step, artifactConfig *artifact.Config) (Descriptor, map[string]interface{}, error) { - return getProgramFromStepWithTags(step, artifactConfig, nil) -} - -func getProgramFromStepWithTags(step configrequest.Step, artifactConfig *artifact.Config, tags map[app.Tag]string) (Descriptor, map[string]interface{}, error) { - config, err := getConfigFromStep(step) - if err != nil { - return nil, nil, err - } - - 
version := step.Version - if release.Snapshot() { - version = fmt.Sprintf("%s-SNAPSHOT", version) - } - - p := app.NewDescriptor(step.ProgramSpec, version, artifactConfig, tags) - return p, config, nil -} - -func getConfigFromStep(step configrequest.Step) (map[string]interface{}, error) { - metConfig, hasConfig := step.Meta[configrequest.MetaConfigKey] - - if !hasConfig && needsMetaConfig(step) { - return nil, fmt.Errorf("step: %s, no config in metadata", step.ID) - } - - var config map[string]interface{} - if hasConfig { - var ok bool - config, ok = metConfig.(map[string]interface{}) - if !ok { - return nil, errors.New(errors.TypeConfig, - fmt.Sprintf("step: %s, program config is in invalid format", step.ID)) - } - } - - return config, nil -} - -func needsMetaConfig(step configrequest.Step) bool { - return step.ID == configrequest.StepRun -} diff --git a/internal/pkg/agent/operation/operator_test.go b/internal/pkg/agent/operation/operator_test.go deleted file mode 100644 index 8400918a023..00000000000 --- a/internal/pkg/agent/operation/operator_test.go +++ /dev/null @@ -1,489 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package operation - -import ( - "fmt" - "math/rand" - "net" - "os" - "os/exec" - "path/filepath" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/elastic/elastic-agent-client/v7/pkg/proto" - - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/core/state" -) - -func TestMain(m *testing.M) { - // init supported with test cases - port, err := getFreePort() - if err != nil { - panic(err) - } - - configurableSpec := program.Spec{ - Name: "configurable", - Cmd: "configurable", - Args: []string{}, - } - - serviceSpec := program.Spec{ - ServicePort: port, - Name: "serviceable", - Cmd: "serviceable", - Args: []string{fmt.Sprintf("%d", port)}, - } - - program.Supported = append(program.Supported, configurableSpec, serviceSpec) - program.SupportedMap["configurable"] = configurableSpec - program.SupportedMap["serviceable"] = serviceSpec - - if err := isAvailable("configurable"); err != nil { - panic(err) - } - if err := isAvailable("serviceable"); err != nil { - panic(err) - } - - os.Exit(m.Run()) -} - -func TestNotSupported(t *testing.T) { - p := getProgram("notsupported", "1.0") - - operator := getTestOperator(t, downloadPath, installPath, p) - err := operator.start(p, nil) - if err == nil { - t.Fatal("was expecting error but got none") - } -} - -func TestConfigurableRun(t *testing.T) { - p := getProgram("configurable", "1.0") - - operator := getTestOperator(t, downloadPath, installPath, p) - if err := operator.start(p, nil); err != nil { - t.Fatal(err) - } - defer operator.stop(p) // failure catch, to ensure no sub-process stays running - - waitFor(t, func() error { - items := operator.State() - item, ok := items[p.ID()] - if !ok { - return fmt.Errorf("no state for process") - } - if item.Status != state.Healthy { - return fmt.Errorf("process never went to running") - } - return nil - }) - - // try to configure - cfg := make(map[string]interface{}) - tstFilePath := filepath.Join(os.TempDir(), fmt.Sprintf("tmp%d", rand.Uint32())) - cfg["TestFile"] = tstFilePath - if err := operator.pushConfig(p, cfg); err != nil { - t.Fatalf("failed to config: %v", err) - } - - waitFor(t, 
func() error { - if s, err := os.Stat(tstFilePath); err != nil || s == nil { - return fmt.Errorf("failed to create a file using Config call %s", tstFilePath) - } - return nil - }) - - // wait to finish configuring - waitFor(t, func() error { - items := operator.State() - item, ok := items[p.ID()] - if ok && item.Status == state.Configuring { - return fmt.Errorf("process still configuring") - } - return nil - }) - - items := operator.State() - item0, ok := items[p.ID()] - if !ok || item0.Status != state.Healthy { - t.Fatalf("Process no longer running after config %#v", items) - } - pid := item0.ProcessInfo.PID - - // stop the process - if err := operator.stop(p); err != nil { - t.Fatalf("Failed to stop process with PID %d: %v", pid, err) - } - - waitFor(t, func() error { - items := operator.State() - _, ok := items[p.ID()] - if ok { - return fmt.Errorf("state for process, should be removed") - } - return nil - }) - - // check process stopped - proc, err := os.FindProcess(pid) - if err != nil && proc != nil { - t.Fatal("Process found") - } -} - -func TestConfigurableFailed(t *testing.T) { - p := getProgram("configurable", "1.0") - - operator := getTestOperator(t, downloadPath, installPath, p) - if err := operator.start(p, nil); err != nil { - t.Fatal(err) - } - defer operator.stop(p) // failure catch, to ensure no sub-process stays running - - var pid int - waitFor(t, func() error { - items := operator.State() - item, ok := items[p.ID()] - if !ok { - return fmt.Errorf("no state for process") - } - if item.Status != state.Healthy { - return fmt.Errorf("process never went to running") - } - pid = item.ProcessInfo.PID - return nil - }) - items := operator.State() - item, ok := items[p.ID()] - if !ok { - t.Fatalf("no state for process") - } - assert.Equal(t, map[string]interface{}{ - "status": float64(proto.StateObserved_HEALTHY), - "message": "Running", - }, item.Payload) - - // try to configure (with failed status) - cfg := make(map[string]interface{}) - tstFilePath := filepath.Join(os.TempDir(), fmt.Sprintf("tmp%d", rand.Uint32())) - cfg["TestFile"] = tstFilePath - cfg["Status"] = proto.StateObserved_FAILED - if err := operator.pushConfig(p, cfg); err != nil { - t.Fatalf("failed to config: %v", err) - } - - // should still create the file - waitFor(t, func() error { - if s, err := os.Stat(tstFilePath); err != nil || s == nil { - return fmt.Errorf("failed to create a file using Config call %s", tstFilePath) - } - return nil - }) - - // wait for not running status - waitFor(t, func() error { - items := operator.State() - item, ok := items[p.ID()] - if !ok { - return fmt.Errorf("no state for process") - } - if item.Status == state.Healthy { - return fmt.Errorf("process never left running") - } - return nil - }) - - // don't send status anymore - delete(cfg, "Status") - if err := operator.pushConfig(p, cfg); err != nil { - t.Fatalf("failed to config: %v", err) - } - - // check that it restarted (has a new PID) - waitFor(t, func() error { - items := operator.State() - item, ok := items[p.ID()] - if !ok { - return fmt.Errorf("no state for process") - } - if item.ProcessInfo == nil { - return fmt.Errorf("in restart loop") - } - if pid == item.ProcessInfo.PID { - return fmt.Errorf("process never restarted") - } - pid = item.ProcessInfo.PID - return nil - }) - - waitFor(t, func() error { - items := operator.State() - item, ok := items[p.ID()] - if !ok { - return fmt.Errorf("no state for process") - } - if item.Status != state.Healthy { - return fmt.Errorf("process never went to back to running") - } - 
return nil - }) - - // stop the process - if err := operator.stop(p); err != nil { - t.Fatalf("Failed to stop process with PID %d: %v", pid, err) - } - - // check process stopped - proc, err := os.FindProcess(pid) - if err != nil && proc != nil { - t.Fatal("Process found") - } -} - -func TestConfigurableCrash(t *testing.T) { - p := getProgram("configurable", "1.0") - - operator := getTestOperator(t, downloadPath, installPath, p) - if err := operator.start(p, nil); err != nil { - t.Fatal(err) - } - defer operator.stop(p) // failure catch, to ensure no sub-process stays running - - var pid int - waitFor(t, func() error { - items := operator.State() - item, ok := items[p.ID()] - if !ok { - return fmt.Errorf("no state for process") - } - if item.Status != state.Healthy { - return fmt.Errorf("process never went to running") - } - pid = item.ProcessInfo.PID - return nil - }) - - // try to configure (with failed status) - cfg := make(map[string]interface{}) - tstFilePath := filepath.Join(os.TempDir(), fmt.Sprintf("tmp%d", rand.Uint32())) - cfg["TestFile"] = tstFilePath - cfg["Crash"] = true - if err := operator.pushConfig(p, cfg); err != nil { - t.Fatalf("failed to config: %v", err) - } - - // should still create the file - waitFor(t, func() error { - if s, err := os.Stat(tstFilePath); err != nil || s == nil { - return fmt.Errorf("failed to create a file using Config call %s", tstFilePath) - } - return nil - }) - - // wait for not running status - waitFor(t, func() error { - items := operator.State() - item, ok := items[p.ID()] - if !ok { - return fmt.Errorf("no state for process") - } - if item.Status == state.Healthy { - return fmt.Errorf("process never left running") - } - return nil - }) - - // don't send crash anymore - delete(cfg, "Crash") - if err := operator.pushConfig(p, cfg); err != nil { - t.Fatalf("failed to config: %v", err) - } - - // check that it restarted (has a new PID) - waitFor(t, func() error { - items := operator.State() - item, ok := items[p.ID()] - if !ok { - return fmt.Errorf("no state for process") - } - if item.ProcessInfo == nil { - return fmt.Errorf("in restart loop") - } - if pid == item.ProcessInfo.PID { - return fmt.Errorf("process never restarted") - } - pid = item.ProcessInfo.PID - return nil - }) - - // let the process get back to ready - waitFor(t, func() error { - items := operator.State() - item, ok := items[p.ID()] - if !ok { - return fmt.Errorf("no state for process") - } - if item.Status != state.Healthy { - return fmt.Errorf("process never went to back to running") - } - return nil - }) - - // stop the process - if err := operator.stop(p); err != nil { - t.Fatalf("Failed to stop process with PID %d: %v", pid, err) - } - - // check process stopped - proc, err := os.FindProcess(pid) - if err != nil && proc != nil { - t.Fatal("Process found") - } -} - -func TestConfigurableStartStop(t *testing.T) { - p := getProgram("configurable", "1.0") - - operator := getTestOperator(t, downloadPath, installPath, p) - defer operator.stop(p) // failure catch, to ensure no sub-process stays running - - // start and stop it 3 times - for i := 0; i < 3; i++ { - if err := operator.start(p, nil); err != nil { - t.Fatal(err) - } - - waitFor(t, func() error { - items := operator.State() - item, ok := items[p.ID()] - if !ok { - return fmt.Errorf("no state for process") - } - if item.Status != state.Healthy { - return fmt.Errorf("process never went to running") - } - return nil - }) - - // stop the process - if err := operator.stop(p); err != nil { - t.Fatalf("Failed to stop 
process: %v", err) - } - - waitFor(t, func() error { - items := operator.State() - _, ok := items[p.ID()] - if ok { - return fmt.Errorf("state for process, should be removed") - } - return nil - }) - } -} - -func TestConfigurableService(t *testing.T) { - t.Skip("Flaky test: https://github.com/elastic/beats/issues/23607") - p := getProgram("serviceable", "1.0") - - operator := getTestOperator(t, downloadPath, installPath, p) - if err := operator.start(p, nil); err != nil { - t.Fatal(err) - } - defer operator.stop(p) // failure catch, to ensure no sub-process stays running - - // emulating a service, so we need to start the binary here in the test - spec := p.ProcessSpec() - cmd := exec.Command(spec.BinaryPath, fmt.Sprintf("%d", p.ServicePort())) - cmd.Env = append(cmd.Env, os.Environ()...) - cmd.Dir = filepath.Dir(spec.BinaryPath) - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - if err := cmd.Start(); err != nil { - t.Fatal(err) - } - - waitFor(t, func() error { - items := operator.State() - item, ok := items[p.ID()] - if !ok { - return fmt.Errorf("no state for process") - } - if item.Status != state.Healthy { - return fmt.Errorf("process never went to running") - } - return nil - }) - - // try to configure - cfg := make(map[string]interface{}) - tstFilePath := filepath.Join(os.TempDir(), fmt.Sprintf("tmp%d", rand.Uint32())) - cfg["TestFile"] = tstFilePath - if err := operator.pushConfig(p, cfg); err != nil { - t.Fatalf("failed to config: %v", err) - } - - waitFor(t, func() error { - if s, err := os.Stat(tstFilePath); err != nil || s == nil { - return fmt.Errorf("failed to create a file using Config call %s", tstFilePath) - } - return nil - }) - - // wait to finish configuring - waitFor(t, func() error { - items := operator.State() - item, ok := items[p.ID()] - if ok && item.Status == state.Configuring { - return fmt.Errorf("process still configuring") - } - return nil - }) - - items := operator.State() - item0, ok := items[p.ID()] - if !ok || item0.Status != state.Healthy { - t.Fatalf("Process no longer running after config %#v", items) - } - - // stop the process - if err := operator.stop(p); err != nil { - t.Fatalf("Failed to stop service: %v", err) - } - - if err := cmd.Wait(); err != nil { - t.Fatalf("Process failed: %v", err) - } -} - -func isAvailable(name string) error { - p := getProgram(name, "version") - spec := p.ProcessSpec() - path := spec.BinaryPath - - if s, err := os.Stat(path); err != nil || s == nil { - return fmt.Errorf("binary not available %s: %v", spec.BinaryPath, err) - } - return nil -} - -// getFreePort finds a free port. 
-func getFreePort() (int, error) {
-	addr, err := net.ResolveTCPAddr("tcp", "localhost:0")
-	if err != nil {
-		return 0, err
-	}
-	l, err := net.ListenTCP("tcp", addr)
-	if err != nil {
-		return 0, err
-	}
-	defer l.Close()
-	return l.Addr().(*net.TCPAddr).Port, nil
-}
diff --git a/internal/pkg/agent/operation/tests/downloads/-1.0-darwin-x86_64.tar.gz b/internal/pkg/agent/operation/tests/downloads/-1.0-darwin-x86_64.tar.gz
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/internal/pkg/agent/operation/tests/downloads/configurable-1.0-darwin-x86_64.tar.gz b/internal/pkg/agent/operation/tests/downloads/configurable-1.0-darwin-x86_64.tar.gz
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/internal/pkg/agent/operation/tests/scripts/configurable-1.0-darwin-x86_64/README.md b/internal/pkg/agent/operation/tests/scripts/configurable-1.0-darwin-x86_64/README.md
deleted file mode 100644
index 309d9b655d8..00000000000
--- a/internal/pkg/agent/operation/tests/scripts/configurable-1.0-darwin-x86_64/README.md
+++ /dev/null
@@ -1 +0,0 @@
-Testing program emulating tool which is configurable using GRPC communication channel when running as a sub-process.
diff --git a/internal/pkg/agent/operation/tests/scripts/configurable-1.0-darwin-x86_64/main.go b/internal/pkg/agent/operation/tests/scripts/configurable-1.0-darwin-x86_64/main.go
deleted file mode 100644
index 1e6c88106b6..00000000000
--- a/internal/pkg/agent/operation/tests/scripts/configurable-1.0-darwin-x86_64/main.go
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
- -package main - -import ( - "context" - "fmt" - "os" - "path/filepath" - - "gopkg.in/yaml.v2" - - "github.com/elastic/elastic-agent-client/v7/pkg/client" - "github.com/elastic/elastic-agent-client/v7/pkg/proto" -) - -func main() { - f, _ := os.OpenFile(filepath.Join(os.TempDir(), "testing.out"), os.O_APPEND|os.O_CREATE|os.O_RDWR, 0666) - f.WriteString("starting \n") - ctx, cancel := context.WithCancel(context.Background()) - s := &configServer{ - f: f, - ctx: ctx, - cancel: cancel, - } - client, err := client.NewFromReader(os.Stdin, s) - if err != nil { - f.WriteString(err.Error()) - panic(err) - } - s.client = client - err = client.Start(ctx) - if err != nil { - f.WriteString(err.Error()) - panic(err) - } - <-ctx.Done() - f.WriteString("finished \n") -} - -type configServer struct { - f *os.File - ctx context.Context - cancel context.CancelFunc - client client.Client -} - -func (s *configServer) OnConfig(cfgString string) { - s.client.Status(proto.StateObserved_CONFIGURING, "Writing config file", nil) - - testCfg := &TestConfig{} - if err := yaml.Unmarshal([]byte(cfgString), &testCfg); err != nil { - s.client.Status(proto.StateObserved_FAILED, fmt.Sprintf("Failed to unmarshall config: %s", err), nil) - return - } - - if testCfg.TestFile != "" { - tf, err := os.Create(testCfg.TestFile) - if err != nil { - s.client.Status(proto.StateObserved_FAILED, fmt.Sprintf("Failed to create file %s: %s", testCfg.TestFile, err), nil) - return - } - - err = tf.Close() - if err != nil { - s.client.Status(proto.StateObserved_FAILED, fmt.Sprintf("Failed to close file %s: %s", testCfg.TestFile, err), nil) - return - } - } - - if testCfg.Crash { - os.Exit(2) - } - - if testCfg.Status != nil { - s.client.Status(*testCfg.Status, "Custom status", map[string]interface{}{ - "status": *testCfg.Status, - "message": "Custom status", - }) - } else { - s.client.Status(proto.StateObserved_HEALTHY, "Running", map[string]interface{}{ - "status": proto.StateObserved_HEALTHY, - "message": "Running", - }) - } -} - -func (s *configServer) OnStop() { - s.client.Status(proto.StateObserved_STOPPING, "Stopping", nil) - s.cancel() -} - -func (s *configServer) OnError(err error) { - s.f.WriteString(err.Error()) -} - -// TestConfig is a configuration for testing Config calls -type TestConfig struct { - TestFile string `config:"TestFile" yaml:"TestFile"` - Status *proto.StateObserved_Status `config:"Status" yaml:"Status"` - Crash bool `config:"Crash" yaml:"Crash"` -} diff --git a/internal/pkg/agent/operation/tests/scripts/serviceable-1.0-darwin-x86_64/README.md b/internal/pkg/agent/operation/tests/scripts/serviceable-1.0-darwin-x86_64/README.md deleted file mode 100644 index da8cc52049c..00000000000 --- a/internal/pkg/agent/operation/tests/scripts/serviceable-1.0-darwin-x86_64/README.md +++ /dev/null @@ -1 +0,0 @@ -Testing program emulating tool which is configurable using GRPC communication channel when running as an external service. diff --git a/internal/pkg/agent/operation/tests/scripts/serviceable-1.0-darwin-x86_64/main.go b/internal/pkg/agent/operation/tests/scripts/serviceable-1.0-darwin-x86_64/main.go deleted file mode 100644 index 99ceab143f1..00000000000 --- a/internal/pkg/agent/operation/tests/scripts/serviceable-1.0-darwin-x86_64/main.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package main - -import ( - "context" - "crypto/tls" - "crypto/x509" - "fmt" - "net" - "os" - "path/filepath" - "strconv" - - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - protobuf "google.golang.org/protobuf/proto" - "gopkg.in/yaml.v2" - - "github.com/elastic/elastic-agent-client/v7/pkg/client" - "github.com/elastic/elastic-agent-client/v7/pkg/proto" -) - -func main() { - srvPort, err := strconv.Atoi(os.Args[1]) - if err != nil { - panic(err) - } - f, _ := os.OpenFile(filepath.Join(os.TempDir(), "testing.out"), os.O_APPEND|os.O_CREATE|os.O_RDWR, 0666) - _, _ = f.WriteString("starting \n") - ctx, cancel := context.WithCancel(context.Background()) - s := &configServer{ - f: f, - ctx: ctx, - cancel: cancel, - } - _, _ = f.WriteString(fmt.Sprintf("reading creds from port: %d\n", srvPort)) - client, err := clientFromNet(srvPort, s) - if err != nil { - _, _ = f.WriteString(err.Error()) - panic(err) - } - s.client = client - err = client.Start(ctx) - if err != nil { - _, _ = f.WriteString(err.Error()) - panic(err) - } - <-ctx.Done() - _, _ = f.WriteString("finished \n") -} - -type configServer struct { - f *os.File - ctx context.Context - cancel context.CancelFunc - client client.Client -} - -func (s *configServer) OnConfig(cfgString string) { - _ = s.client.Status(proto.StateObserved_CONFIGURING, "Writing config file", nil) - - testCfg := &TestConfig{} - if err := yaml.Unmarshal([]byte(cfgString), &testCfg); err != nil { - _ = s.client.Status(proto.StateObserved_FAILED, fmt.Sprintf("Failed to unmarshall config: %s", err), nil) - return - } - - if testCfg.TestFile != "" { - tf, err := os.Create(testCfg.TestFile) - if err != nil { - _ = s.client.Status(proto.StateObserved_FAILED, fmt.Sprintf("Failed to create file %s: %s", testCfg.TestFile, err), nil) - return - } - - err = tf.Close() - if err != nil { - _ = s.client.Status(proto.StateObserved_FAILED, fmt.Sprintf("Failed to close file %s: %s", testCfg.TestFile, err), nil) - return - } - } - - _ = s.client.Status(proto.StateObserved_HEALTHY, "Running", map[string]interface{}{ - "status": proto.StateObserved_HEALTHY, - "message": "Running", - }) -} - -func (s *configServer) OnStop() { - _ = s.client.Status(proto.StateObserved_STOPPING, "Stopping", nil) - s.cancel() -} - -func (s *configServer) OnError(err error) { - _, _ = s.f.WriteString(err.Error()) -} - -// TestConfig is a configuration for testing Config calls -type TestConfig struct { - TestFile string `config:"TestFile" yaml:"TestFile"` -} - -func getCreds(port int) (*proto.ConnInfo, error) { - c, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", port)) - if err != nil { - return nil, err - } - defer c.Close() - buf := make([]byte, 1024*1024) - n, err := c.Read(buf) - if err != nil { - return nil, err - } - var connInfo proto.ConnInfo - err = protobuf.Unmarshal(buf[:n], &connInfo) - if err != nil { - return nil, err - } - return &connInfo, nil -} - -func clientFromNet(port int, impl client.StateInterface, actions ...client.Action) (client.Client, error) { - connInfo, err := getCreds(port) - if err != nil { - return nil, err - } - cert, err := tls.X509KeyPair(connInfo.PeerCert, connInfo.PeerKey) - if err != nil { - return nil, err - } - caCertPool := x509.NewCertPool() - caCertPool.AppendCertsFromPEM(connInfo.CaCert) - trans := credentials.NewTLS(&tls.Config{ - ServerName: connInfo.ServerName, - Certificates: []tls.Certificate{cert}, - RootCAs: caCertPool, - MinVersion: tls.VersionTLS12, - }) - return client.New(connInfo.Addr, connInfo.Token, impl, 
actions, grpc.WithTransportCredentials(trans)), nil -} diff --git a/internal/pkg/agent/program/program.go b/internal/pkg/agent/program/program.go deleted file mode 100644 index 08f30a81609..00000000000 --- a/internal/pkg/agent/program/program.go +++ /dev/null @@ -1,307 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package program - -import ( - "fmt" - "strings" - - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" - "github.com/elastic/elastic-agent/internal/pkg/eql" -) - -// Program represents a program that must be started or must run . -type Program struct { - Spec Spec - Config *transpiler.AST -} - -// Cmd return the execution command to run. -func (p *Program) Cmd() string { - return p.Spec.Command() -} - -// Checksum return the checksum of the current instance of the program. -func (p *Program) Checksum() string { - return p.Config.HashStr() -} - -// Identifier returns the Program unique identifier. -func (p *Program) Identifier() string { - return strings.ToLower(p.Spec.Name) -} - -// Configuration return the program configuration in a map[string]iface format. -func (p *Program) Configuration() map[string]interface{} { - m, err := p.Config.Map() - if err != nil { - // TODO, that should not panic, refactor to remove any panic. - // Will refactor to never return an error at this stage. - panic(err) - } - return m -} - -// Programs take a Tree representation of the main configuration and apply all the different -// programs rules and generate individual configuration from the rules. -func Programs(agentInfo transpiler.AgentInfo, singleConfig *transpiler.AST) (map[string][]Program, error) { - grouped, err := groupByOutputs(singleConfig) - if err != nil { - return nil, errors.New(err, errors.TypeConfig, "fail to extract program configuration") - } - - groupedPrograms := make(map[string][]Program) - for k, config := range grouped { - programs, err := DetectPrograms(agentInfo, config) - if err != nil { - return nil, errors.New(err, errors.TypeConfig, "fail to generate program configuration") - } - groupedPrograms[k] = programs - } - - return groupedPrograms, nil -} - -// DetectPrograms returns the list of programs detected from the provided configuration. -func DetectPrograms(agentInfo transpiler.AgentInfo, singleConfig *transpiler.AST) ([]Program, error) { - programs := make([]Program, 0) - for _, spec := range Supported { - specificAST := singleConfig.Clone() - ok, err := DetectProgram(spec.Rules, spec.When, spec.Constraints, agentInfo, specificAST) - if err != nil { - return nil, err - } - if !ok { - continue - } - program := Program{ - Spec: spec, - Config: specificAST, - } - programs = append(programs, program) - } - - return programs, nil -} - -// DetectProgram returns true or false if this program exists in the AST. -// -// Note `ast` is modified to match what the program expects. Should clone the AST before passing to -// this function if you want to still have the original. 
-func DetectProgram(rules *transpiler.RuleList, when string, constraints string, info transpiler.AgentInfo, ast *transpiler.AST) (bool, error) { - if len(constraints) > 0 { - constraints, err := eql.New(constraints) - if err != nil { - return false, err - } - ok, err := constraints.Eval(ast) - if err != nil { - return false, err - } - if !ok { - return false, nil - } - } - - err := rules.Apply(info, ast) - if err != nil { - return false, err - } - - if len(when) == 0 { - return false, ErrMissingWhen - } - - expression, err := eql.New(when) - if err != nil { - return false, err - } - - return expression.Eval(ast) -} - -// KnownProgramNames returns a list of runnable programs by the elastic-agent. -func KnownProgramNames() []string { - names := make([]string, len(Supported)) - for idx, program := range Supported { - names[idx] = program.Name - } - - return names -} - -func groupByOutputs(single *transpiler.AST) (map[string]*transpiler.AST, error) { - const ( - outputsKey = "outputs" - outputKey = "output" - inputsKey = "inputs" - typeKey = "type" - ) - - if _, found := transpiler.Select(single, outputsKey); !found { - return nil, errors.New("invalid configuration missing outputs configuration") - } - - // Normalize using an intermediate map. - normMap, err := single.Map() - if err != nil { - return nil, errors.New(err, "could not read configuration") - } - - // Recreates multiple configuration grouped by the name of the outputs. - // Each configuration will be started into his own operator with the same name as the output. - grouped := make(map[string]*outputType) - - m, ok := normMap[outputsKey] - if !ok { - return nil, errors.New("fail to received a list of configured outputs") - } - - out, ok := m.(map[string]interface{}) - if !ok { - return nil, errors.New(fmt.Errorf( - "invalid outputs configuration received, expecting a map not a %T", - m, - )) - } - - for k, v := range out { - outputsOptions, ok := v.(map[string]interface{}) - if !ok { - return nil, errors.New("invalid type for output configuration block") - } - - t, ok := outputsOptions[typeKey] - if !ok { - return nil, fmt.Errorf("missing output type named output %s", k) - } - - n, ok := t.(string) - if !ok { - return nil, fmt.Errorf("invalid type received %T and expecting a string", t) - } - - delete(outputsOptions, typeKey) - - enabled, err := isEnabled(outputsOptions) - if err != nil { - return nil, err - } - - // Propagate global configuration to each individual configuration. - clone := cloneMap(normMap) - delete(clone, outputsKey) - clone[outputKey] = map[string]interface{}{n: v} - clone[inputsKey] = make([]map[string]interface{}, 0) - - grouped[k] = &outputType{ - enabled: enabled, - config: clone, - } - } - - s, ok := normMap[inputsKey] - if !ok { - s = make([]interface{}, 0) - } - - list, ok := s.([]interface{}) - if !ok { - return nil, errors.New("fail to receive a list of configured streams") - } - - for _, item := range list { - stream, ok := item.(map[string]interface{}) - if !ok { - return nil, fmt.Errorf( - "invalid type for stream expecting a map of options and received %T", - item, - ) - } - targetName := findOutputName(stream) - - // Do we have configuration for that specific outputs if not we fail to load the configuration. 
- config, ok := grouped[targetName] - if !ok { - return nil, fmt.Errorf("unknown configuration output with name %s", targetName) - } - - streams := config.config[inputsKey].([]map[string]interface{}) - streams = append(streams, stream) - - config.config[inputsKey] = streams - grouped[targetName] = config - } - - transpiled := make(map[string]*transpiler.AST) - - for name, group := range grouped { - if !group.enabled { - continue - } - if len(group.config[inputsKey].([]map[string]interface{})) == 0 { - continue - } - - ast, err := transpiler.NewAST(group.config) - if err != nil { - return nil, errors.New(err, "fail to generate configuration for output name %s", name) - } - - transpiled[name] = ast - } - - return transpiled, nil -} - -func isEnabled(m map[string]interface{}) (bool, error) { - const ( - enabledKey = "enabled" - ) - - enabled, ok := m[enabledKey] - if !ok { - return true, nil - } - switch e := enabled.(type) { - case bool: - return e, nil - } - return false, fmt.Errorf("invalid type received for enabled %T and expecting a boolean", enabled) -} - -func findOutputName(m map[string]interface{}) string { - const ( - defaultOutputName = "default" - useOutputKey = "use_output" - ) - - output, ok := m[useOutputKey] - if !ok { - return defaultOutputName - } - - return output.(string) -} - -func cloneMap(m map[string]interface{}) map[string]interface{} { - newMap := make(map[string]interface{}) - for k, v := range m { - sV, ok := v.(map[string]interface{}) - if ok { - newMap[k] = cloneMap(sV) - continue - } - newMap[k] = v - } - - return newMap -} - -type outputType struct { - enabled bool - config map[string]interface{} -} diff --git a/internal/pkg/agent/program/program_test.go b/internal/pkg/agent/program/program_test.go deleted file mode 100644 index 4d12e40cc8e..00000000000 --- a/internal/pkg/agent/program/program_test.go +++ /dev/null @@ -1,614 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package program - -import ( - "flag" - "io/ioutil" - "os" - "path/filepath" - "strings" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/pkg/errors" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - yaml "gopkg.in/yaml.v2" - - "github.com/elastic/elastic-agent/internal/pkg/agent/internal/yamltest" - "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" -) - -var ( - generateFlag = flag.Bool("generate", false, "Write golden files") -) - -func TestGroupBy(t *testing.T) { - t.Run("only named output", func(t *testing.T) { - sConfig := map[string]interface{}{ - "outputs": map[string]interface{}{ - "special": map[string]interface{}{ - "type": "elasticsearch", - "hosts": "xxx", - "username": "myusername", - "password": "mypassword", - }, - "infosec1": map[string]interface{}{ - "type": "elasticsearch", - "hosts": "yyy", - "username": "anotherusername", - "password": "anotherpassword", - }, - }, - - "inputs": []map[string]interface{}{ - { - "type": "log", - "use_output": "special", - "streams": map[string]interface{}{"paths": "/var/log/hello.log"}, - }, - { - "type": "system/metrics", - "use_output": "special", - }, - { - "type": "log", - "streams": map[string]interface{}{"paths": "/var/log/infosec.log"}, - "use_output": "infosec1", - }, - }, - } - - ast, err := transpiler.NewAST(sConfig) - require.NoError(t, err) - - grouped, err := groupByOutputs(ast) - require.NoError(t, err) - require.Equal(t, 2, len(grouped)) - - c1 := transpiler.MustNewAST(map[string]interface{}{ - "output": map[string]interface{}{ - "elasticsearch": map[string]interface{}{ - "hosts": "xxx", - "username": "myusername", - "password": "mypassword", - }, - }, - "inputs": []map[string]interface{}{ - { - "type": "log", - "streams": map[string]interface{}{"paths": "/var/log/hello.log"}, - "use_output": "special", - }, - { - "type": "system/metrics", - "use_output": "special", - }, - }, - }) - - c2, _ := transpiler.NewAST(map[string]interface{}{ - "output": map[string]interface{}{ - "elasticsearch": map[string]interface{}{ - "hosts": "yyy", - "username": "anotherusername", - "password": "anotherpassword", - }, - }, - "inputs": []map[string]interface{}{ - { - "type": "log", - "streams": map[string]interface{}{"paths": "/var/log/infosec.log"}, - "use_output": "infosec1", - }, - }, - }) - - defaultConfig, ok := grouped["special"] - require.True(t, ok) - require.Equal(t, c1.Hash(), defaultConfig.Hash()) - - infosec1Config, ok := grouped["infosec1"] - - require.True(t, ok) - require.Equal(t, c2.Hash(), infosec1Config.Hash()) - }) - - t.Run("fail when the referenced named output doesn't exist", func(t *testing.T) { - sConfig := map[string]interface{}{ - "monitoring": map[string]interface{}{ - "elasticsearch": map[string]interface{}{ - "hosts": "localhost", - }, - }, - "outputs": map[string]interface{}{ - "default": map[string]interface{}{ - "type": "elasticsearch", - "hosts": "xxx", - "username": "myusername", - "password": "mypassword", - }, - "infosec1": map[string]interface{}{ - "type": "elasticsearch", - "hosts": "yyy", - "username": "anotherusername", - "password": "anotherpassword", - }, - }, - - "inputs": []map[string]interface{}{ - { - "type": "log", - "streams": map[string]interface{}{"paths": "/var/log/hello.log"}, - "use_output": "special", - }, - { - "type": "system/metrics", - "use_output": "special", - }, - { - "type": "log", - "streams": map[string]interface{}{"paths": "/var/log/infosec.log"}, - "use_output": 
"donotexist", - }, - }, - } - - ast, err := transpiler.NewAST(sConfig) - require.NoError(t, err) - - _, err = groupByOutputs(ast) - require.Error(t, err) - }) - - t.Run("only default output", func(t *testing.T) { - sConfig := map[string]interface{}{ - "outputs": map[string]interface{}{ - "default": map[string]interface{}{ - "type": "elasticsearch", - "hosts": "xxx", - "username": "myusername", - "password": "mypassword", - }, - "infosec1": map[string]interface{}{ - "type": "elasticsearch", - "hosts": "yyy", - "username": "anotherusername", - "password": "anotherpassword", - }, - }, - "inputs": []map[string]interface{}{ - { - "type": "log", - "streams": map[string]interface{}{"paths": "/var/log/hello.log"}, - }, - - { - "type": "system/metrics", - }, - { - "type": "log", - "streams": map[string]interface{}{"paths": "/var/log/infosec.log"}, - }, - }, - } - - ast, err := transpiler.NewAST(sConfig) - require.NoError(t, err) - - grouped, err := groupByOutputs(ast) - require.NoError(t, err) - require.Equal(t, 1, len(grouped)) - - c1 := transpiler.MustNewAST(map[string]interface{}{ - "output": map[string]interface{}{ - "elasticsearch": map[string]interface{}{ - "hosts": "xxx", - "username": "myusername", - "password": "mypassword", - }, - }, - "inputs": []map[string]interface{}{ - { - "type": "log", - "streams": map[string]interface{}{"paths": "/var/log/hello.log"}, - }, - - { - "type": "system/metrics", - }, - - { - "type": "log", - "streams": map[string]interface{}{"paths": "/var/log/infosec.log"}, - }, - }, - }) - - defaultConfig, ok := grouped["default"] - require.True(t, ok) - require.Equal(t, c1.Hash(), defaultConfig.Hash()) - - _, ok = grouped["infosec1"] - - require.False(t, ok) - }) - - t.Run("default and named output", func(t *testing.T) { - sConfig := map[string]interface{}{ - "outputs": map[string]interface{}{ - "default": map[string]interface{}{ - "type": "elasticsearch", - "hosts": "xxx", - "username": "myusername", - "password": "mypassword", - }, - "infosec1": map[string]interface{}{ - "type": "elasticsearch", - "hosts": "yyy", - "username": "anotherusername", - "password": "anotherpassword", - }, - }, - "inputs": []map[string]interface{}{ - { - "type": "log", - "streams": map[string]interface{}{"paths": "/var/log/hello.log"}, - }, - - { - "type": "system/metrics", - }, - - { - "type": "log", - "streams": map[string]interface{}{"paths": "/var/log/infosec.log"}, - "use_output": "infosec1", - }, - }, - } - - ast, err := transpiler.NewAST(sConfig) - require.NoError(t, err) - - grouped, err := groupByOutputs(ast) - require.NoError(t, err) - require.Equal(t, 2, len(grouped)) - - c1 := transpiler.MustNewAST(map[string]interface{}{ - "output": map[string]interface{}{ - "elasticsearch": map[string]interface{}{ - "hosts": "xxx", - "username": "myusername", - "password": "mypassword", - }, - }, - "inputs": []map[string]interface{}{ - { - "type": "log", - "streams": map[string]interface{}{"paths": "/var/log/hello.log"}, - }, - - { - "type": "system/metrics", - }, - }, - }) - - c2, _ := transpiler.NewAST(map[string]interface{}{ - "output": map[string]interface{}{ - "elasticsearch": map[string]interface{}{ - "hosts": "yyy", - "username": "anotherusername", - "password": "anotherpassword", - }, - }, - "inputs": []map[string]interface{}{ - { - "type": "log", - "streams": map[string]interface{}{"paths": "/var/log/infosec.log"}, - "use_output": "infosec1", - }, - }, - }) - - defaultConfig, ok := grouped["default"] - require.True(t, ok) - require.Equal(t, c1.Hash(), defaultConfig.Hash()) - - 
infosec1Config, ok := grouped["infosec1"] - - require.True(t, ok) - require.Equal(t, c2.Hash(), infosec1Config.Hash()) - }) - - t.Run("streams is an empty list", func(t *testing.T) { - sConfig := map[string]interface{}{ - "outputs": map[string]interface{}{ - "default": map[string]interface{}{ - "type": "elasticsearch", - "hosts": "xxx", - "username": "myusername", - "password": "mypassword", - }, - "infosec1": map[string]interface{}{ - "type": "elasticsearch", - "hosts": "yyy", - "username": "anotherusername", - "password": "anotherpassword", - }, - }, - "datasources": []map[string]interface{}{}, - } - - ast, err := transpiler.NewAST(sConfig) - require.NoError(t, err) - - grouped, err := groupByOutputs(ast) - require.NoError(t, err) - require.Equal(t, 0, len(grouped)) - }) - - t.Run("no streams are defined", func(t *testing.T) { - sConfig := map[string]interface{}{ - "outputs": map[string]interface{}{ - "default": map[string]interface{}{ - "type": "elasticsearch", - "hosts": "xxx", - "username": "myusername", - "password": "mypassword", - }, - "infosec1": map[string]interface{}{ - "type": "elasticsearch", - "hosts": "yyy", - "username": "anotherusername", - "password": "anotherpassword", - }, - }, - } - - ast, err := transpiler.NewAST(sConfig) - require.NoError(t, err) - - grouped, err := groupByOutputs(ast) - require.NoError(t, err) - require.Equal(t, 0, len(grouped)) - }) -} - -func TestConfiguration(t *testing.T) { - defer os.Remove("fleet.yml") - - testcases := map[string]struct { - programs map[string][]string - err bool - }{ - "namespace": { - programs: map[string][]string{ - "default": {"filebeat", "fleet-server", "heartbeat", "metricbeat", "endpoint", "packetbeat"}, - }, - }, - "logstash_config": { - programs: map[string][]string{ - "default": {"filebeat", "fleet-server", "heartbeat", "metricbeat", "endpoint", "packetbeat"}, - "elasticsearch": {"filebeat"}, - }, - }, - "single_config": { - programs: map[string][]string{ - "default": {"filebeat", "fleet-server", "heartbeat", "metricbeat", "endpoint", "packetbeat"}, - }, - }, - "audit_config": { - programs: map[string][]string{ - "default": {"auditbeat"}, - }, - }, - "fleet_server": { - programs: map[string][]string{ - "default": {"fleet-server"}, - }, - }, - "synthetics_config": { - programs: map[string][]string{ - "default": {"heartbeat"}, - }, - }, - "enabled_true": { - programs: map[string][]string{ - "default": {"filebeat"}, - }, - }, - "enabled_false": { - programs: map[string][]string{ - "default": {}, - }, - }, - "enabled_output_true": { - programs: map[string][]string{ - "default": {"filebeat"}, - }, - }, - "enabled_output_false": { - programs: map[string][]string{}, - }, - "endpoint_basic": { - programs: map[string][]string{ - "default": {"endpoint"}, - }, - }, - "endpoint_no_fleet": { - programs: map[string][]string{ - "default": {}, - }, - }, - "endpoint_unknown_output": { - programs: map[string][]string{ - "default": {}, - }, - }, - "endpoint_arm": { - programs: map[string][]string{ - "default": {}, - }, - }, - } - - for name, test := range testcases { - t.Run(name, func(t *testing.T) { - singleConfig, err := ioutil.ReadFile(filepath.Join("testdata", name+".yml")) - require.NoError(t, err) - - var m map[string]interface{} - err = yaml.Unmarshal(singleConfig, &m) - require.NoError(t, err) - - ast, err := transpiler.NewAST(m) - require.NoError(t, err) - - programs, err := Programs(&fakeAgentInfo{}, ast) - if test.err { - require.Error(t, err) - return - } - require.NoError(t, err) - require.Equal(t, len(test.programs), 
len(programs)) - - if len(programs) > 0 { - _, containsDefault := programs["default"] - require.True(t, containsDefault) - } - - for progKey, progs := range programs { - testPrograms, isExpectedProgram := test.programs[progKey] - require.True(t, isExpectedProgram) - require.Equal(t, len(testPrograms), len(progs)) - - for _, program := range progs { - filename := name + "-" + program.Spec.CommandName() - if progKey != "default" { - filename += "-" + progKey - } - programConfig, err := ioutil.ReadFile(filepath.Join( - "testdata", - filename+".yml", - )) - - require.NoError(t, err) - var m map[string]interface{} - err = yamltest.FromYAML(programConfig, &m) - require.NoError(t, errors.Wrap(err, program.Cmd())) - - compareMap := &transpiler.MapVisitor{} - program.Config.Accept(compareMap) - - if !assert.True(t, cmp.Equal(m, compareMap.Content)) { - diff := cmp.Diff(m, compareMap.Content) - if diff != "" { - t.Errorf("%s-%s mismatch (-want +got):\n%s", name, program.Spec.Name, diff) - } - } - } - } - - }) - } -} - -func TestUseCases(t *testing.T) { - defer os.Remove("fleet.yml") - - useCasesPath := filepath.Join("testdata", "usecases") - useCases, err := filepath.Glob(filepath.Join(useCasesPath, "*.yml")) - require.NoError(t, err) - - generatedFilesDir := filepath.Join(useCasesPath, "generated") - - // Cleanup all generated files to make sure not having any left overs - if *generateFlag { - err := os.RemoveAll(generatedFilesDir) - require.NoError(t, err) - } - - for _, usecase := range useCases { - t.Run(usecase, func(t *testing.T) { - - useCaseName := strings.TrimSuffix(filepath.Base(usecase), ".yml") - singleConfig, err := ioutil.ReadFile(usecase) - require.NoError(t, err) - - var m map[string]interface{} - err = yaml.Unmarshal(singleConfig, &m) - require.NoError(t, err) - - ast, err := transpiler.NewAST(m) - require.NoError(t, err) - - programs, err := Programs(&fakeAgentInfo{}, ast) - require.NoError(t, err) - - require.Equal(t, 1, len(programs)) - - defPrograms, ok := programs["default"] - require.True(t, ok) - - for _, program := range defPrograms { - generatedPath := filepath.Join( - useCasesPath, "generated", - useCaseName+"."+program.Spec.CommandName()+".golden.yml", - ) - - compareMap := &transpiler.MapVisitor{} - program.Config.Accept(compareMap) - - // Generate new golden file for programm - if *generateFlag { - d, err := yaml.Marshal(&compareMap.Content) - require.NoError(t, err) - - err = os.MkdirAll(generatedFilesDir, 0755) - require.NoError(t, err) - err = ioutil.WriteFile(generatedPath, d, 0644) - require.NoError(t, err) - } - - programConfig, err := ioutil.ReadFile(generatedPath) - require.NoError(t, err) - - var m map[string]interface{} - err = yamltest.FromYAML(programConfig, &m) - require.NoError(t, errors.Wrap(err, program.Cmd())) - - if !assert.True(t, cmp.Equal(m, compareMap.Content)) { - diff := cmp.Diff(m, compareMap.Content) - if diff != "" { - t.Errorf("%s-%s mismatch (-want +got):\n%s", usecase, program.Spec.Name, diff) - } - } - } - }) - } -} - -type fakeAgentInfo struct{} - -func (*fakeAgentInfo) AgentID() string { - return "agent-id" -} - -func (*fakeAgentInfo) Version() string { - return "8.0.0" -} - -func (*fakeAgentInfo) Snapshot() bool { - return false -} - -func (*fakeAgentInfo) Headers() map[string]string { - return map[string]string{ - "h1": "test-header", - } -} diff --git a/internal/pkg/agent/program/spec.go b/internal/pkg/agent/program/spec.go deleted file mode 100644 index 0b3a8eeb347..00000000000 --- a/internal/pkg/agent/program/spec.go +++ /dev/null 
@@ -1,111 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package program - -import ( - "fmt" - "io/ioutil" - "path/filepath" - "runtime" - "strings" - - "gopkg.in/yaml.v2" - - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" -) - -// ErrMissingWhen is returned when no boolean expression is defined for a program. -var ErrMissingWhen = errors.New("program must define a 'When' expression") - -// Spec represents a specific program specification, it contains information about how to run the -// program and also the rules to apply to the single configuration to create a specific program -// configuration. -// -// NOTE: Current spec are build at compile time, we want to revisit that to allow other program -// to register their spec in a secure way. -type Spec struct { - Name string `yaml:"name"` - ServicePort int `yaml:"service,omitempty"` - Cmd string `yaml:"cmd"` - Args []string `yaml:"args"` - Artifact string `yaml:"artifact"` - ActionInputTypes []string `yaml:"action_input_types,omitempty"` - LogPaths map[string]string `yaml:"log_paths,omitempty"` - MetricEndpoints map[string]string `yaml:"metric_endpoints,omitempty"` - Rules *transpiler.RuleList `yaml:"rules"` - CheckInstallSteps *transpiler.StepList `yaml:"check_install"` - PostInstallSteps *transpiler.StepList `yaml:"post_install"` - PreUninstallSteps *transpiler.StepList `yaml:"pre_uninstall"` - When string `yaml:"when"` - Constraints string `yaml:"constraints"` - RestartOnOutputChange bool `yaml:"restart_on_output_change,omitempty"` - ExportedMetrics []string `yaml:"exported_metrics,omitempty"` -} - -func (s *Spec) Command() string { - name := strings.ToLower(s.Cmd) - if runtime.GOOS == "windows" && !strings.HasSuffix(name, ".exe") { - return name + ".exe" - } - - return name -} - -func (s *Spec) CommandName() string { - return strings.ToLower(s.Cmd) -} - -// ReadSpecs reads all the specs that match the provided globbing path. -func ReadSpecs(path string) ([]Spec, error) { - var specs []Spec - files, err := filepath.Glob(path) - if err != nil { - return []Spec{}, errors.New(err, "could not include spec", errors.TypeConfig) - } - - for _, f := range files { - b, err := ioutil.ReadFile(f) - if err != nil { - return []Spec{}, errors.New(err, fmt.Sprintf("could not read spec %s", f), errors.TypeConfig) - } - - spec := Spec{} - if err := yaml.Unmarshal(b, &spec); err != nil { - return []Spec{}, errors.New(err, fmt.Sprintf("could not unmarshal YAML for file %s", f), errors.TypeConfig) - } - specs = append(specs, spec) - } - - return specs, nil -} - -// NewSpecFromBytes create a Spec from a bytes. -func NewSpecFromBytes(b []byte) (Spec, error) { - spec := Spec{} - if err := yaml.Unmarshal(b, &spec); err != nil { - return Spec{}, errors.New(err, "could not unmarshal YAML", errors.TypeConfig) - } - return spec, nil -} - -// MustReadSpecs read specs and panic on errors. -func MustReadSpecs(path string) []Spec { - s, err := ReadSpecs(path) - if err != nil { - panic(err) - } - return s -} - -// FindSpecByName find a spec by name and return it or false if we cannot find it. 
-func FindSpecByName(name string) (Spec, bool) { - for _, candidate := range Supported { - if name == candidate.Name { - return candidate, true - } - } - return Spec{}, false -} diff --git a/internal/pkg/agent/program/spec_test.go b/internal/pkg/agent/program/spec_test.go deleted file mode 100644 index 110dd92eb36..00000000000 --- a/internal/pkg/agent/program/spec_test.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package program - -import ( - "io/ioutil" - "os" - "path/filepath" - "regexp" - "strings" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "gopkg.in/yaml.v2" - - "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" -) - -func TestSerialization(t *testing.T) { - spec := Spec{ - Name: "hello", - Cmd: "hellocmd", - Args: []string{"-c", "first"}, - Artifact: "nested/hellocmd", - Rules: transpiler.NewRuleList( - transpiler.Copy("inputs", "filebeat"), - transpiler.Filter("filebeat", "output", "keystore"), - transpiler.Rename("filebeat", "notfilebeat"), - transpiler.Translate("type", map[string]interface{}{ - "event/file": "log", - "event/stdin": "stdin", - }), - transpiler.TranslateWithRegexp("type", regexp.MustCompile("^metric/(.+)"), "$1/hello"), - transpiler.Map("inputs", - transpiler.Translate("type", map[string]interface{}{ - "event/file": "log", - })), - transpiler.FilterValues( - "inputs", - "type", - "log", - ), - ), - CheckInstallSteps: transpiler.NewStepList( - transpiler.ExecFile(25, "app", "verify", "--installed"), - ), - PostInstallSteps: transpiler.NewStepList( - transpiler.DeleteFile("d-1", true), - transpiler.MoveFile("m-1", "m-2", false), - ), - PreUninstallSteps: transpiler.NewStepList( - transpiler.ExecFile(30, "app", "uninstall", "--force"), - ), - When: "1 == 1", - Constraints: "2 == 2", - } - yml := `name: hello -cmd: hellocmd -args: -- -c -- first -artifact: nested/hellocmd -rules: -- copy: - from: inputs - to: filebeat -- filter: - selectors: - - filebeat - - output - - keystore -- rename: - from: filebeat - to: notfilebeat -- translate: - path: type - mapper: - event/file: log - event/stdin: stdin -- translate_with_regexp: - path: type - re: ^metric/(.+) - with: $1/hello -- map: - path: inputs - rules: - - translate: - path: type - mapper: - event/file: log -- filter_values: - selector: inputs - key: type - values: - - log -check_install: -- exec_file: - path: app - args: - - verify - - --installed - timeout: 25 -post_install: -- delete_file: - path: d-1 - fail_on_missing: true -- move_file: - path: m-1 - target: m-2 - fail_on_missing: false -pre_uninstall: -- exec_file: - path: app - args: - - uninstall - - --force - timeout: 30 -when: 1 == 1 -constraints: 2 == 2 -` - t.Run("serialization", func(t *testing.T) { - b, err := yaml.Marshal(spec) - require.NoError(t, err) - assert.Equal(t, string(b), yml) - }) - - t.Run("deserialization", func(t *testing.T) { - s := Spec{} - err := yaml.Unmarshal([]byte(yml), &s) - require.NoError(t, err) - assert.Equal(t, spec, s) - }) -} - -func TestExport(t *testing.T) { - dir, err := ioutil.TempDir("", "test_export") - require.NoError(t, err) - defer os.RemoveAll(dir) - - for _, spec := range Supported { - b, err := yaml.Marshal(spec) - require.NoError(t, err) - err = ioutil.WriteFile(filepath.Join(dir, 
strings.ToLower(spec.Name)+".yml"), b, 0666) - require.NoError(t, err) - } -} diff --git a/internal/pkg/agent/program/supported.go b/internal/pkg/agent/program/supported.go deleted file mode 100644 index 4bd218a4ee9..00000000000 --- a/internal/pkg/agent/program/supported.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -// Code generated by elastic-agent/internals/dev-tools/buildspec/buildspec.go - DO NOT EDIT. - -package program - -import ( - "strings" - - "github.com/elastic/elastic-agent/pkg/packer" -) - -var Supported []Spec -var SupportedMap map[string]Spec - -func init() { - // Packed Files - // internal/spec/apm-server.yml - // internal/spec/auditbeat.yml - // internal/spec/cloudbeat.yml - // internal/spec/endpoint.yml - // internal/spec/filebeat.yml - // internal/spec/fleet-server.yml - // internal/spec/heartbeat.yml - // internal/spec/metricbeat.yml - // internal/spec/osquerybeat.yml - // internal/spec/packetbeat.yml - unpacked := packer.MustUnpack("eJzMe1uTo7iW7vv5Gf16zpnNJZ3dTMR+MGRxs5Mq4zISekOSE2wL7E4b2zAx/31C4mLAZFZWdvWe/VARlVjosrQu3/rW4r9+22Sn9WsWsX8cD2vyj+iQ/v/j+vW8fv2PImW//edvODVP6Ps+XgS6Nw88RjLESHzYYrB4dCzzgpdyiaCrIOjMQuhKEUBJqI7+lpFyH4PLPnYM5+QvnaNjuKcQTBKkBCcEJtI8DfIQuEcEFhq1XRktnaOxmcbORjadzSV2UrqFqs5I6jGcLTTXPumrL/J3P3CBH7gvvqTZi3J/fX7SNSc+UCMNHoilFdQKdlCRGbXdQ6g+PzrmceYY000I9dMc1mfaOEeDSTOSBUcEnx/5uvOlvsWqPoGqf4bK9UDUhXjuGNPYsZiEgPToWOiIQCC1z23//HWjH3Cmy9R+nolnxjTGyuQlVLQcpddDJZ/JGatT/vvJseSEPO3bscQypehpH6P0yhBc3J539tY8my/1AgH5TNPgJVKCydd43/5W/dNfEdzx+9iGSlASWUuIxcTYT81ju6ySKcvRpTtGikkanLCKGFRObP39dp7mn5h3o/P7zul0L95BKXuAqieRNEjw9328VqVaJuiAbZ8RpikhuMq9c9sew1awpZZWjMm6XkdaQ53d3kEJtgNGyt6+TkJPF+1ejtQKitvZ9RKBKwtV/0yyO7nfrVvNp8nU1uXqfDfZdO7y5Fgsj9JgS01tj4C5Q9Atv270318WBzWygvzrRj8iMMmoFe9d+1Sv42mz5fT/Ok/TOASTnWMlCZFObL2Md2ulXtOWjo5BGbbMklpsS5QgIam3d4tL7KouQxYr3eLC95BFiplGypdsbkwzbGkZUf2EKHE2W+z/+dv/G3qFnG5OeB2dhk4BBrsI+hsETMlIvRN62sdh75lZoI7BzcHzZi4M7zZmngbHEHpSBJ5PIaDcsPM1kI/O5lK9s1qJd4gSSBRO8whcT11HgVLzSJTVZj7tOZYSQV8mxkRCQL5gy5TQcsJwam6wFey+Aa4MHhuugYGZiz2AIKdGZzz0Cwru1jhihWYRmGTz9MpoGhy/AZ+FWZAN50WK90KsoAi54j9Ju7VkBv4XtlrsAj0wNfu7RJ++br9cnm1puEZCgX+glstCuMgrB8KyeSqfkS2cQY7AJAm5cj3JaQiuJVo6M7g4JCTzDyg1t5TfSRok1H4+9+4mc5m49Mx/4c4Jq4FE7ECCircPwSTj9yuc8GLfcYrjBmtsnuMFuB6dL6aMLCaJ9YzaQIzKEEmhbxCgB7zR1Qj6e8eg1dmMP7J5vB8xeO+FKkyKTK1AgLK1Pa0NsHFE/oRYq8r4bJd93UwzmFbrz4uHV7d2TkQJjgh4EladR254/Izksp85ha4jyy8dgx5w6rP10z4WhlDIvyPLlMJAK6ntshBIufgbeoza0mmt+Am1zBei+gUC5mmeThIMgpJY5hZBKeMBwLGCJFTiOAKTC4WLnMstApM/+T6gwnJkBQ+Nk6E2u3BZi7W7+7J9FRd6giz/BaWMYfFML7DiMaJ63CFxJ3TG6SpGqVY4VqBUQVTsr0RwUTsKLUdLPcWqw+fehdBP2vtbTsTf3Lbmxt1v4t7mhp7gdBEPZSJ8APQuIfBYdX+tU6vHyC/Eds8i6Cha61TvAyHLiRIU1NQSlPmMvC2XJ6xMFARdqblHIEt5cw9EPlH+nFp/1HL3S/670FmYSITbgqkdIyjugPuFstlTE3Cb92o9O1ArOH3d6M15mucSLjtjxdk7AdzWE2rFXV0bC95b50v1e6XHQelY6Ew2ut6xd263WQQeYmEHxjSr7HdxdouH2IeIkYxJEQ/EXE+47Da6FFkml0/ZrmML+UgILOIQPsfUSphj1f5jqechkPn9NUGNyyknyjWhVvCG/fVBirinRSfY3dlinM+KXdZ9V8z7tI/dpy+zDngQe+kBDwsVWJH6tm7c7gADbUfBlXVkmjrWl7i1V0M/kEIviRVsI4AOQgaKltM0KLhfqmUldMZVGrvxeTDGrgB4MnWLh3aPGJivKNASkrlJH+C0utLaU++81h+PjnWbv/7txGWFgaagQBM+rftODWKG833knTZu/ew6re0v9j8GhZYAeUKe/J4joOXtPZguIzA4kHQVhzwuWN4Zp+iACl3Chb7Fiswc29+TVJORElc6+cXMuP9yjCTHhX7EipdgQ08jcGWk2OU/cZYqrphawXUDKuiAraC207dAZQfobe/8QA2ga5upZUNT80hBUHZkwsGk0Hk07T4T679EgGMRWkTQ7wBvKSYqK7nuGPF+63wxd8h+jqlNz45l7pCppREIjvxZ15eSQgDNxq/HDW6KwKjPbwEhV
CoZz1O2mxsjzzMvD6FeRpYmjf/OfY53wMrDIKZVv4vYlv7x6NgnrZK3dyZ2c0d6SlLtdAfYq4Spi3NmfO6hP0CZe+Z4o48d+z62srMWG81qvNID9Tc84LZ3N1/212/tvcZEre5xnYfiXVbdVx3zDL0F8o0/qfxsDcwNrvOTNAJUJqkpbGUQMweJwH5GlYThLY9/QY5Ufz8z/N/7Z2/O+ZcSA5EMkJTP+yySgZ9KDAjb53QsMVCuZwoGiUHv2SAx+LcC+Qf6cXA8ANr9M26pxc6YaZXCm1oFhKsMlBvHEavBToDl6X7IPKQRTFqQWSuFUMq3gKRTA22ePWKFcofXH98490C7kFTbIuiVHBTfGA0th4pw1McIeFJlqB7jMggBkhB02oB3B87+pRl7X26DjLpiF0ztgFN0boypt79th2kZcQaN8feDQJv1nyn0OaifGRndI/Dw6FhXhlMqRQY3uNohqdLBeXqIn2vwHFlmuVSCCTfiRpdelpfYVZrk1ysRMItQiYXhfdog1xk97DfZ0B59MNkRi+vhIl4pwZZC90Dt3SxU5F0FGPwLVlhODblEwJNJyqT1QLfRbY4TsgLFELqBDqGyenSeQvXrUzwLgSecG6zlANUK3H4Vzi7Ika2feSA00usZydolhP6+ul8OQFw14vI0nPN3i21IahbrpWY2Tmsu3d6fq54UQp/NlesZFVpn/9Kfcz53wXVVP0ZgIvPEztloZ2Ivzj64JkT1D2Ghmbd3tJIK36EdsULO3XPONhP+bMN1gAp701RS7mNn9/wIzeuCpFpGUvPkfKlABjSv7X7F/5s1zCvh90atgECLn/1KRtdJvT0C3quQn+on2Lo81gkMC2VNgKFh0uKkHblAj4VqIECGU4+rmdc2qDk8iKcsXS+d27ONdOK65bQBcbohqs8DUtE8oxY7IaDJXBeey+mMWFop7CzzJA7y6jt+QMDjCesRfR8kRIY7rmfNPqwqJrTB1nDbubv7mi/l9k7qcSW1fEYyp/PMOc1hcEGqmyBrNXjuMqJoMkk9RoqODN6QY3/85DGC002TCEZAZtyev26myvPTdEZsl0E1yCMw4Tp1xE/72Xyps7UVbCsgumrAkND9r5vppqsH5GabzRoJSWkfZBqujNNWPzZdMDm8x3H5jOz7hwTPDUhBdeCX3/PtdZLQTRKHwJnLp9WLqZAX13MJQfdlOPYNMued5EcfxqITT4gpWIkzYWBehvbUY91tV8ZWb68/TIgEmZEGRS/h2N7LqmuTfab/LvbcscpiHxAdiMLOOB4Hi9Wc/gAc/vpY1Y9JBw50S2q7Cbcj7itxqkkOX091ZZz5BwxWeQjdbWRL8bfvUuwqZoG/h5JbVOu79qmgYCJ0dJ6iBAN2XMN6rGCxk4QapDqP4f9OsiAXvmk5OYXgcCZZPbYk2Ww5nd3FyJcNW49gVp/7LFAlAHVcFP62whmHyg43Ou5VpDKPUTu4zFN2xB/Ang2+5Qkfsa4JtVZjBO6g6iUnOOUJliySvM54iWTBPb61tAzxWFVMjggihp/kHQKujIofVtOs5epqfoCUvsnFmNSJDckbMoYUI3IQ1TizoBZLwwpbCoxMCq0r4wJBHtddCSocQ9Q6b2rbyDJzxHFGbbs/8leiOpMeeELPE+8dgnHjd4cY+W8nLudL/RTC6cDvD5PgOz9TVR4V84hNTcJyRVz2/E2VOA/IyR9WG5tcga0tjxF7wXOwTUuiFuJ+2/urdLxHOm/gYrDHseoo98fQZ/w+esTF5bP7b2W9QQCdSbp6lyQYEDg9wrY966WOzbZfhCKf4mfwX8S+b8TQ4L68IRn8d5LEIlaFYFKK3LZPODb76udMXHbAvJBeFbN+zv1ChcvE/7vvcH2gKc+Ha4wp7IP0K7hNgUH1JGSxXJzZ8g94sI7wOdniTC3vwvcX9vSuO0+wI0/7mAL/MthLhcMtbRspAccUO6x4r938s5pHYP8CAf9AZK3Elqbyc33d6NWzy/3556o3ITxnKPeCeOvI4SPEokyU4OWOFPobydX3ii6fXr9T9PnLpGivCDfM16U4hLScgypfp4ophUrc/w0+dzCO+Uihy+ag0o/IDm64vc75UarJOPWLddcHKRMpBCy/6YcUN7pze59uiSFfKHBLCm7jItuXiKl18Wcz757a/iXKvDO++eQ/sRoUODWP6DbuNQToNbztp2M3UoyhKXXmzBH0U66nt339UXrboCIfu9iuo7/zZd/O+N9r6PXW4bZ204dgR25z8Vzh4fYbPWCeH9/mrgj22if+dZK85eze1ImmqNyN0RWBLWJ1yv1mk3OEinZpCy1NMcgaI31F/l3xbj+Fu2+Y74Ok70+Ttp018kZmL8td/G0zvTgc1xj6PoTeHMHd3rVPNb/kaxxbo4avUD0mMLNBjo5BK/0oiMBzrnJKUHpKRJcH922CG/H2brEbwb5svT6Nd3j5VQ4Sr5r8peZt69zk1OY1N+6W5+IV32bIJ6z4zGHSoAus6o5qu6zigyjK3DhQdFi3uEtqCge9bpy7fPMHeeCobxov2HRzr1Mnb/s16w9t4Z09NDbxrj+vY0Krn/U+m71ARRQeRjlX0XlX6Cm2AkaNSdOVlzdz3Zpw2twuhm1Rt8UaN+6z4qgbfyCKW3hUPsI2cKsHWdNtN7lghWO7nShajazV4OP82WjHNuu2vDSygjSEwZHa41zzPXd8t489Vj1pwPHeyUl0vI13dOWN3sxTLv8qPkBVP5Ns8aO1S6Jc7rr+Gh8x307zoU52+YqOrKr9tvvTuzbWXf+O/x69r7vOvzpeV0XGu3eruZMzUVu96OQ7b2PdN/f5Dp7p2gGFHmtzxOmnOhFHcdlfmmM5EfkIUfUkVFafOtebzUefO+MtL4k/1eU40K3px2ojjZ02ObgVJMgKhB8SsX20dlLZxyA2/uJOyJGYmKyj17HuxqUVJCTrFzGj3rNOIPxoAfMTZNDPtEt/qDvxLkhzcOKXcyXhIEP6PBnzfuHyLqDKmugEhKrLkBI8dFugxwuCLk9M10TljjZh4mKLP/LZZUSRBwD1PVL5fWDbMczmbm6O9r7Q2XRRZQEbGsEYyP07CpN9x9wBr/+KgqWQDa2SoqUAq4Joo5ZZRgY5GPE/74uW6fr0uiEjFvgdBBJJ2bbWyPqjg7qRX6mp2vEPC5rWgAO2pB9TrA2Nm/kMQ/0oSos/bj/4yY8Xrmek8PSL5FhQIBcNWcGGAjKcNwtl7YKgu+Xzflv6v39fBavVjj19gJo9IegXEfDqHpGm72rC4bmgGMfOZbzpEfqyJmnAb7KgpnbGTGtSlZdQSRKcUm6dleZnLZ05DvE/0g9b331N07VW1HgWqHZoycZyPtrCUKUGN11Sn38VnfgO1XtHHQ7gT53uBlqCresLtbQXbLGSPu3v+h47a3bgw93Zc6xoPWoNwWSLoC4JSJ7VkAVWPfkRWFS96o2tGEKfOq0ibskjx2Cv49R083FMv1/5s+e43WEapLc0PHigtptwnRByMjVBxd7KkgPYqbbnnfX3OHg++IBntPT5VuQaKXeSdPXomA/5
rNAa2yzd6fu96f/bJdCBvt/pKZc7zjzhJ6FKD9RKXkgaZAgml36Jor6D+/7dgts43MT/WD1dRZT/tnl4nS3vZVTNw9eIHx3D7yKCClJXsaI7d9NneEeN9/oDb5TdKQQnBhWzIKk5GdXj1k8MYHilK+2ef7K3t/PeYvZjWP8WrfCRd/rfLPwS6vjzc4ykSB84gx2UKNA6KcYv7T/txU3x7lC3WvqxFxPzsdjQlkJ7tuzc06hZxyYHuKaRy7t0ZE8GCQuBz/ic1flHPyYb4IN/gx7S/fHPfP1ajME/1btSEBTrflX+TFRTRtCdDCvzP1GV/3no9zMdpB+qxruf7zKtP+fCQ/m8232nlQQGjGS72ee6zJr3mVCdD3eX2f4Bg+BM4eLRefpyMToM8BvJYpdhbz7NGsC26xnVkO1j3a06h6uiUwFbTMgwAoLd464j5aH97e6Av6861t7fB6shH2xlH3TXdlzIuEv4JaxKqGjiU6wQsJzaz4+Dzhwx7mU5Tb4tp9lzGSvuZcQVHCKyW49RMSvL3EZKIPUSQZsHkBNrmvbbRLAgJ79KPX+QBPIxd2Pf7SUXNZZCNqtay/t9Of2xbyZ/2Vsfa5L+mT9Px/xF2qP/gdeblMclBN4rGuH+xrW5uzfng9z9kD/+V2n3T309Pfvtv//P/wQAAP//cJQCCQ==") - SupportedMap = make(map[string]Spec) - - for f, v := range unpacked { - s, err := NewSpecFromBytes(v) - if err != nil { - panic("Cannot read spec from " + f + ": " + err.Error()) - } - Supported = append(Supported, s) - SupportedMap[strings.ToLower(s.Cmd)] = s - } -} diff --git a/internal/pkg/agent/program/testdata/audit_config-auditbeat.yml b/internal/pkg/agent/program/testdata/audit_config-auditbeat.yml deleted file mode 100644 index 7ea962de31a..00000000000 --- a/internal/pkg/agent/program/testdata/audit_config-auditbeat.yml +++ /dev/null @@ -1,164 +0,0 @@ -auditbeat: - modules: - - audit_rules: | - # Things that affect identity. - -w /etc/group -p wa -k identity - backlog_limit: 8192 - backpressure_strategy: auto - failure_mode: silent - id: audit/auditd-auditd_manager.auditd-6d35f37c-2243-4229-9fba-ccf39305b536 - include_raw_message: true - include_warnings: false - index: logs-auditd_manager.auditd-default - module: auditd - processors: - - drop_event: - when.equals.source.ip: 127.0.0.1 - - add_fields: - fields: - dataset: auditd_manager.auditd - namespace: default - type: logs - target: data_stream - - add_fields: - fields: - dataset: auditd_manager.auditd - target: event - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - rate_limit: 0 - resolve_ids: true - socket_type: multicast - tags: - - auditd_manager-auditd - - id: fim_1 - index: logs-auditd_manager.file_integrity-default - module: file_integrity - paths: - - /bin - - /usr/bin - - /sbin - - /usr/sbin - - /etc - processors: - - add_fields: - fields: - dataset: auditd_manager.file_integrity - namespace: default - type: logs - target: data_stream - - add_fields: - fields: - dataset: auditd_manager.file_integrity - target: event - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - - id: fim_2 - index: logs-auditd_manager.file_integrity-default - module: file_integrity - paths: - - /bin - - /usr/bin - - /sbin - - /usr/sbin - - /etc - processors: - - add_fields: - fields: - dataset: auditd_manager.file_integrity - namespace: default - type: logs - target: data_stream - - add_fields: - fields: - dataset: auditd_manager.file_integrity - target: event - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - - datasets: - - socket - id: id-auditd-system-socket - index: logs-audit_system.socket-default - module: system - processors: - - drop_event: - when.equals.source.ip: 127.0.0.1 - - add_fields: - fields: - dataset: audit_system.socket - namespace: default - type: logs - target: data_stream - - add_fields: - fields: - dataset: audit_system.socket - 
target: event - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - - datasets: - - process - id: id-auditd-system-process - index: logs-audit_system.process-default - module: system - processors: - - add_fields: - fields: - dataset: audit_system.process - namespace: default - type: logs - target: data_stream - - add_fields: - fields: - dataset: audit_system.process - target: event - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent -output: - elasticsearch: - headers: - h1: test-header - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - password: changeme - username: elastic diff --git a/internal/pkg/agent/program/testdata/audit_config.yml b/internal/pkg/agent/program/testdata/audit_config.yml deleted file mode 100644 index a6d906d4cda..00000000000 --- a/internal/pkg/agent/program/testdata/audit_config.yml +++ /dev/null @@ -1,104 +0,0 @@ -outputs: - default: - type: elasticsearch - hosts: [127.0.0.1:9200, 127.0.0.1:9300] - username: elastic - password: changeme -inputs: - - type: not_audit - this_is_ignored: ~ - streams: - - type: foo - - id: audit/auditd-auditd-6d35f37c-2243-4229-9fba-ccf39305b536 - name: auditd_manager-1 - revision: 1 - type: audit/auditd - use_output: default - meta: - package: - name: auditd_manager - version: 0.0.1 - data_stream: - namespace: default - streams: - - id: >- - audit/auditd-auditd_manager.auditd-6d35f37c-2243-4229-9fba-ccf39305b536 - type: audit/auditd - data_stream: - dataset: auditd_manager.auditd - type: logs - condition: '${host.platform} == ''linux''' - include_raw_message: true - socket_type: multicast - resolve_ids: true - failure_mode: silent - audit_rules: | - # Things that affect identity. 
- -w /etc/group -p wa -k identity - backlog_limit: 8192 - rate_limit: 0 - include_warnings: false - backpressure_strategy: auto - tags: - - auditd_manager-auditd - processors: - - drop_event: - when.equals.source.ip: '127.0.0.1' - - id: audit/auditd-file_integrity-6d35f37c-2243-4229-9fba-ccf39305b536 - name: auditd_manager-1 - revision: 1 - type: audit/file_integrity - use_output: default - meta: - package: - name: auditd_manager - version: 0.0.1 - data_stream: - namespace: default - streams: - - id: fim_1 - type: audit/file_integrity - data_stream: - dataset: auditd_manager.file_integrity - type: logs - paths: - - /bin - - /usr/bin - - /sbin - - /usr/sbin - - /etc - - id: fim_2 - type: audit/file_integrity - data_stream: - dataset: auditd_manager.file_integrity - type: logs - paths: - - /bin - - /usr/bin - - /sbin - - /usr/sbin - - /etc - - id: audit/system-system-6d35f37c-2243-4229-9fba-ccf39305b536 - type: audit/system - data_stream: - namespace: default - streams: - - id: id-auditd-system-socket - type: audit/system - dataset: socket - data_stream: - dataset: audit_system.socket - type: logs - processors: - - drop_event: - when.equals.source.ip: '127.0.0.1' - - id: id-auditd-system-process - type: audit/system - dataset: process - data_stream: - dataset: audit_system.process - type: logs -management: - host: "localhost" -config: - reload: 123 diff --git a/internal/pkg/agent/program/testdata/enabled_false.yml b/internal/pkg/agent/program/testdata/enabled_false.yml deleted file mode 100644 index 34b7388e1e1..00000000000 --- a/internal/pkg/agent/program/testdata/enabled_false.yml +++ /dev/null @@ -1,17 +0,0 @@ -inputs: - - type: event/file - streams: - - enabled: false - paths: - - var/log/hello1.log - - var/log/hello2.log -management: - host: "localhost" -config: - reload: 123 -outputs: - default: - type: elasticsearch - hosts: [127.0.0.1:9200, 127.0.0.1:9300] - username: elastic - password: changeme diff --git a/internal/pkg/agent/program/testdata/enabled_output_false.yml b/internal/pkg/agent/program/testdata/enabled_output_false.yml deleted file mode 100644 index f0b57a01897..00000000000 --- a/internal/pkg/agent/program/testdata/enabled_output_false.yml +++ /dev/null @@ -1,17 +0,0 @@ -inputs: - - type: event/file - streams: - - paths: - - /var/log/hello1.log - - /var/log/hello2.log -management: - host: "localhost" -config: - reload: 123 -outputs: - default: - type: elasticsearch - enabled: false - hosts: [127.0.0.1:9200, 127.0.0.1:9300] - username: elastic - password: changeme diff --git a/internal/pkg/agent/program/testdata/enabled_output_true-filebeat.yml b/internal/pkg/agent/program/testdata/enabled_output_true-filebeat.yml deleted file mode 100644 index d9b4dc079f5..00000000000 --- a/internal/pkg/agent/program/testdata/enabled_output_true-filebeat.yml +++ /dev/null @@ -1,38 +0,0 @@ -filebeat: - inputs: - - type: log - paths: - - /var/log/hello1.log - - /var/log/hello2.log - index: logs-generic-default - processors: - - add_fields: - target: "data_stream" - fields: - type: logs - dataset: generic - namespace: default - - add_fields: - target: "event" - fields: - dataset: generic - - add_fields: - target: "elastic_agent" - fields: - id: agent-id - version: 8.0.0 - snapshot: false - - add_fields: - target: "agent" - fields: - id: agent-id -output: - elasticsearch: - enabled: true - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - headers: - h1: test-header - username: elastic - password: changeme diff --git a/internal/pkg/agent/program/testdata/enabled_output_true.yml 
b/internal/pkg/agent/program/testdata/enabled_output_true.yml deleted file mode 100644 index 9601388c536..00000000000 --- a/internal/pkg/agent/program/testdata/enabled_output_true.yml +++ /dev/null @@ -1,17 +0,0 @@ -inputs: - - type: event/file - streams: - - paths: - - /var/log/hello1.log - - /var/log/hello2.log -management: - host: "localhost" -config: - reload: 123 -outputs: - default: - type: elasticsearch - enabled: true - hosts: [127.0.0.1:9200, 127.0.0.1:9300] - username: elastic - password: changeme diff --git a/internal/pkg/agent/program/testdata/enabled_true-filebeat.yml b/internal/pkg/agent/program/testdata/enabled_true-filebeat.yml deleted file mode 100644 index f579dcba416..00000000000 --- a/internal/pkg/agent/program/testdata/enabled_true-filebeat.yml +++ /dev/null @@ -1,38 +0,0 @@ -filebeat: - inputs: - - type: log - enabled: true - paths: - - /var/log/hello1.log - - /var/log/hello2.log - index: logs-generic-default - processors: - - add_fields: - target: "data_stream" - fields: - type: logs - dataset: generic - namespace: default - - add_fields: - target: "event" - fields: - dataset: generic - - add_fields: - target: "elastic_agent" - fields: - id: agent-id - version: 8.0.0 - snapshot: false - - add_fields: - target: "agent" - fields: - id: agent-id -output: - elasticsearch: - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - headers: - h1: test-header - username: elastic - password: changeme diff --git a/internal/pkg/agent/program/testdata/enabled_true.yml b/internal/pkg/agent/program/testdata/enabled_true.yml deleted file mode 100644 index 6afc7f37ab1..00000000000 --- a/internal/pkg/agent/program/testdata/enabled_true.yml +++ /dev/null @@ -1,22 +0,0 @@ -name: Production Website DB Servers -fleet: - kibana_url: https://kibana.mydomain.com:5601 - ca_hash: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - checkin_interval: 5m -inputs: - - type: event/file - streams: - - enabled: true - paths: - - /var/log/hello1.log - - /var/log/hello2.log -management: - host: "localhost" -config: - reload: 123 -outputs: - default: - type: elasticsearch - hosts: [127.0.0.1:9200, 127.0.0.1:9300] - username: elastic - password: changeme diff --git a/internal/pkg/agent/program/testdata/endpoint_arm.yml b/internal/pkg/agent/program/testdata/endpoint_arm.yml deleted file mode 100644 index 5353cd43d9c..00000000000 --- a/internal/pkg/agent/program/testdata/endpoint_arm.yml +++ /dev/null @@ -1,117 +0,0 @@ -revision: 5 -name: Endpoint Host -fleet: - agent: - id: fleet-agent-id - host: - id: host-agent-id - access_api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw - kibana: - protocol: https - host: localhost:5601 - timeout: 30s - -outputs: - default: - type: elasticsearch - hosts: [127.0.0.1:9200, 127.0.0.1:9300] - username: elastic - password: changeme - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - -inputs: -- id: endpoint-id - type: endpoint - name: endpoint-1 - enabled: true - package: - name: endpoint - version: 0.3.0 - data_stream: - namespace: default - artifact_manifest: - schema_version: v22 - manifest_version: v21 - artifacts: - - endpoint-allowlist-windows: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-windows - - endpoint-allowlist-macos: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-macos - - endpoint-allowlist-linux: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-linux - policy: - linux: - advanced: - free-form: free-form-value - indices: - network: 
logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - process: logs-endpoint.events.process-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - process: true - file: true - network: true - windows: - malware: - mode: prevent - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - registry: logs-endpoint.events.registry-default - process: logs-endpoint.events.process-default - driver: logs-endpoint.events.driver-default - library: logs-endpoint.events.library-default - alerts: logs-endpoint.alerts-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - registry: true - process: true - security: true - file: true - dns: false - dll_and_driver_load: false - network: true - mac: - malware: - mode: prevent - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - process: logs-endpoint.events.process-default - alerts: logs-endpoint.alerts-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - process: true - file: true - network: true -runtime: - arch: arm64 - family: redhat - major: "7" diff --git a/internal/pkg/agent/program/testdata/endpoint_basic-endpoint-security.yml b/internal/pkg/agent/program/testdata/endpoint_basic-endpoint-security.yml deleted file mode 100644 index dfbec8016ba..00000000000 --- a/internal/pkg/agent/program/testdata/endpoint_basic-endpoint-security.yml +++ /dev/null @@ -1,113 +0,0 @@ -revision: 5 -fleet: - access_api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw - protocol: https - hosts: [ localhost:5601 ] - timeout: 30s - agent: - id: fleet-agent-id - logging.level: error - host: - id: host-agent-id - -output: - elasticsearch: - hosts: - - "127.0.0.1:9200" - - "127.0.0.1:9300" - username: elastic - password: changeme - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - -inputs: -- id: endpoint-id - type: endpoint - name: endpoint-1 - enabled: true - package: - name: endpoint - version: 0.3.0 - data_stream: - namespace: default - artifact_manifest: - schema_version: v22 - manifest_version: v21 - artifacts: - - endpoint-allowlist-windows: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-windows - - endpoint-allowlist-macos: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-macos - - endpoint-allowlist-linux: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-linux - policy: - linux: - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - process: logs-endpoint.events.process-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - process: true - file: true - network: true - windows: - malware: - mode: prevent - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - 
file: logs-endpoint.events.file-default - registry: logs-endpoint.events.registry-default - process: logs-endpoint.events.process-default - driver: logs-endpoint.events.driver-default - library: logs-endpoint.events.library-default - alerts: logs-endpoint.alerts-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - registry: true - process: true - security: true - file: true - dns: false - dll_and_driver_load: false - network: true - mac: - malware: - mode: prevent - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - process: logs-endpoint.events.process-default - alerts: logs-endpoint.alerts-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - process: true - file: true - network: true diff --git a/internal/pkg/agent/program/testdata/endpoint_basic.yml b/internal/pkg/agent/program/testdata/endpoint_basic.yml deleted file mode 100644 index 9f438cd46fd..00000000000 --- a/internal/pkg/agent/program/testdata/endpoint_basic.yml +++ /dev/null @@ -1,115 +0,0 @@ -revision: 5 -name: Endpoint Host -fleet: - access_api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw - protocol: https - hosts: [ localhost:5601 ] - timeout: 30s - agent: - id: fleet-agent-id - logging.level: error - host: - id: host-agent-id - -outputs: - default: - type: elasticsearch - hosts: [127.0.0.1:9200, 127.0.0.1:9300] - username: elastic - password: changeme - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - -inputs: -- id: endpoint-id - type: endpoint - name: endpoint-1 - enabled: true - package: - name: endpoint - version: 0.3.0 - data_stream: - namespace: default - artifact_manifest: - schema_version: v22 - manifest_version: v21 - artifacts: - - endpoint-allowlist-windows: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-windows - - endpoint-allowlist-macos: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-macos - - endpoint-allowlist-linux: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-linux - policy: - linux: - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - process: logs-endpoint.events.process-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - process: true - file: true - network: true - windows: - malware: - mode: prevent - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - registry: logs-endpoint.events.registry-default - process: logs-endpoint.events.process-default - driver: logs-endpoint.events.driver-default - library: logs-endpoint.events.library-default - alerts: logs-endpoint.alerts-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - registry: true - process: true - security: true - file: true - dns: false - dll_and_driver_load: false - network: true - 
mac: - malware: - mode: prevent - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - process: logs-endpoint.events.process-default - alerts: logs-endpoint.alerts-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - process: true - file: true - network: true -runtime: - arch: x86_64 diff --git a/internal/pkg/agent/program/testdata/endpoint_no_fleet.yml b/internal/pkg/agent/program/testdata/endpoint_no_fleet.yml deleted file mode 100644 index de7ccd2a11c..00000000000 --- a/internal/pkg/agent/program/testdata/endpoint_no_fleet.yml +++ /dev/null @@ -1,102 +0,0 @@ -name: Endpoint Host -revision: 5 -outputs: - default: - type: elasticsearch - hosts: [127.0.0.1:9200, 127.0.0.1:9300] - username: elastic - password: changeme - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - -inputs: -- id: endpoint-id - type: endpoint - name: endpoint-1 - enabled: true - package: - name: endpoint - version: 0.3.0 - data_stream: - namespace: default - artifact_manifest: - schema_version: v22 - manifest_version: v21 - artifacts: - - endpoint-allowlist-windows: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-windows - - endpoint-allowlist-macos: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-macos - - endpoint-allowlist-linux: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-linux - policy: - linux: - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - process: logs-endpoint.events.process-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - process: true - file: true - network: true - windows: - malware: - mode: prevent - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - registry: logs-endpoint.events.registry-default - process: logs-endpoint.events.process-default - driver: logs-endpoint.events.driver-default - library: logs-endpoint.events.library-default - alerts: logs-endpoint.alerts-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - registry: true - process: true - security: true - file: true - dns: false - dll_and_driver_load: false - network: true - mac: - malware: - mode: prevent - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - process: logs-endpoint.events.process-default - alerts: logs-endpoint.alerts-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - process: true - file: true - network: true diff --git a/internal/pkg/agent/program/testdata/endpoint_unknown_output.yml b/internal/pkg/agent/program/testdata/endpoint_unknown_output.yml deleted file mode 100644 index 48e362849be..00000000000 --- 
a/internal/pkg/agent/program/testdata/endpoint_unknown_output.yml +++ /dev/null @@ -1,107 +0,0 @@ -name: Endpoint Host -revision: 5 -fleet: - agent: - id: fleet-agent-id - api: - access_api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw - kibana: - protocol: https - host: localhost:5601 - timeout: 30s - -outputs: - default: - type: unknown - -inputs: -- id: endpoint-id - type: endpoint - name: endpoint-1 - enabled: true - package: - name: endpoint - version: 0.3.0 - data_stream: - namespace: default - artifact_manifest: - schema_version: v22 - manifest_version: v21 - artifacts: - - endpoint-allowlist-windows: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-windows - - endpoint-allowlist-macos: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-macos - - endpoint-allowlist-linux: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-linux - policy: - linux: - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - process: logs-endpoint.events.process-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - process: true - file: true - network: true - windows: - malware: - mode: prevent - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - registry: logs-endpoint.events.registry-default - process: logs-endpoint.events.process-default - driver: logs-endpoint.events.driver-default - library: logs-endpoint.events.library-default - alerts: logs-endpoint.alerts-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - registry: true - process: true - security: true - file: true - dns: false - dll_and_driver_load: false - network: true - mac: - malware: - mode: prevent - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - process: logs-endpoint.events.process-default - alerts: logs-endpoint.alerts-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - process: true - file: true - network: true diff --git a/internal/pkg/agent/program/testdata/fleet_server-fleet-server.yml b/internal/pkg/agent/program/testdata/fleet_server-fleet-server.yml deleted file mode 100644 index 7a0fad5c9df..00000000000 --- a/internal/pkg/agent/program/testdata/fleet_server-fleet-server.yml +++ /dev/null @@ -1,33 +0,0 @@ -fleet: - agent: - id: fleet-agent-id - logging.level: error - host: - id: host-agent-id - -output: - elasticsearch: - hosts: [ 127.0.0.1:9200, 127.0.0.1:9300 ] - headers: - h1: test-header - username: fleet - password: fleetpassword - -inputs: - - id: fleet-server-id - type: fleet-server - name: fleet_server-1 - revision: 6 - server: - host: 0.0.0.0 - port: 8220 - ssl: - verification_mode: none - limits: - max_connections: 40 - runtime: - gc_percent: 50 - timeouts: - read: 5m - policy: - id: copy-policy-id diff --git a/internal/pkg/agent/program/testdata/fleet_server.yml b/internal/pkg/agent/program/testdata/fleet_server.yml deleted file mode 100644 index a816197917e..00000000000 
--- a/internal/pkg/agent/program/testdata/fleet_server.yml +++ /dev/null @@ -1,51 +0,0 @@ -name: Fleet Server Only -fleet: - enabled: true - access_api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw - protocol: https - hosts: [ localhost:5601 ] - timeout: 30s - agent: - id: fleet-agent-id - logging.level: error - host: - id: host-agent-id - server: - host: 127.0.0.1 - port: 8822 - ssl: - verification_mode: none - policy: - id: copy-policy-id - output: - elasticsearch: - hosts: [ 127.0.0.1:9200, 127.0.0.1:9300 ] - username: fleet - password: fleetpassword - -outputs: - default: - type: elasticsearch - hosts: [127.0.0.1:9200, 127.0.0.1:9300] - username: elastic - password: changeme - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - -inputs: - - id: fleet-server-id - type: fleet-server - use_output: default - data_stream: - namespace: default - name: fleet_server-1 - revision: 6 - server: - host: 0.0.0.0 - port: 8220 - limits: - max_connections: 40 - runtime: - gc_percent: 50 - timeouts: - read: 5m diff --git a/internal/pkg/agent/program/testdata/journal_config.yml b/internal/pkg/agent/program/testdata/journal_config.yml deleted file mode 100644 index 732ebab6fb2..00000000000 --- a/internal/pkg/agent/program/testdata/journal_config.yml +++ /dev/null @@ -1,21 +0,0 @@ -streams: - - type: log/journal - paths: [] - backoff: 1s - max_backoff: 20s - seek: cursor - cursor_seek_fallback: head - include_matches: [] - save_remote_hostname: false - - type: log/file - ignore_older: 123s -management: - host: "localhost" -config: - reload: 123 -outputs: - default: - type: elasticsearch - hosts: [127.0.0.1:9200, 127.0.0.1:9300] - username: elastic - password: changeme diff --git a/internal/pkg/agent/program/testdata/logstash_config-endpoint-security.yml b/internal/pkg/agent/program/testdata/logstash_config-endpoint-security.yml deleted file mode 100644 index 37b47f631a1..00000000000 --- a/internal/pkg/agent/program/testdata/logstash_config-endpoint-security.yml +++ /dev/null @@ -1,114 +0,0 @@ -fleet: - enabled: true - access_api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw - protocol: https - hosts: [ localhost:5601 ] - timeout: 30s - agent: - id: fleet-agent-id - logging.level: error - host: - id: host-agent-id - -output: - logstash: - ssl.certificate: abcert - ssl.key: abckey - hosts: - - 127.0.0.1:5044 - ssl.certificate_authorities: - - abc1 - - abc2 - -inputs: -- id: endpoint-id - type: endpoint - use_input: default - name: endpoint-1 - enabled: true - package: - name: endpoint - version: 0.3.0 - data_stream: - namespace: default - artifact_manifest: - schema_version: v22 - manifest_version: v21 - artifacts: - - endpoint-allowlist-windows: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-windows - - endpoint-allowlist-macos: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-macos - - endpoint-allowlist-linux: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-linux - policy: - linux: - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - process: logs-endpoint.events.process-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - process: true - file: true - network: true - windows: - malware: - mode: prevent - advanced: - free-form: free-form-value - 
indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - registry: logs-endpoint.events.registry-default - process: logs-endpoint.events.process-default - driver: logs-endpoint.events.driver-default - library: logs-endpoint.events.library-default - alerts: logs-endpoint.alerts-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - registry: true - process: true - security: true - file: true - dns: false - dll_and_driver_load: false - network: true - mac: - malware: - mode: prevent - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - process: logs-endpoint.events.process-default - alerts: logs-endpoint.alerts-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - process: true - file: true - network: true diff --git a/internal/pkg/agent/program/testdata/logstash_config-filebeat-elasticsearch.yml b/internal/pkg/agent/program/testdata/logstash_config-filebeat-elasticsearch.yml deleted file mode 100644 index 39dab9091ed..00000000000 --- a/internal/pkg/agent/program/testdata/logstash_config-filebeat-elasticsearch.yml +++ /dev/null @@ -1,43 +0,0 @@ -filebeat: - inputs: - - type: log - paths: - - /var/log/hello3.log - - /var/log/hello4.log - index: testtype-generic-default - vars: - var: value - processors: - - add_fields: - target: "data_stream" - fields: - type: testtype - dataset: generic - namespace: default - - add_fields: - target: "event" - fields: - dataset: generic - - add_fields: - target: "elastic_agent" - fields: - id: agent-id - version: 8.0.0 - snapshot: false - - add_fields: - target: "agent" - fields: - id: agent-id -output: - elasticsearch: - hosts: - - 127.0.0.1:9201 - - 127.0.0.1:9301 - headers: - h1: test-header - username: elastic - password: changeme - bulk_max_size: 23 - worker: 10 - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= diff --git a/internal/pkg/agent/program/testdata/logstash_config-filebeat.yml b/internal/pkg/agent/program/testdata/logstash_config-filebeat.yml deleted file mode 100644 index ee44efd259e..00000000000 --- a/internal/pkg/agent/program/testdata/logstash_config-filebeat.yml +++ /dev/null @@ -1,39 +0,0 @@ -filebeat: - inputs: - - type: log - paths: - - /var/log/hello1.log - - /var/log/hello2.log - index: logs-generic-default - vars: - var: value - processors: - - add_fields: - target: "data_stream" - fields: - type: logs - dataset: generic - namespace: default - - add_fields: - target: "event" - fields: - dataset: generic - - add_fields: - target: "elastic_agent" - fields: - id: agent-id - version: 8.0.0 - snapshot: false - - add_fields: - target: "agent" - fields: - id: agent-id -output: - logstash: - ssl.certificate: abcert - ssl.key: abckey - hosts: - - 127.0.0.1:5044 - ssl.certificate_authorities: - - abc1 - - abc2 diff --git a/internal/pkg/agent/program/testdata/logstash_config-fleet-server.yml b/internal/pkg/agent/program/testdata/logstash_config-fleet-server.yml deleted file mode 100644 index b306cf40277..00000000000 --- a/internal/pkg/agent/program/testdata/logstash_config-fleet-server.yml +++ /dev/null @@ -1,18 +0,0 @@ -fleet: - agent: - id: fleet-agent-id - 
logging.level: error - host: - id: host-agent-id - -output: - elasticsearch: - hosts: [ 127.0.0.1:9200, 127.0.0.1:9300 ] - headers: - h1: test-header - username: fleet - password: fleetpassword - -inputs: - - id: fleet-server-id - type: fleet-server diff --git a/internal/pkg/agent/program/testdata/logstash_config-heartbeat.yml b/internal/pkg/agent/program/testdata/logstash_config-heartbeat.yml deleted file mode 100644 index fbddfbe022e..00000000000 --- a/internal/pkg/agent/program/testdata/logstash_config-heartbeat.yml +++ /dev/null @@ -1,29 +0,0 @@ -inputs: -- type: synthetics/http - id: unique-http-id - name: my-http - schedule: '*/5 * * * * * *' - host: "http://localhost:80/service/status" - timeout: 16s - wait: 1s - data_stream.namespace: default - processors: - - add_fields: - target: 'elastic_agent' - fields: - id: agent-id - version: 8.0.0 - snapshot: false - - add_fields: - target: 'agent' - fields: - id: agent-id -output: - logstash: - ssl.certificate: abcert - ssl.key: abckey - hosts: - - 127.0.0.1:5044 - ssl.certificate_authorities: - - abc1 - - abc2 diff --git a/internal/pkg/agent/program/testdata/logstash_config-metricbeat.yml b/internal/pkg/agent/program/testdata/logstash_config-metricbeat.yml deleted file mode 100644 index a2d429c8108..00000000000 --- a/internal/pkg/agent/program/testdata/logstash_config-metricbeat.yml +++ /dev/null @@ -1,89 +0,0 @@ -metricbeat: - modules: - - module: docker - metricsets: [status] - index: metrics-docker.status-default - hosts: ["http://127.0.0.1:8080"] - processors: - - add_fields: - target: "data_stream" - fields: - type: metrics - dataset: docker.status - namespace: default - - add_fields: - target: "event" - fields: - dataset: docker.status - - add_fields: - target: "elastic_agent" - fields: - id: agent-id - version: 8.0.0 - snapshot: false - - add_fields: - target: "agent" - fields: - id: agent-id - - module: docker - metricsets: [info] - index: metrics-generic-default - hosts: ["http://127.0.0.1:8080"] - processors: - - add_fields: - target: "data_stream" - fields: - type: metrics - dataset: generic - namespace: default - - add_fields: - target: "event" - fields: - dataset: generic - - add_fields: - target: "elastic_agent" - fields: - id: agent-id - version: 8.0.0 - snapshot: false - - add_fields: - target: "agent" - fields: - id: agent-id - - module: apache - metricsets: [info] - index: metrics-generic-testing - hosts: ["http://apache.remote"] - processors: - - add_fields: - fields: - should_be: first - - add_fields: - target: "data_stream" - fields: - type: metrics - dataset: generic - namespace: testing - - add_fields: - target: "event" - fields: - dataset: generic - - add_fields: - target: "elastic_agent" - fields: - id: agent-id - version: 8.0.0 - snapshot: false - - add_fields: - target: "agent" - fields: - id: agent-id -output: - logstash: - ssl.certificate: abcert - ssl.key: abckey - hosts: - - 127.0.0.1:5044 - ssl.certificate_authorities: - - abc1 - - abc2 diff --git a/internal/pkg/agent/program/testdata/logstash_config-packetbeat.yml b/internal/pkg/agent/program/testdata/logstash_config-packetbeat.yml deleted file mode 100644 index 2258ea5aa8d..00000000000 --- a/internal/pkg/agent/program/testdata/logstash_config-packetbeat.yml +++ /dev/null @@ -1,34 +0,0 @@ -inputs: -- type: packet - processors: - - add_fields: - target: 'elastic_agent' - fields: - id: agent-id - version: 8.0.0 - snapshot: false - - add_fields: - target: 'agent' - fields: - id: agent-id - streams: - - type: flow - timeout: 10s - period: 10s - keep_null: 
false - data_stream: - dataset: packet.flow - type: logs - - type: icmp - data_stream: - dataset: packet.icmp - type: logs -output: - logstash: - ssl.certificate: abcert - ssl.key: abckey - hosts: - - 127.0.0.1:5044 - ssl.certificate_authorities: - - abc1 - - abc2 diff --git a/internal/pkg/agent/program/testdata/logstash_config.yml b/internal/pkg/agent/program/testdata/logstash_config.yml deleted file mode 100644 index ab2a8b744a0..00000000000 --- a/internal/pkg/agent/program/testdata/logstash_config.yml +++ /dev/null @@ -1,212 +0,0 @@ -name: Production Website DB Servers -fleet: - enabled: true - access_api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw - protocol: https - hosts: [ localhost:5601 ] - timeout: 30s - agent: - id: fleet-agent-id - logging.level: error - host: - id: host-agent-id - server: - output: - elasticsearch: - hosts: [ 127.0.0.1:9200, 127.0.0.1:9300 ] - username: fleet - password: fleetpassword - -outputs: - default: - type: logstash - ssl.certificate: abcert - ssl.key: abckey - hosts: [127.0.0.1:5044] - ssl.certificate_authorities: - - abc1 - - abc2 - - elasticsearch: - type: elasticsearch - hosts: [127.0.0.1:9201, 127.0.0.1:9301] - bulk_max_size: 23 - worker: 10 - username: elastic - password: changeme - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - - monitoring: - type: elasticsearch - api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw - hosts: ["monitoring:9200"] - ca_sha256: "7lHLiyp4J8m9kw38SJ7SURJP4bXRZv/BNxyyXkCcE/M=" - -inputs: -- id: fleet-server-id - type: fleet-server - use_output: default - data_stream: - type: default -- type: docker/metrics - use_output: default - streams: - - metricset: status - processors: - - null - data_stream: - dataset: docker.status - - metricset: info - data_stream: - dataset: "" - hosts: ["http://127.0.0.1:8080"] -- type: logfile - use_output: default - streams: - - paths: - - /var/log/hello1.log - - /var/log/hello2.log - vars: - var: value -- type: logfile - data_stream: - type: testtype - use_output: elasticsearch - streams: - - paths: - - /var/log/hello3.log - - /var/log/hello4.log - vars: - var: value -- id: apache-metrics-id - type: apache/metrics - data_stream: - namespace: testing - use_output: default - processors: - - add_fields: - fields: - should_be: first - streams: - - enabled: true - metricset: info - hosts: ["http://apache.remote"] - hosts: ["http://apache.local"] -- type: synthetics/http - id: unique-http-id - name: my-http - schedule: '*/5 * * * * * *' - host: "http://localhost:80/service/status" - timeout: 16s - wait: 1s -- type: packet - streams: - - type: flow - timeout: 10s - period: 10s - keep_null: false - data_stream: - dataset: packet.flow - type: logs - - type: icmp - data_stream: - dataset: packet.icmp - type: logs -- id: endpoint-id - type: endpoint - name: endpoint-1 - enabled: true - use_input: default - package: - name: endpoint - version: 0.3.0 - data_stream: - namespace: default - artifact_manifest: - schema_version: v22 - manifest_version: v21 - artifacts: - - endpoint-allowlist-windows: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-windows - - endpoint-allowlist-macos: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-macos - - endpoint-allowlist-linux: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-linux - policy: - linux: - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: 
logs-endpoint.events.file-default - process: logs-endpoint.events.process-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - process: true - file: true - network: true - windows: - malware: - mode: prevent - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - registry: logs-endpoint.events.registry-default - process: logs-endpoint.events.process-default - driver: logs-endpoint.events.driver-default - library: logs-endpoint.events.library-default - alerts: logs-endpoint.alerts-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - registry: true - process: true - security: true - file: true - dns: false - dll_and_driver_load: false - network: true - mac: - malware: - mode: prevent - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - process: logs-endpoint.events.process-default - alerts: logs-endpoint.alerts-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - process: true - file: true - network: true - -agent.monitoring: - use_output: monitoring - -agent: - reload: 123 diff --git a/internal/pkg/agent/program/testdata/namespace-endpoint-security.yml b/internal/pkg/agent/program/testdata/namespace-endpoint-security.yml deleted file mode 100644 index 7e9f04dc411..00000000000 --- a/internal/pkg/agent/program/testdata/namespace-endpoint-security.yml +++ /dev/null @@ -1,114 +0,0 @@ -fleet: - enabled: true - access_api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw - protocol: https - hosts: [ localhost:5601 ] - timeout: 30s - agent: - id: fleet-agent-id - logging.level: error - host: - id: host-agent-id - -output: - elasticsearch: - hosts: - - "127.0.0.1:9200" - - "127.0.0.1:9300" - namespace: test_namespace - username: elastic - password: changeme - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - -inputs: -- id: endpoint-id - type: endpoint - name: endpoint-1 - enabled: true - package: - name: endpoint - version: 0.3.0 - data_stream: - namespace: default - artifact_manifest: - schema_version: v22 - manifest_version: v21 - artifacts: - - endpoint-allowlist-windows: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-windows - - endpoint-allowlist-macos: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-macos - - endpoint-allowlist-linux: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-linux - policy: - linux: - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - process: logs-endpoint.events.process-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - process: true - file: true - network: true - windows: - malware: - mode: prevent - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: 
logs-endpoint.events.file-default - registry: logs-endpoint.events.registry-default - process: logs-endpoint.events.process-default - driver: logs-endpoint.events.driver-default - library: logs-endpoint.events.library-default - alerts: logs-endpoint.alerts-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - registry: true - process: true - security: true - file: true - dns: false - dll_and_driver_load: false - network: true - mac: - malware: - mode: prevent - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - process: logs-endpoint.events.process-default - alerts: logs-endpoint.alerts-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - process: true - file: true - network: true diff --git a/internal/pkg/agent/program/testdata/namespace-filebeat.yml b/internal/pkg/agent/program/testdata/namespace-filebeat.yml deleted file mode 100644 index eafedb688c9..00000000000 --- a/internal/pkg/agent/program/testdata/namespace-filebeat.yml +++ /dev/null @@ -1,71 +0,0 @@ -filebeat: - inputs: - - type: log - paths: - - /var/log/hello1.log - - /var/log/hello2.log - index: logs-generic-default - vars: - var: value - processors: - - add_fields: - target: "data_stream" - fields: - type: logs - dataset: generic - namespace: default - - add_fields: - target: "event" - fields: - dataset: generic - - add_fields: - target: "elastic_agent" - fields: - id: agent-id - version: 8.0.0 - snapshot: false - - add_fields: - target: "agent" - fields: - id: agent-id - - type: log - paths: - - /var/log/hello3.log - - /var/log/hello4.log - index: testtype-generic-default - vars: - var: value - processors: - - add_fields: - target: "data_stream" - fields: - type: testtype - dataset: generic - namespace: default - - add_fields: - target: "event" - fields: - dataset: generic - - add_fields: - target: "elastic_agent" - fields: - id: agent-id - version: 8.0.0 - snapshot: false - - add_fields: - target: "agent" - fields: - id: agent-id -output: - elasticsearch: - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - headers: - h1: test-header - - namespace: test_namespace - username: elastic - password: changeme - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= diff --git a/internal/pkg/agent/program/testdata/namespace-fleet-server.yml b/internal/pkg/agent/program/testdata/namespace-fleet-server.yml deleted file mode 100644 index b306cf40277..00000000000 --- a/internal/pkg/agent/program/testdata/namespace-fleet-server.yml +++ /dev/null @@ -1,18 +0,0 @@ -fleet: - agent: - id: fleet-agent-id - logging.level: error - host: - id: host-agent-id - -output: - elasticsearch: - hosts: [ 127.0.0.1:9200, 127.0.0.1:9300 ] - headers: - h1: test-header - username: fleet - password: fleetpassword - -inputs: - - id: fleet-server-id - type: fleet-server diff --git a/internal/pkg/agent/program/testdata/namespace-heartbeat.yml b/internal/pkg/agent/program/testdata/namespace-heartbeat.yml deleted file mode 100644 index f34b204f5fa..00000000000 --- a/internal/pkg/agent/program/testdata/namespace-heartbeat.yml +++ /dev/null @@ -1,30 +0,0 @@ -inputs: -- type: synthetics/http - id: unique-http-id - name: my-http - schedule: '*/5 
* * * * * *' - host: "http://localhost:80/service/status" - timeout: 16s - wait: 1s - data_stream.namespace: default - processors: - - add_fields: - target: 'elastic_agent' - fields: - id: agent-id - version: 8.0.0 - snapshot: false - - add_fields: - target: 'agent' - fields: - id: agent-id -output: - elasticsearch: - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - namespace: test_namespace - username: elastic - password: changeme - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= diff --git a/internal/pkg/agent/program/testdata/namespace-metricbeat.yml b/internal/pkg/agent/program/testdata/namespace-metricbeat.yml deleted file mode 100644 index d0d4c24f058..00000000000 --- a/internal/pkg/agent/program/testdata/namespace-metricbeat.yml +++ /dev/null @@ -1,91 +0,0 @@ -metricbeat: - modules: - - module: docker - metricsets: [status] - index: metrics-docker.status-default - hosts: ["http://127.0.0.1:8080"] - processors: - - add_fields: - target: "data_stream" - fields: - type: metrics - dataset: docker.status - namespace: default - - add_fields: - target: "event" - fields: - dataset: docker.status - - add_fields: - target: "elastic_agent" - fields: - id: agent-id - version: 8.0.0 - snapshot: false - - add_fields: - target: "agent" - fields: - id: agent-id - - module: docker - metricsets: [info] - index: metrics-generic-default - hosts: ["http://127.0.0.1:8080"] - processors: - - add_fields: - target: "data_stream" - fields: - type: metrics - dataset: generic - namespace: default - - add_fields: - target: "event" - fields: - dataset: generic - - add_fields: - target: "elastic_agent" - fields: - id: agent-id - version: 8.0.0 - snapshot: false - - add_fields: - target: "agent" - fields: - id: agent-id - - module: apache - metricsets: [info] - index: metrics-generic-testing - hosts: ["http://apache.remote"] - processors: - - add_fields: - fields: - should_be: first - - add_fields: - target: "data_stream" - fields: - type: metrics - dataset: generic - namespace: testing - - add_fields: - target: "event" - fields: - dataset: generic - - add_fields: - target: "elastic_agent" - fields: - id: agent-id - version: 8.0.0 - snapshot: false - - add_fields: - target: "agent" - fields: - id: agent-id -output: - elasticsearch: - hosts: [127.0.0.1:9200, 127.0.0.1:9300] - headers: - h1: test-header - - namespace: test_namespace - username: elastic - password: changeme - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= diff --git a/internal/pkg/agent/program/testdata/namespace-packetbeat.yml b/internal/pkg/agent/program/testdata/namespace-packetbeat.yml deleted file mode 100644 index d71499bdef4..00000000000 --- a/internal/pkg/agent/program/testdata/namespace-packetbeat.yml +++ /dev/null @@ -1,35 +0,0 @@ -inputs: -- type: packet - processors: - - add_fields: - target: 'elastic_agent' - fields: - id: agent-id - version: 8.0.0 - snapshot: false - - add_fields: - target: 'agent' - fields: - id: agent-id - streams: - - type: flow - timeout: 10s - period: 10s - keep_null: false - data_stream: - dataset: packet.flow - type: logs - - type: icmp - data_stream: - dataset: packet.icmp - type: logs -output: - elasticsearch: - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - namespace: test_namespace - username: elastic - password: changeme - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= diff --git 
a/internal/pkg/agent/program/testdata/namespace.yml b/internal/pkg/agent/program/testdata/namespace.yml deleted file mode 100644 index c2f83a9abf0..00000000000 --- a/internal/pkg/agent/program/testdata/namespace.yml +++ /dev/null @@ -1,201 +0,0 @@ -name: Production Website DB Servers -fleet: - enabled: true - access_api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw - protocol: https - hosts: [ localhost:5601 ] - timeout: 30s - agent: - id: fleet-agent-id - logging.level: error - host: - id: host-agent-id - server: - output: - elasticsearch: - hosts: [ 127.0.0.1:9200, 127.0.0.1:9300 ] - username: fleet - password: fleetpassword - -outputs: - default: - type: elasticsearch - namespace: test_namespace - hosts: [127.0.0.1:9200, 127.0.0.1:9300] - username: elastic - password: changeme - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - - monitoring: - type: elasticsearch - api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw - hosts: ["monitoring:9200"] - ca_sha256: "7lHLiyp4J8m9kw38SJ7SURJP4bXRZv/BNxyyXkCcE/M=" - -inputs: -- id: fleet-server-id - type: fleet-server - use_output: default - data_stream: - type: default -- type: docker/metrics - use_output: default - streams: - - metricset: status - processors: - - null - data_stream: - dataset: docker.status - - metricset: info - data_stream: - dataset: "" - hosts: ["http://127.0.0.1:8080"] -- type: logfile - use_output: default - streams: - - paths: - - /var/log/hello1.log - - /var/log/hello2.log - vars: - var: value -- type: logfile - data_stream: - type: testtype - use_output: default - streams: - - paths: - - /var/log/hello3.log - - /var/log/hello4.log - vars: - var: value -- id: apache-metrics-id - type: apache/metrics - data_stream: - namespace: testing - use_output: default - processors: - - add_fields: - fields: - should_be: first - streams: - - enabled: true - metricset: info - hosts: ["http://apache.remote"] - hosts: ["http://apache.local"] -- type: synthetics/http - id: unique-http-id - name: my-http - schedule: '*/5 * * * * * *' - host: "http://localhost:80/service/status" - timeout: 16s - wait: 1s -- type: packet - streams: - - type: flow - timeout: 10s - period: 10s - keep_null: false - data_stream: - dataset: packet.flow - type: logs - - type: icmp - data_stream: - dataset: packet.icmp - type: logs -- id: endpoint-id - type: endpoint - name: endpoint-1 - enabled: true - package: - name: endpoint - version: 0.3.0 - data_stream: - namespace: default - artifact_manifest: - schema_version: v22 - manifest_version: v21 - artifacts: - - endpoint-allowlist-windows: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-windows - - endpoint-allowlist-macos: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-macos - - endpoint-allowlist-linux: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-linux - policy: - linux: - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - process: logs-endpoint.events.process-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - process: true - file: true - network: true - windows: - malware: - mode: prevent - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - registry: 
logs-endpoint.events.registry-default - process: logs-endpoint.events.process-default - driver: logs-endpoint.events.driver-default - library: logs-endpoint.events.library-default - alerts: logs-endpoint.alerts-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - registry: true - process: true - security: true - file: true - dns: false - dll_and_driver_load: false - network: true - mac: - malware: - mode: prevent - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - process: logs-endpoint.events.process-default - alerts: logs-endpoint.alerts-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - process: true - file: true - network: true - -agent.monitoring: - use_output: monitoring - -agent: - reload: 123 diff --git a/internal/pkg/agent/program/testdata/single_config-endpoint-security.yml b/internal/pkg/agent/program/testdata/single_config-endpoint-security.yml deleted file mode 100644 index f7bcdb284c4..00000000000 --- a/internal/pkg/agent/program/testdata/single_config-endpoint-security.yml +++ /dev/null @@ -1,115 +0,0 @@ -fleet: - enabled: true - access_api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw - protocol: https - hosts: [ localhost:5601 ] - timeout: 30s - agent: - id: fleet-agent-id - logging.level: error - host: - id: host-agent-id - -output: - elasticsearch: - hosts: - - "127.0.0.1:9200" - - "127.0.0.1:9300" - bulk_max_size: 23 - worker: 10 - username: elastic - password: changeme - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - -inputs: -- id: endpoint-id - type: endpoint - name: endpoint-1 - enabled: true - package: - name: endpoint - version: 0.3.0 - data_stream: - namespace: default - artifact_manifest: - schema_version: v22 - manifest_version: v21 - artifacts: - - endpoint-allowlist-windows: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-windows - - endpoint-allowlist-macos: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-macos - - endpoint-allowlist-linux: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-linux - policy: - linux: - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - process: logs-endpoint.events.process-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - process: true - file: true - network: true - windows: - malware: - mode: prevent - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - registry: logs-endpoint.events.registry-default - process: logs-endpoint.events.process-default - driver: logs-endpoint.events.driver-default - library: logs-endpoint.events.library-default - alerts: logs-endpoint.alerts-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - registry: true - process: true - security: 
true - file: true - dns: false - dll_and_driver_load: false - network: true - mac: - malware: - mode: prevent - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - process: logs-endpoint.events.process-default - alerts: logs-endpoint.alerts-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - process: true - file: true - network: true diff --git a/internal/pkg/agent/program/testdata/single_config-filebeat.yml b/internal/pkg/agent/program/testdata/single_config-filebeat.yml deleted file mode 100644 index e628cd2c098..00000000000 --- a/internal/pkg/agent/program/testdata/single_config-filebeat.yml +++ /dev/null @@ -1,71 +0,0 @@ -filebeat: - inputs: - - type: log - paths: - - /var/log/hello1.log - - /var/log/hello2.log - index: logs-generic-default - vars: - var: value - processors: - - add_fields: - target: "data_stream" - fields: - type: logs - dataset: generic - namespace: default - - add_fields: - target: "event" - fields: - dataset: generic - - add_fields: - target: "elastic_agent" - fields: - id: agent-id - version: 8.0.0 - snapshot: false - - add_fields: - target: "agent" - fields: - id: agent-id - - type: log - paths: - - /var/log/hello3.log - - /var/log/hello4.log - index: testtype-generic-default - vars: - var: value - processors: - - add_fields: - target: "data_stream" - fields: - type: testtype - dataset: generic - namespace: default - - add_fields: - target: "event" - fields: - dataset: generic - - add_fields: - target: "elastic_agent" - fields: - id: agent-id - version: 8.0.0 - snapshot: false - - add_fields: - target: "agent" - fields: - id: agent-id -output: - elasticsearch: - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - headers: - h1: test-header - username: elastic - password: changeme - bulk_max_size: 23 - worker: 10 - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= diff --git a/internal/pkg/agent/program/testdata/single_config-fleet-server.yml b/internal/pkg/agent/program/testdata/single_config-fleet-server.yml deleted file mode 100644 index b306cf40277..00000000000 --- a/internal/pkg/agent/program/testdata/single_config-fleet-server.yml +++ /dev/null @@ -1,18 +0,0 @@ -fleet: - agent: - id: fleet-agent-id - logging.level: error - host: - id: host-agent-id - -output: - elasticsearch: - hosts: [ 127.0.0.1:9200, 127.0.0.1:9300 ] - headers: - h1: test-header - username: fleet - password: fleetpassword - -inputs: - - id: fleet-server-id - type: fleet-server diff --git a/internal/pkg/agent/program/testdata/single_config-heartbeat.yml b/internal/pkg/agent/program/testdata/single_config-heartbeat.yml deleted file mode 100644 index 800f4100382..00000000000 --- a/internal/pkg/agent/program/testdata/single_config-heartbeat.yml +++ /dev/null @@ -1,31 +0,0 @@ -inputs: -- type: synthetics/http - id: unique-http-id - name: my-http - schedule: '*/5 * * * * * *' - host: "http://localhost:80/service/status" - timeout: 16s - wait: 1s - data_stream.namespace: default - processors: - - add_fields: - target: 'elastic_agent' - fields: - id: agent-id - version: 8.0.0 - snapshot: false - - add_fields: - target: 'agent' - fields: - id: agent-id -output: - elasticsearch: - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - bulk_max_size: 23 - worker: 10 - username: elastic - password: changeme - api_key: 
TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= diff --git a/internal/pkg/agent/program/testdata/single_config-metricbeat.yml b/internal/pkg/agent/program/testdata/single_config-metricbeat.yml deleted file mode 100644 index a2c36b151f0..00000000000 --- a/internal/pkg/agent/program/testdata/single_config-metricbeat.yml +++ /dev/null @@ -1,91 +0,0 @@ -metricbeat: - modules: - - module: docker - metricsets: [status] - index: metrics-docker.status-default - hosts: ["http://127.0.0.1:8080"] - processors: - - add_fields: - target: "data_stream" - fields: - type: metrics - dataset: docker.status - namespace: default - - add_fields: - target: "event" - fields: - dataset: docker.status - - add_fields: - target: "elastic_agent" - fields: - id: agent-id - version: 8.0.0 - snapshot: false - - add_fields: - target: "agent" - fields: - id: agent-id - - module: docker - metricsets: [info] - index: metrics-generic-default - hosts: ["http://127.0.0.1:8080"] - processors: - - add_fields: - target: "data_stream" - fields: - type: metrics - dataset: generic - namespace: default - - add_fields: - target: "event" - fields: - dataset: generic - - add_fields: - target: "elastic_agent" - fields: - id: agent-id - version: 8.0.0 - snapshot: false - - add_fields: - target: "agent" - fields: - id: agent-id - - module: apache - metricsets: [info] - index: metrics-generic-testing - hosts: ["http://apache.remote"] - processors: - - add_fields: - fields: - should_be: first - - add_fields: - target: "data_stream" - fields: - type: metrics - dataset: generic - namespace: testing - - add_fields: - target: "event" - fields: - dataset: generic - - add_fields: - target: "elastic_agent" - fields: - id: agent-id - version: 8.0.0 - snapshot: false - - add_fields: - target: "agent" - fields: - id: agent-id -output: - elasticsearch: - hosts: [127.0.0.1:9200, 127.0.0.1:9300] - headers: - h1: test-header - username: elastic - password: changeme - bulk_max_size: 23 - worker: 10 - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= diff --git a/internal/pkg/agent/program/testdata/single_config-packetbeat.yml b/internal/pkg/agent/program/testdata/single_config-packetbeat.yml deleted file mode 100644 index f800d0bd2a0..00000000000 --- a/internal/pkg/agent/program/testdata/single_config-packetbeat.yml +++ /dev/null @@ -1,36 +0,0 @@ -inputs: -- type: packet - processors: - - add_fields: - target: 'elastic_agent' - fields: - id: agent-id - version: 8.0.0 - snapshot: false - - add_fields: - target: 'agent' - fields: - id: agent-id - streams: - - type: flow - timeout: 10s - period: 10s - keep_null: false - data_stream: - dataset: packet.flow - type: logs - - type: icmp - data_stream: - dataset: packet.icmp - type: logs -output: - elasticsearch: - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - username: elastic - password: changeme - bulk_max_size: 23 - worker: 10 - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= diff --git a/internal/pkg/agent/program/testdata/single_config.yml b/internal/pkg/agent/program/testdata/single_config.yml deleted file mode 100644 index 16a03f9a77d..00000000000 --- a/internal/pkg/agent/program/testdata/single_config.yml +++ /dev/null @@ -1,202 +0,0 @@ -name: Production Website DB Servers -fleet: - enabled: true - access_api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw - protocol: https - hosts: [ localhost:5601 ] - timeout: 30s - agent: - 
id: fleet-agent-id - logging.level: error - host: - id: host-agent-id - server: - output: - elasticsearch: - hosts: [ 127.0.0.1:9200, 127.0.0.1:9300 ] - username: fleet - password: fleetpassword - -outputs: - default: - type: elasticsearch - hosts: [127.0.0.1:9200, 127.0.0.1:9300] - bulk_max_size: 23 - worker: 10 - username: elastic - password: changeme - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - - monitoring: - type: elasticsearch - api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw - hosts: ["monitoring:9200"] - ca_sha256: "7lHLiyp4J8m9kw38SJ7SURJP4bXRZv/BNxyyXkCcE/M=" - -inputs: -- id: fleet-server-id - type: fleet-server - use_output: default - data_stream: - type: default -- type: docker/metrics - use_output: default - streams: - - metricset: status - processors: - - null - data_stream: - dataset: docker.status - - metricset: info - data_stream: - dataset: "" - hosts: ["http://127.0.0.1:8080"] -- type: logfile - use_output: default - streams: - - paths: - - /var/log/hello1.log - - /var/log/hello2.log - vars: - var: value -- type: logfile - data_stream: - type: testtype - use_output: default - streams: - - paths: - - /var/log/hello3.log - - /var/log/hello4.log - vars: - var: value -- id: apache-metrics-id - type: apache/metrics - data_stream: - namespace: testing - use_output: default - processors: - - add_fields: - fields: - should_be: first - streams: - - enabled: true - metricset: info - hosts: ["http://apache.remote"] - hosts: ["http://apache.local"] -- type: synthetics/http - id: unique-http-id - name: my-http - schedule: '*/5 * * * * * *' - host: "http://localhost:80/service/status" - timeout: 16s - wait: 1s -- type: packet - streams: - - type: flow - timeout: 10s - period: 10s - keep_null: false - data_stream: - dataset: packet.flow - type: logs - - type: icmp - data_stream: - dataset: packet.icmp - type: logs -- id: endpoint-id - type: endpoint - name: endpoint-1 - enabled: true - package: - name: endpoint - version: 0.3.0 - data_stream: - namespace: default - artifact_manifest: - schema_version: v22 - manifest_version: v21 - artifacts: - - endpoint-allowlist-windows: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-windows - - endpoint-allowlist-macos: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-macos - - endpoint-allowlist-linux: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-linux - policy: - linux: - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - process: logs-endpoint.events.process-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - process: true - file: true - network: true - windows: - malware: - mode: prevent - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - registry: logs-endpoint.events.registry-default - process: logs-endpoint.events.process-default - driver: logs-endpoint.events.driver-default - library: logs-endpoint.events.library-default - alerts: logs-endpoint.alerts-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - registry: true - process: true - 
security: true - file: true - dns: false - dll_and_driver_load: false - network: true - mac: - malware: - mode: prevent - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - process: logs-endpoint.events.process-default - alerts: logs-endpoint.alerts-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - process: true - file: true - network: true - -agent.monitoring: - use_output: monitoring - -agent: - reload: 123 diff --git a/internal/pkg/agent/program/testdata/synthetics_config-heartbeat.yml b/internal/pkg/agent/program/testdata/synthetics_config-heartbeat.yml deleted file mode 100644 index 284d391f78b..00000000000 --- a/internal/pkg/agent/program/testdata/synthetics_config-heartbeat.yml +++ /dev/null @@ -1,66 +0,0 @@ -inputs: -- type: synthetics/http - id: unique-http-id - name: my-http - schedule: '*/5 * * * * * *' - host: "http://localhost:80/service/status" - timeout: 16s - wait: 1s - data_stream.namespace: default - processors: - - add_fields: - target: 'elastic_agent' - fields: - id: agent-id - version: 8.0.0 - snapshot: false - - add_fields: - target: 'agent' - fields: - id: agent-id -- type: synthetics/tcp - id: unique-tcp-id - name: my-tcp - schedule: '*/5 * * * * * *' - host: "localhost:777" - timeout: 16s - wait: 1s - data_stream.namespace: default - processors: - - add_fields: - target: 'elastic_agent' - fields: - id: agent-id - version: 8.0.0 - snapshot: false - - add_fields: - target: 'agent' - fields: - id: agent-id -- type: synthetics/icmp - id: unique-icmp-id - name: my-icmp - schedule: '@every 5s' - host: "localhost" - ipv4: true - ipv6: true - mode: any - timeout: 16s - wait: 1s - data_stream.namespace: default - processors: - - add_fields: - target: 'elastic_agent' - fields: - id: agent-id - version: 8.0.0 - snapshot: false - - add_fields: - target: 'agent' - fields: - id: agent-id -output: - elasticsearch: - hosts: [127.0.0.1:9200, 127.0.0.1:9300] - username: elastic - password: changeme diff --git a/internal/pkg/agent/program/testdata/synthetics_config.yml b/internal/pkg/agent/program/testdata/synthetics_config.yml deleted file mode 100644 index 74aa9916a65..00000000000 --- a/internal/pkg/agent/program/testdata/synthetics_config.yml +++ /dev/null @@ -1,31 +0,0 @@ -inputs: -- type: synthetics/http - id: unique-http-id - name: my-http - schedule: '*/5 * * * * * *' - host: "http://localhost:80/service/status" - timeout: 16s - wait: 1s -- type: synthetics/tcp - id: unique-tcp-id - name: my-tcp - schedule: '*/5 * * * * * *' - host: "localhost:777" - timeout: 16s - wait: 1s -- type: synthetics/icmp - id: unique-icmp-id - name: my-icmp - schedule: '@every 5s' - host: "localhost" - ipv4: true - ipv6: true - mode: any - timeout: 16s - wait: 1s -outputs: - default: - type: elasticsearch - hosts: [127.0.0.1:9200, 127.0.0.1:9300] - username: elastic - password: changeme diff --git a/internal/pkg/agent/program/testdata/usecases/enabled_output_true.yml b/internal/pkg/agent/program/testdata/usecases/enabled_output_true.yml deleted file mode 100644 index 9601388c536..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/enabled_output_true.yml +++ /dev/null @@ -1,17 +0,0 @@ -inputs: - - type: event/file - streams: - - paths: - - /var/log/hello1.log - - /var/log/hello2.log -management: - host: "localhost" -config: - reload: 123 -outputs: - 
default: - type: elasticsearch - enabled: true - hosts: [127.0.0.1:9200, 127.0.0.1:9300] - username: elastic - password: changeme diff --git a/internal/pkg/agent/program/testdata/usecases/enabled_true.yml b/internal/pkg/agent/program/testdata/usecases/enabled_true.yml deleted file mode 100644 index 6afc7f37ab1..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/enabled_true.yml +++ /dev/null @@ -1,22 +0,0 @@ -name: Production Website DB Servers -fleet: - kibana_url: https://kibana.mydomain.com:5601 - ca_hash: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - checkin_interval: 5m -inputs: - - type: event/file - streams: - - enabled: true - paths: - - /var/log/hello1.log - - /var/log/hello2.log -management: - host: "localhost" -config: - reload: 123 -outputs: - default: - type: elasticsearch - hosts: [127.0.0.1:9200, 127.0.0.1:9300] - username: elastic - password: changeme diff --git a/internal/pkg/agent/program/testdata/usecases/endpoint_basic.yml b/internal/pkg/agent/program/testdata/usecases/endpoint_basic.yml deleted file mode 100644 index 9f438cd46fd..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/endpoint_basic.yml +++ /dev/null @@ -1,115 +0,0 @@ -revision: 5 -name: Endpoint Host -fleet: - access_api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw - protocol: https - hosts: [ localhost:5601 ] - timeout: 30s - agent: - id: fleet-agent-id - logging.level: error - host: - id: host-agent-id - -outputs: - default: - type: elasticsearch - hosts: [127.0.0.1:9200, 127.0.0.1:9300] - username: elastic - password: changeme - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - -inputs: -- id: endpoint-id - type: endpoint - name: endpoint-1 - enabled: true - package: - name: endpoint - version: 0.3.0 - data_stream: - namespace: default - artifact_manifest: - schema_version: v22 - manifest_version: v21 - artifacts: - - endpoint-allowlist-windows: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-windows - - endpoint-allowlist-macos: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-macos - - endpoint-allowlist-linux: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-linux - policy: - linux: - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - process: logs-endpoint.events.process-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - process: true - file: true - network: true - windows: - malware: - mode: prevent - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - registry: logs-endpoint.events.registry-default - process: logs-endpoint.events.process-default - driver: logs-endpoint.events.driver-default - library: logs-endpoint.events.library-default - alerts: logs-endpoint.alerts-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - registry: true - process: true - security: true - file: true - dns: false - dll_and_driver_load: false - network: true - mac: - malware: - mode: prevent - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: 
logs-endpoint.events.file-default - process: logs-endpoint.events.process-default - alerts: logs-endpoint.alerts-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - process: true - file: true - network: true -runtime: - arch: x86_64 diff --git a/internal/pkg/agent/program/testdata/usecases/fleet_server.yml b/internal/pkg/agent/program/testdata/usecases/fleet_server.yml deleted file mode 100644 index a816197917e..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/fleet_server.yml +++ /dev/null @@ -1,51 +0,0 @@ -name: Fleet Server Only -fleet: - enabled: true - access_api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw - protocol: https - hosts: [ localhost:5601 ] - timeout: 30s - agent: - id: fleet-agent-id - logging.level: error - host: - id: host-agent-id - server: - host: 127.0.0.1 - port: 8822 - ssl: - verification_mode: none - policy: - id: copy-policy-id - output: - elasticsearch: - hosts: [ 127.0.0.1:9200, 127.0.0.1:9300 ] - username: fleet - password: fleetpassword - -outputs: - default: - type: elasticsearch - hosts: [127.0.0.1:9200, 127.0.0.1:9300] - username: elastic - password: changeme - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - -inputs: - - id: fleet-server-id - type: fleet-server - use_output: default - data_stream: - namespace: default - name: fleet_server-1 - revision: 6 - server: - host: 0.0.0.0 - port: 8220 - limits: - max_connections: 40 - runtime: - gc_percent: 50 - timeouts: - read: 5m diff --git a/internal/pkg/agent/program/testdata/usecases/generated/enabled_output_true.filebeat.golden.yml b/internal/pkg/agent/program/testdata/usecases/generated/enabled_output_true.filebeat.golden.yml deleted file mode 100644 index 6b898a6128a..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/generated/enabled_output_true.filebeat.golden.yml +++ /dev/null @@ -1,38 +0,0 @@ -filebeat: - inputs: - - index: logs-generic-default - paths: - - /var/log/hello1.log - - /var/log/hello2.log - processors: - - add_fields: - fields: - dataset: generic - namespace: default - type: logs - target: data_stream - - add_fields: - fields: - dataset: generic - target: event - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - type: log -output: - elasticsearch: - enabled: true - headers: - h1: test-header - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - password: changeme - username: elastic diff --git a/internal/pkg/agent/program/testdata/usecases/generated/enabled_true.filebeat.golden.yml b/internal/pkg/agent/program/testdata/usecases/generated/enabled_true.filebeat.golden.yml deleted file mode 100644 index 197bf2f6232..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/generated/enabled_true.filebeat.golden.yml +++ /dev/null @@ -1,38 +0,0 @@ -filebeat: - inputs: - - enabled: true - index: logs-generic-default - paths: - - /var/log/hello1.log - - /var/log/hello2.log - processors: - - add_fields: - fields: - dataset: generic - namespace: default - type: logs - target: data_stream - - add_fields: - fields: - dataset: generic - target: event - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - type: log -output: - elasticsearch: - headers: - h1: 
test-header - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - password: changeme - username: elastic diff --git a/internal/pkg/agent/program/testdata/usecases/generated/endpoint_basic.endpoint-security.golden.yml b/internal/pkg/agent/program/testdata/usecases/generated/endpoint_basic.endpoint-security.golden.yml deleted file mode 100644 index 6359f9185b8..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/generated/endpoint_basic.endpoint-security.golden.yml +++ /dev/null @@ -1,112 +0,0 @@ -fleet: - access_api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw - agent: - id: fleet-agent-id - logging.level: error - host: - id: host-agent-id - hosts: - - localhost:5601 - protocol: https - timeout: 30s -inputs: -- artifact_manifest: - artifacts: - - endpoint-allowlist-windows: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-windows - - endpoint-allowlist-macos: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-macos - - endpoint-allowlist-linux: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-linux - manifest_version: v21 - schema_version: v22 - data_stream: - namespace: default - enabled: true - id: endpoint-id - name: endpoint-1 - package: - name: endpoint - version: 0.3.0 - policy: - linux: - advanced: - free-form: free-form-value - events: - file: true - network: true - process: true - indices: - file: logs-endpoint.events.file-default - metadata: metrics-endpoint.metadata-default - network: logs-endpoint.events.network-default - policy: metrics-endpoint.policy-default - process: logs-endpoint.events.process-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - mac: - advanced: - free-form: free-form-value - events: - file: true - network: true - process: true - indices: - alerts: logs-endpoint.alerts-default - file: logs-endpoint.events.file-default - metadata: metrics-endpoint.metadata-default - network: logs-endpoint.events.network-default - policy: metrics-endpoint.policy-default - process: logs-endpoint.events.process-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - malware: - mode: prevent - windows: - advanced: - free-form: free-form-value - events: - dll_and_driver_load: false - dns: false - file: true - network: true - process: true - registry: true - security: true - indices: - alerts: logs-endpoint.alerts-default - driver: logs-endpoint.events.driver-default - file: logs-endpoint.events.file-default - library: logs-endpoint.events.library-default - metadata: metrics-endpoint.metadata-default - network: logs-endpoint.events.network-default - policy: metrics-endpoint.policy-default - process: logs-endpoint.events.process-default - registry: logs-endpoint.events.registry-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - malware: - mode: prevent - type: endpoint -output: - elasticsearch: - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - password: changeme - username: elastic -revision: 5 diff --git a/internal/pkg/agent/program/testdata/usecases/generated/fleet_server.fleet-server.golden.yml b/internal/pkg/agent/program/testdata/usecases/generated/fleet_server.fleet-server.golden.yml deleted file mode 100644 index 01dc3bd3c89..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/generated/fleet_server.fleet-server.golden.yml +++ /dev/null @@ -1,33 +0,0 @@ 
-fleet: - agent: - id: fleet-agent-id - logging.level: error - host: - id: host-agent-id -inputs: -- id: fleet-server-id - name: fleet_server-1 - policy: - id: copy-policy-id - revision: 6 - server: - host: 0.0.0.0 - limits: - max_connections: 40 - port: 8220 - runtime: - gc_percent: 50 - ssl: - verification_mode: none - timeouts: - read: 5m - type: fleet-server -output: - elasticsearch: - headers: - h1: test-header - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - password: fleetpassword - username: fleet diff --git a/internal/pkg/agent/program/testdata/usecases/generated/namespace.endpoint-security.golden.yml b/internal/pkg/agent/program/testdata/usecases/generated/namespace.endpoint-security.golden.yml deleted file mode 100644 index 7c8b033c4e6..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/generated/namespace.endpoint-security.golden.yml +++ /dev/null @@ -1,113 +0,0 @@ -fleet: - access_api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw - agent: - id: fleet-agent-id - logging.level: error - enabled: true - host: - id: host-agent-id - hosts: - - localhost:5601 - protocol: https - timeout: 30s -inputs: -- artifact_manifest: - artifacts: - - endpoint-allowlist-windows: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-windows - - endpoint-allowlist-macos: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-macos - - endpoint-allowlist-linux: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-linux - manifest_version: v21 - schema_version: v22 - data_stream: - namespace: default - enabled: true - id: endpoint-id - name: endpoint-1 - package: - name: endpoint - version: 0.3.0 - policy: - linux: - advanced: - free-form: free-form-value - events: - file: true - network: true - process: true - indices: - file: logs-endpoint.events.file-default - metadata: metrics-endpoint.metadata-default - network: logs-endpoint.events.network-default - policy: metrics-endpoint.policy-default - process: logs-endpoint.events.process-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - mac: - advanced: - free-form: free-form-value - events: - file: true - network: true - process: true - indices: - alerts: logs-endpoint.alerts-default - file: logs-endpoint.events.file-default - metadata: metrics-endpoint.metadata-default - network: logs-endpoint.events.network-default - policy: metrics-endpoint.policy-default - process: logs-endpoint.events.process-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - malware: - mode: prevent - windows: - advanced: - free-form: free-form-value - events: - dll_and_driver_load: false - dns: false - file: true - network: true - process: true - registry: true - security: true - indices: - alerts: logs-endpoint.alerts-default - driver: logs-endpoint.events.driver-default - file: logs-endpoint.events.file-default - library: logs-endpoint.events.library-default - metadata: metrics-endpoint.metadata-default - network: logs-endpoint.events.network-default - policy: metrics-endpoint.policy-default - process: logs-endpoint.events.process-default - registry: logs-endpoint.events.registry-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - malware: - mode: prevent - type: endpoint -output: - elasticsearch: - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - namespace: 
test_namespace - password: changeme - username: elastic diff --git a/internal/pkg/agent/program/testdata/usecases/generated/namespace.filebeat.golden.yml b/internal/pkg/agent/program/testdata/usecases/generated/namespace.filebeat.golden.yml deleted file mode 100644 index 2def5f274de..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/generated/namespace.filebeat.golden.yml +++ /dev/null @@ -1,70 +0,0 @@ -filebeat: - inputs: - - index: logs-generic-default - paths: - - /var/log/hello1.log - - /var/log/hello2.log - processors: - - add_fields: - fields: - dataset: generic - namespace: default - type: logs - target: data_stream - - add_fields: - fields: - dataset: generic - target: event - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - type: log - vars: - var: value - - index: testtype-generic-default - paths: - - /var/log/hello3.log - - /var/log/hello4.log - processors: - - add_fields: - fields: - dataset: generic - namespace: default - type: testtype - target: data_stream - - add_fields: - fields: - dataset: generic - target: event - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - type: log - vars: - var: value -output: - elasticsearch: - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - headers: - h1: test-header - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - namespace: test_namespace - password: changeme - username: elastic diff --git a/internal/pkg/agent/program/testdata/usecases/generated/namespace.fleet-server.golden.yml b/internal/pkg/agent/program/testdata/usecases/generated/namespace.fleet-server.golden.yml deleted file mode 100644 index ab7499a4f11..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/generated/namespace.fleet-server.golden.yml +++ /dev/null @@ -1,18 +0,0 @@ -fleet: - agent: - id: fleet-agent-id - logging.level: error - host: - id: host-agent-id -inputs: -- id: fleet-server-id - type: fleet-server -output: - elasticsearch: - headers: - h1: test-header - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - password: fleetpassword - username: fleet diff --git a/internal/pkg/agent/program/testdata/usecases/generated/namespace.heartbeat.golden.yml b/internal/pkg/agent/program/testdata/usecases/generated/namespace.heartbeat.golden.yml deleted file mode 100644 index c18573ee780..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/generated/namespace.heartbeat.golden.yml +++ /dev/null @@ -1,30 +0,0 @@ -inputs: -- data_stream.namespace: default - host: http://localhost:80/service/status - id: unique-http-id - name: my-http - processors: - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - schedule: '*/5 * * * * * *' - timeout: 16s - type: synthetics/http - wait: 1s -output: - elasticsearch: - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - namespace: test_namespace - password: changeme - username: elastic diff --git a/internal/pkg/agent/program/testdata/usecases/generated/namespace.metricbeat.golden.yml b/internal/pkg/agent/program/testdata/usecases/generated/namespace.metricbeat.golden.yml deleted file mode 100644 index 3232297227a..00000000000 --- 
a/internal/pkg/agent/program/testdata/usecases/generated/namespace.metricbeat.golden.yml +++ /dev/null @@ -1,98 +0,0 @@ -metricbeat: - modules: - - hosts: - - http://127.0.0.1:8080 - index: metrics-docker.status-default - metricsets: - - status - module: docker - processors: - - add_fields: - fields: - dataset: docker.status - namespace: default - type: metrics - target: data_stream - - add_fields: - fields: - dataset: docker.status - target: event - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - - hosts: - - http://127.0.0.1:8080 - index: metrics-generic-default - metricsets: - - info - module: docker - processors: - - add_fields: - fields: - dataset: generic - namespace: default - type: metrics - target: data_stream - - add_fields: - fields: - dataset: generic - target: event - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - - hosts: - - http://apache.remote - index: metrics-generic-testing - metricsets: - - info - module: apache - processors: - - add_fields: - fields: - should_be: first - - add_fields: - fields: - dataset: generic - namespace: testing - type: metrics - target: data_stream - - add_fields: - fields: - dataset: generic - target: event - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent -output: - elasticsearch: - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - headers: - h1: test-header - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - namespace: test_namespace - password: changeme - username: elastic diff --git a/internal/pkg/agent/program/testdata/usecases/generated/namespace.packetbeat.golden.yml b/internal/pkg/agent/program/testdata/usecases/generated/namespace.packetbeat.golden.yml deleted file mode 100644 index cc38887ff8e..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/generated/namespace.packetbeat.golden.yml +++ /dev/null @@ -1,35 +0,0 @@ -inputs: -- processors: - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - streams: - - data_stream: - dataset: packet.flow - type: logs - keep_null: false - period: 10s - timeout: 10s - type: flow - - data_stream: - dataset: packet.icmp - type: logs - type: icmp - type: packet -output: - elasticsearch: - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - namespace: test_namespace - password: changeme - username: elastic diff --git a/internal/pkg/agent/program/testdata/usecases/generated/single_config.endpoint-security.golden.yml b/internal/pkg/agent/program/testdata/usecases/generated/single_config.endpoint-security.golden.yml deleted file mode 100644 index 552e169bbac..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/generated/single_config.endpoint-security.golden.yml +++ /dev/null @@ -1,114 +0,0 @@ -fleet: - access_api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw - agent: - id: fleet-agent-id - logging.level: error - enabled: true - host: - id: host-agent-id - hosts: - - localhost:5601 - protocol: https - timeout: 30s -inputs: -- artifact_manifest: - artifacts: - - endpoint-allowlist-windows: - 
sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-windows - - endpoint-allowlist-macos: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-macos - - endpoint-allowlist-linux: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-linux - manifest_version: v21 - schema_version: v22 - data_stream: - namespace: default - enabled: true - id: endpoint-id - name: endpoint-1 - package: - name: endpoint - version: 0.3.0 - policy: - linux: - advanced: - free-form: free-form-value - events: - file: true - network: true - process: true - indices: - file: logs-endpoint.events.file-default - metadata: metrics-endpoint.metadata-default - network: logs-endpoint.events.network-default - policy: metrics-endpoint.policy-default - process: logs-endpoint.events.process-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - mac: - advanced: - free-form: free-form-value - events: - file: true - network: true - process: true - indices: - alerts: logs-endpoint.alerts-default - file: logs-endpoint.events.file-default - metadata: metrics-endpoint.metadata-default - network: logs-endpoint.events.network-default - policy: metrics-endpoint.policy-default - process: logs-endpoint.events.process-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - malware: - mode: prevent - windows: - advanced: - free-form: free-form-value - events: - dll_and_driver_load: false - dns: false - file: true - network: true - process: true - registry: true - security: true - indices: - alerts: logs-endpoint.alerts-default - driver: logs-endpoint.events.driver-default - file: logs-endpoint.events.file-default - library: logs-endpoint.events.library-default - metadata: metrics-endpoint.metadata-default - network: logs-endpoint.events.network-default - policy: metrics-endpoint.policy-default - process: logs-endpoint.events.process-default - registry: logs-endpoint.events.registry-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - malware: - mode: prevent - type: endpoint -output: - elasticsearch: - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - bulk_max_size: 23 - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - password: changeme - username: elastic - worker: 10 diff --git a/internal/pkg/agent/program/testdata/usecases/generated/single_config.filebeat.golden.yml b/internal/pkg/agent/program/testdata/usecases/generated/single_config.filebeat.golden.yml deleted file mode 100644 index 22055f9e09e..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/generated/single_config.filebeat.golden.yml +++ /dev/null @@ -1,73 +0,0 @@ -filebeat: - inputs: - - id: logfile-1 - index: logs-generic-default - paths: - - /var/log/hello1.log - - /var/log/hello2.log - processors: - - add_fields: - fields: - dataset: generic - namespace: default - type: logs - target: data_stream - - add_fields: - fields: - dataset: generic - target: event - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - type: log - vars: - var: value - - id: logfile-2 - index: testtype-generic-default - paths: - - /var/log/hello3.log - - /var/log/hello4.log - processors: - - add_fields: - fields: - dataset: generic - namespace: default - type: testtype - target: data_stream - - add_fields: - fields: - dataset: generic - target: event - - 
add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - type: log - vars: - var: value -output: - elasticsearch: - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - bulk_max_size: 23 - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - headers: - h1: test-header - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - password: changeme - username: elastic - worker: 10 diff --git a/internal/pkg/agent/program/testdata/usecases/generated/single_config.fleet-server.golden.yml b/internal/pkg/agent/program/testdata/usecases/generated/single_config.fleet-server.golden.yml deleted file mode 100644 index ab7499a4f11..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/generated/single_config.fleet-server.golden.yml +++ /dev/null @@ -1,18 +0,0 @@ -fleet: - agent: - id: fleet-agent-id - logging.level: error - host: - id: host-agent-id -inputs: -- id: fleet-server-id - type: fleet-server -output: - elasticsearch: - headers: - h1: test-header - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - password: fleetpassword - username: fleet diff --git a/internal/pkg/agent/program/testdata/usecases/generated/single_config.heartbeat.golden.yml b/internal/pkg/agent/program/testdata/usecases/generated/single_config.heartbeat.golden.yml deleted file mode 100644 index f4c5827603a..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/generated/single_config.heartbeat.golden.yml +++ /dev/null @@ -1,31 +0,0 @@ -inputs: -- data_stream.namespace: default - host: http://localhost:80/service/status - id: unique-http-id - name: my-http - processors: - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - schedule: '*/5 * * * * * *' - timeout: 16s - type: synthetics/http - wait: 1s -output: - elasticsearch: - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - bulk_max_size: 23 - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - password: changeme - username: elastic - worker: 10 diff --git a/internal/pkg/agent/program/testdata/usecases/generated/single_config.metricbeat.golden.yml b/internal/pkg/agent/program/testdata/usecases/generated/single_config.metricbeat.golden.yml deleted file mode 100644 index aca14055635..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/generated/single_config.metricbeat.golden.yml +++ /dev/null @@ -1,99 +0,0 @@ -metricbeat: - modules: - - hosts: - - http://127.0.0.1:8080 - index: metrics-docker.status-default - metricsets: - - status - module: docker - processors: - - add_fields: - fields: - dataset: docker.status - namespace: default - type: metrics - target: data_stream - - add_fields: - fields: - dataset: docker.status - target: event - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - - hosts: - - http://127.0.0.1:8080 - index: metrics-generic-default - metricsets: - - info - module: docker - processors: - - add_fields: - fields: - dataset: generic - namespace: default - type: metrics - target: data_stream - - add_fields: - fields: - dataset: generic - target: event - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - - hosts: - - http://apache.remote - index: metrics-generic-testing - 
metricsets: - - info - module: apache - processors: - - add_fields: - fields: - should_be: first - - add_fields: - fields: - dataset: generic - namespace: testing - type: metrics - target: data_stream - - add_fields: - fields: - dataset: generic - target: event - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent -output: - elasticsearch: - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - bulk_max_size: 23 - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - headers: - h1: test-header - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - password: changeme - username: elastic - worker: 10 diff --git a/internal/pkg/agent/program/testdata/usecases/generated/single_config.packetbeat.golden.yml b/internal/pkg/agent/program/testdata/usecases/generated/single_config.packetbeat.golden.yml deleted file mode 100644 index e7f13deb0a2..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/generated/single_config.packetbeat.golden.yml +++ /dev/null @@ -1,36 +0,0 @@ -inputs: -- processors: - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - streams: - - data_stream: - dataset: packet.flow - type: logs - keep_null: false - period: 10s - timeout: 10s - type: flow - - data_stream: - dataset: packet.icmp - type: logs - type: icmp - type: packet -output: - elasticsearch: - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - bulk_max_size: 23 - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - password: changeme - username: elastic - worker: 10 diff --git a/internal/pkg/agent/program/testdata/usecases/generated/synthetics_config.heartbeat.golden.yml b/internal/pkg/agent/program/testdata/usecases/generated/synthetics_config.heartbeat.golden.yml deleted file mode 100644 index 870a0070f4e..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/generated/synthetics_config.heartbeat.golden.yml +++ /dev/null @@ -1,68 +0,0 @@ -inputs: -- data_stream.namespace: default - host: http://localhost:80/service/status - id: unique-http-id - name: my-http - processors: - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - schedule: '*/5 * * * * * *' - timeout: 16s - type: synthetics/http - wait: 1s -- data_stream.namespace: default - host: localhost:777 - id: unique-tcp-id - name: my-tcp - processors: - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - schedule: '*/5 * * * * * *' - timeout: 16s - type: synthetics/tcp - wait: 1s -- data_stream.namespace: default - host: localhost - id: unique-icmp-id - ipv4: true - ipv6: true - mode: any - name: my-icmp - processors: - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - schedule: '@every 5s' - timeout: 16s - type: synthetics/icmp - wait: 1s -output: - elasticsearch: - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - password: changeme - username: elastic diff --git a/internal/pkg/agent/program/testdata/usecases/namespace.yml b/internal/pkg/agent/program/testdata/usecases/namespace.yml deleted file mode 100644 index c2f83a9abf0..00000000000 --- 
a/internal/pkg/agent/program/testdata/usecases/namespace.yml +++ /dev/null @@ -1,201 +0,0 @@ -name: Production Website DB Servers -fleet: - enabled: true - access_api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw - protocol: https - hosts: [ localhost:5601 ] - timeout: 30s - agent: - id: fleet-agent-id - logging.level: error - host: - id: host-agent-id - server: - output: - elasticsearch: - hosts: [ 127.0.0.1:9200, 127.0.0.1:9300 ] - username: fleet - password: fleetpassword - -outputs: - default: - type: elasticsearch - namespace: test_namespace - hosts: [127.0.0.1:9200, 127.0.0.1:9300] - username: elastic - password: changeme - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - - monitoring: - type: elasticsearch - api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw - hosts: ["monitoring:9200"] - ca_sha256: "7lHLiyp4J8m9kw38SJ7SURJP4bXRZv/BNxyyXkCcE/M=" - -inputs: -- id: fleet-server-id - type: fleet-server - use_output: default - data_stream: - type: default -- type: docker/metrics - use_output: default - streams: - - metricset: status - processors: - - null - data_stream: - dataset: docker.status - - metricset: info - data_stream: - dataset: "" - hosts: ["http://127.0.0.1:8080"] -- type: logfile - use_output: default - streams: - - paths: - - /var/log/hello1.log - - /var/log/hello2.log - vars: - var: value -- type: logfile - data_stream: - type: testtype - use_output: default - streams: - - paths: - - /var/log/hello3.log - - /var/log/hello4.log - vars: - var: value -- id: apache-metrics-id - type: apache/metrics - data_stream: - namespace: testing - use_output: default - processors: - - add_fields: - fields: - should_be: first - streams: - - enabled: true - metricset: info - hosts: ["http://apache.remote"] - hosts: ["http://apache.local"] -- type: synthetics/http - id: unique-http-id - name: my-http - schedule: '*/5 * * * * * *' - host: "http://localhost:80/service/status" - timeout: 16s - wait: 1s -- type: packet - streams: - - type: flow - timeout: 10s - period: 10s - keep_null: false - data_stream: - dataset: packet.flow - type: logs - - type: icmp - data_stream: - dataset: packet.icmp - type: logs -- id: endpoint-id - type: endpoint - name: endpoint-1 - enabled: true - package: - name: endpoint - version: 0.3.0 - data_stream: - namespace: default - artifact_manifest: - schema_version: v22 - manifest_version: v21 - artifacts: - - endpoint-allowlist-windows: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-windows - - endpoint-allowlist-macos: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-macos - - endpoint-allowlist-linux: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-linux - policy: - linux: - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - process: logs-endpoint.events.process-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - process: true - file: true - network: true - windows: - malware: - mode: prevent - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - registry: logs-endpoint.events.registry-default - process: logs-endpoint.events.process-default - driver: logs-endpoint.events.driver-default - library: 
logs-endpoint.events.library-default - alerts: logs-endpoint.alerts-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - registry: true - process: true - security: true - file: true - dns: false - dll_and_driver_load: false - network: true - mac: - malware: - mode: prevent - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - process: logs-endpoint.events.process-default - alerts: logs-endpoint.alerts-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - process: true - file: true - network: true - -agent.monitoring: - use_output: monitoring - -agent: - reload: 123 diff --git a/internal/pkg/agent/program/testdata/usecases/single_config.yml b/internal/pkg/agent/program/testdata/usecases/single_config.yml deleted file mode 100644 index 654453af2c6..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/single_config.yml +++ /dev/null @@ -1,204 +0,0 @@ -name: Production Website DB Servers -fleet: - enabled: true - access_api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw - protocol: https - hosts: [ localhost:5601 ] - timeout: 30s - agent: - id: fleet-agent-id - logging.level: error - host: - id: host-agent-id - server: - output: - elasticsearch: - hosts: [ 127.0.0.1:9200, 127.0.0.1:9300 ] - username: fleet - password: fleetpassword - -outputs: - default: - type: elasticsearch - hosts: [127.0.0.1:9200, 127.0.0.1:9300] - bulk_max_size: 23 - worker: 10 - username: elastic - password: changeme - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - - monitoring: - type: elasticsearch - api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw - hosts: ["monitoring:9200"] - ca_sha256: "7lHLiyp4J8m9kw38SJ7SURJP4bXRZv/BNxyyXkCcE/M=" - -inputs: -- id: fleet-server-id - type: fleet-server - use_output: default - data_stream: - type: default -- type: docker/metrics - use_output: default - streams: - - metricset: status - processors: - - null - data_stream: - dataset: docker.status - - metricset: info - data_stream: - dataset: "" - hosts: ["http://127.0.0.1:8080"] -- type: logfile - id: logfile-1 - use_output: default - streams: - - paths: - - /var/log/hello1.log - - /var/log/hello2.log - vars: - var: value -- type: logfile - id: logfile-2 - data_stream: - type: testtype - use_output: default - streams: - - paths: - - /var/log/hello3.log - - /var/log/hello4.log - vars: - var: value -- id: apache-metrics-id - type: apache/metrics - data_stream: - namespace: testing - use_output: default - processors: - - add_fields: - fields: - should_be: first - streams: - - enabled: true - metricset: info - hosts: ["http://apache.remote"] - hosts: ["http://apache.local"] -- type: synthetics/http - id: unique-http-id - name: my-http - schedule: '*/5 * * * * * *' - host: "http://localhost:80/service/status" - timeout: 16s - wait: 1s -- type: packet - streams: - - type: flow - timeout: 10s - period: 10s - keep_null: false - data_stream: - dataset: packet.flow - type: logs - - type: icmp - data_stream: - dataset: packet.icmp - type: logs -- id: endpoint-id - type: endpoint - name: endpoint-1 - enabled: true - package: - name: endpoint - version: 0.3.0 - data_stream: - namespace: default - 
artifact_manifest: - schema_version: v22 - manifest_version: v21 - artifacts: - - endpoint-allowlist-windows: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-windows - - endpoint-allowlist-macos: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-macos - - endpoint-allowlist-linux: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-linux - policy: - linux: - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - process: logs-endpoint.events.process-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - process: true - file: true - network: true - windows: - malware: - mode: prevent - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - registry: logs-endpoint.events.registry-default - process: logs-endpoint.events.process-default - driver: logs-endpoint.events.driver-default - library: logs-endpoint.events.library-default - alerts: logs-endpoint.alerts-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - registry: true - process: true - security: true - file: true - dns: false - dll_and_driver_load: false - network: true - mac: - malware: - mode: prevent - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - process: logs-endpoint.events.process-default - alerts: logs-endpoint.alerts-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - process: true - file: true - network: true - -agent.monitoring: - use_output: monitoring - -agent: - reload: 123 diff --git a/internal/pkg/agent/program/testdata/usecases/synthetics_config.yml b/internal/pkg/agent/program/testdata/usecases/synthetics_config.yml deleted file mode 100644 index 74aa9916a65..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/synthetics_config.yml +++ /dev/null @@ -1,31 +0,0 @@ -inputs: -- type: synthetics/http - id: unique-http-id - name: my-http - schedule: '*/5 * * * * * *' - host: "http://localhost:80/service/status" - timeout: 16s - wait: 1s -- type: synthetics/tcp - id: unique-tcp-id - name: my-tcp - schedule: '*/5 * * * * * *' - host: "localhost:777" - timeout: 16s - wait: 1s -- type: synthetics/icmp - id: unique-icmp-id - name: my-icmp - schedule: '@every 5s' - host: "localhost" - ipv4: true - ipv6: true - mode: any - timeout: 16s - wait: 1s -outputs: - default: - type: elasticsearch - hosts: [127.0.0.1:9200, 127.0.0.1:9300] - username: elastic - password: changeme diff --git a/internal/pkg/artifact/install/atomic/atomic_installer.go b/internal/pkg/artifact/install/atomic/atomic_installer.go deleted file mode 100644 index 10c2652c1c8..00000000000 --- a/internal/pkg/artifact/install/atomic/atomic_installer.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package atomic - -import ( - "context" - "io/ioutil" - "os" - "path/filepath" - "runtime" - - "github.com/hashicorp/go-multierror" - - "github.com/elastic/elastic-agent/internal/pkg/agent/program" -) - -type embeddedInstaller interface { - Install(ctx context.Context, spec program.Spec, version, installDir string) error -} - -// Installer installs into temporary destination and moves to correct one after -// successful finish. -type Installer struct { - installer embeddedInstaller -} - -// NewInstaller creates a new AtomicInstaller -func NewInstaller(i embeddedInstaller) (*Installer, error) { - return &Installer{ - installer: i, - }, nil -} - -// Install performs installation of program in a specific version. -func (i *Installer) Install(ctx context.Context, spec program.Spec, version, installDir string) error { - // tar installer uses Dir of installDir to determine location of unpack - // - // installer is ran inside a tmp directory created in the parent installDir, this is so the atomic - // rename always occurs on the same mount path that holds the installation directory - tempDir, err := ioutil.TempDir(filepath.Dir(installDir), "tmp") - if err != nil { - return err - } - - // always remove the entire tempDir - defer func() { - os.RemoveAll(tempDir) - }() - - tempInstallDir := filepath.Join(tempDir, filepath.Base(installDir)) - - // cleanup install directory before Install - if _, err := os.Stat(installDir); err == nil || os.IsExist(err) { - os.RemoveAll(installDir) - } - - if _, err := os.Stat(tempInstallDir); err == nil || os.IsExist(err) { - os.RemoveAll(tempInstallDir) - } - - // on windows rename is not atomic, let's force it to flush the cache - defer func() { - if runtime.GOOS == "windows" { - syncDir(installDir) - syncDir(tempInstallDir) - } - }() - - if err := i.installer.Install(ctx, spec, version, tempInstallDir); err != nil { - // cleanup unfinished install - if rerr := os.RemoveAll(tempInstallDir); rerr != nil { - err = multierror.Append(err, rerr) - } - return err - } - - if err := os.Rename(tempInstallDir, installDir); err != nil { - if rerr := os.RemoveAll(installDir); rerr != nil { - err = multierror.Append(err, rerr) - } - if rerr := os.RemoveAll(tempInstallDir); rerr != nil { - err = multierror.Append(err, rerr) - } - return err - } - - return nil -} - -func syncDir(dir string) { - if f, err := os.OpenFile(dir, os.O_RDWR, 0777); err == nil { - f.Sync() - f.Close() - } -} diff --git a/internal/pkg/artifact/install/atomic/atomic_installer_test.go b/internal/pkg/artifact/install/atomic/atomic_installer_test.go deleted file mode 100644 index 08c8b592d6a..00000000000 --- a/internal/pkg/artifact/install/atomic/atomic_installer_test.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package atomic - -import ( - "context" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sync" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" -) - -func TestOKInstall(t *testing.T) { - sig := make(chan int) - ti := &testInstaller{sig} - var wg sync.WaitGroup - i, err := NewInstaller(ti) - s := program.Spec{Name: "a", Cmd: "a"} - - assert.NoError(t, err) - - ctx := context.Background() - installDir := filepath.Join(paths.TempDir(), "install_dir") - - wg.Add(1) - go func() { - err := i.Install(ctx, s, "b", installDir) - assert.NoError(t, err) - wg.Done() - }() - - // signal to process next files - close(sig) - - wg.Wait() - - assert.DirExists(t, installDir) - files := getFiles() - - for name := range files { - path := filepath.Join(installDir, name) - assert.FileExists(t, path) - } - - os.RemoveAll(installDir) -} - -func TestContextCancelledInstall(t *testing.T) { - sig := make(chan int) - ti := &testInstaller{sig} - var wg sync.WaitGroup - i, err := NewInstaller(ti) - s := program.Spec{Name: "a", Cmd: "a"} - - assert.NoError(t, err) - - ctx, cancel := context.WithCancel(context.Background()) - installDir := filepath.Join(paths.TempDir(), "install_dir") - - wg.Add(1) - go func() { - err := i.Install(ctx, s, "b", installDir) - assert.Error(t, err) - wg.Done() - }() - - // cancel before signaling - cancel() - close(sig) - - wg.Wait() - - assert.NoDirExists(t, installDir) -} - -type testInstaller struct { - signal chan int -} - -func (ti *testInstaller) Install(ctx context.Context, _ program.Spec, _, installDir string) error { - files := getFiles() - if err := os.MkdirAll(installDir, 0777); err != nil { - return err - } - - for name, content := range files { - if err := ctx.Err(); err != nil { - return err - } - - filename := filepath.Join(installDir, name) - if err := ioutil.WriteFile(filename, content, 0666); err != nil { - return err - } - - // wait for all but last - <-ti.signal - } - - return nil -} - -func getFiles() map[string][]byte { - files := make(map[string][]byte) - fileCount := 3 - for i := 1; i <= fileCount; i++ { - files[fmt.Sprintf("file_%d", i)] = []byte(fmt.Sprintf("content of file %d", i)) - } - - return files -} diff --git a/internal/pkg/artifact/install/awaitable/awaitable_installer.go b/internal/pkg/artifact/install/awaitable/awaitable_installer.go deleted file mode 100644 index 33e8c6c48d5..00000000000 --- a/internal/pkg/artifact/install/awaitable/awaitable_installer.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package awaitable - -import ( - "context" - "sync" - - "github.com/elastic/elastic-agent/internal/pkg/agent/program" -) - -type embeddedInstaller interface { - Install(ctx context.Context, spec program.Spec, version, installDir string) error -} - -type embeddedChecker interface { - Check(ctx context.Context, spec program.Spec, version, installDir string) error -} - -// Installer installs into temporary destination and moves to correct one after -// successful finish. 
-type Installer struct { - installer embeddedInstaller - checker embeddedChecker - wg sync.WaitGroup -} - -// NewInstaller creates a new AtomicInstaller -func NewInstaller(i embeddedInstaller, ch embeddedChecker) (*Installer, error) { - return &Installer{ - installer: i, - checker: ch, - }, nil -} - -// Wait allows caller to wait for install to be finished -func (i *Installer) Wait() { - i.wg.Wait() -} - -// Install performs installation of program in a specific version. -func (i *Installer) Install(ctx context.Context, spec program.Spec, version, installDir string) error { - i.wg.Add(1) - defer i.wg.Done() - - return i.installer.Install(ctx, spec, version, installDir) -} - -// Check performs installation checks -func (i *Installer) Check(ctx context.Context, spec program.Spec, version, installDir string) error { - i.wg.Add(1) - defer i.wg.Done() - - return i.checker.Check(ctx, spec, version, installDir) -} diff --git a/internal/pkg/artifact/install/dir/dir_checker.go b/internal/pkg/artifact/install/dir/dir_checker.go deleted file mode 100644 index 38a93756ff8..00000000000 --- a/internal/pkg/artifact/install/dir/dir_checker.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package dir - -import ( - "context" - "os" - - "github.com/elastic/elastic-agent/internal/pkg/agent/program" -) - -// Checker performs basic check that the install directory exists. -type Checker struct{} - -// NewChecker returns a new Checker. -func NewChecker() *Checker { - return &Checker{} -} - -// Check checks that the install directory exists. -func (*Checker) Check(_ context.Context, _ program.Spec, _, installDir string) error { - _, err := os.Stat(installDir) - return err -} diff --git a/internal/pkg/artifact/install/hooks/hooks_installer.go b/internal/pkg/artifact/install/hooks/hooks_installer.go deleted file mode 100644 index 73ce7b81c5b..00000000000 --- a/internal/pkg/artifact/install/hooks/hooks_installer.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package hooks - -import ( - "context" - - "github.com/elastic/elastic-agent/internal/pkg/agent/program" -) - -type embeddedInstaller interface { - Install(ctx context.Context, spec program.Spec, version, installDir string) error -} - -type embeddedChecker interface { - Check(ctx context.Context, spec program.Spec, version, installDir string) error -} - -// InstallerChecker runs the PostInstallSteps after running the embedded installer -// and runs the InstallerCheckSteps after running the embedded installation checker. -type InstallerChecker struct { - installer embeddedInstaller - checker embeddedChecker -} - -// NewInstallerChecker creates a new InstallerChecker -func NewInstallerChecker(i embeddedInstaller, c embeddedChecker) (*InstallerChecker, error) { - return &InstallerChecker{ - installer: i, - checker: c, - }, nil -} - -// Install performs installation of program in a specific version, then runs the -// PostInstallSteps for the program if defined. 
-func (i *InstallerChecker) Install(ctx context.Context, spec program.Spec, version, installDir string) error { - if err := i.installer.Install(ctx, spec, version, installDir); err != nil { - return err - } - if spec.PostInstallSteps != nil { - return spec.PostInstallSteps.Execute(ctx, installDir) - } - return nil -} - -// Check performs installation check of program to ensure that it is already installed, then -// runs the InstallerCheckSteps to ensure that the installation is valid. -func (i *InstallerChecker) Check(ctx context.Context, spec program.Spec, version, installDir string) error { - err := i.checker.Check(ctx, spec, version, installDir) - if err != nil { - return err - } - if spec.CheckInstallSteps != nil { - return spec.CheckInstallSteps.Execute(ctx, installDir) - } - - return nil -} diff --git a/internal/pkg/artifact/install/installer.go b/internal/pkg/artifact/install/installer.go deleted file mode 100644 index 15bc01e6f3a..00000000000 --- a/internal/pkg/artifact/install/installer.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package install - -import ( - "context" - "errors" - "runtime" - - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/artifact" - "github.com/elastic/elastic-agent/internal/pkg/artifact/install/atomic" - "github.com/elastic/elastic-agent/internal/pkg/artifact/install/awaitable" - "github.com/elastic/elastic-agent/internal/pkg/artifact/install/dir" - "github.com/elastic/elastic-agent/internal/pkg/artifact/install/hooks" - "github.com/elastic/elastic-agent/internal/pkg/artifact/install/tar" - "github.com/elastic/elastic-agent/internal/pkg/artifact/install/zip" -) - -var ( - // ErrConfigNotProvided is returned when provided config is nil - ErrConfigNotProvided = errors.New("config not provided") -) - -// Installer is an interface allowing installation of an artifact -type Installer interface { - // Install installs an artifact and returns - // location of the installed program - // error if something went wrong - Install(ctx context.Context, spec program.Spec, version, installDir string) error -} - -// InstallerChecker is an interface that installs but also checks for valid installation. -type InstallerChecker interface { - Installer - - // Check checks if the installation is good. - Check(ctx context.Context, spec program.Spec, version, installDir string) error -} - -// AwaitableInstallerChecker is an interface that installs, checks but also is awaitable to check when actions are done. -type AwaitableInstallerChecker interface { - InstallerChecker - - // Waits for its work to be done. 
- Wait() -} - -// NewInstaller returns a correct installer associated with a -// package type: -// - rpm -> rpm installer -// - deb -> deb installer -// - binary -> zip installer on windows, tar installer on linux and mac -func NewInstaller(config *artifact.Config) (AwaitableInstallerChecker, error) { - if config == nil { - return nil, ErrConfigNotProvided - } - - var installer Installer - var err error - if runtime.GOOS == "windows" { - installer, err = zip.NewInstaller(config) - } else { - installer, err = tar.NewInstaller(config) - } - - if err != nil { - return nil, err - } - - atomicInstaller, err := atomic.NewInstaller(installer) - if err != nil { - return nil, err - } - - hooksInstaller, err := hooks.NewInstallerChecker(atomicInstaller, dir.NewChecker()) - if err != nil { - return nil, err - } - - return awaitable.NewInstaller(hooksInstaller, hooksInstaller) -} diff --git a/internal/pkg/artifact/install/tar/tar_installer.go b/internal/pkg/artifact/install/tar/tar_installer.go deleted file mode 100644 index de7f02dfe38..00000000000 --- a/internal/pkg/artifact/install/tar/tar_installer.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package tar - -import ( - "archive/tar" - "compress/gzip" - "context" - "fmt" - "io" - "os" - "path/filepath" - "strings" - - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/artifact" -) - -// Installer or tar packages -type Installer struct { - config *artifact.Config -} - -// NewInstaller creates an installer able to install tar packages -func NewInstaller(config *artifact.Config) (*Installer, error) { - return &Installer{ - config: config, - }, nil -} - -// Install performs installation of program in a specific version. -// It expects package to be already downloaded. 
-func (i *Installer) Install(ctx context.Context, spec program.Spec, version, installDir string) error { - artifactPath, err := artifact.GetArtifactPath(spec, version, i.config.OS(), i.config.Arch(), i.config.TargetDirectory) - if err != nil { - return err - } - - f, err := os.Open(artifactPath) - if err != nil { - return errors.New(fmt.Sprintf("artifact for '%s' version '%s' could not be found at '%s'", spec.Name, version, artifactPath), errors.TypeFilesystem, errors.M(errors.MetaKeyPath, artifactPath)) - } - defer f.Close() - - // cleanup install directory before unpack - _, err = os.Stat(installDir) - if err == nil || os.IsExist(err) { - os.RemoveAll(installDir) - } - - // unpack must occur in directory that holds the installation directory - // or the extraction will be double nested - return unpack(ctx, f, filepath.Dir(installDir)) -} - -func unpack(ctx context.Context, r io.Reader, dir string) error { - zr, err := gzip.NewReader(r) - if err != nil { - return errors.New("requires gzip-compressed body", err, errors.TypeFilesystem) - } - - tr := tar.NewReader(zr) - var rootDir string - - for { - // exit and propagate cancellation err as soon as we know about it - if err := ctx.Err(); err != nil { - return err - } - - f, err := tr.Next() - if errors.Is(err, io.EOF) { - break - } - if err != nil { - return err - } - - if !validFileName(f.Name) { - return errors.New("tar contained invalid filename: %q", f.Name, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, f.Name)) - } - rel := filepath.FromSlash(f.Name) - abs := filepath.Join(dir, rel) - - // find the root dir - if currentDir := filepath.Dir(abs); rootDir == "" || len(filepath.Dir(rootDir)) > len(currentDir) { - rootDir = currentDir - } - - fi := f.FileInfo() - mode := fi.Mode() - switch { - case mode.IsRegular(): - // just to be sure, it should already be created by Dir type - if err := os.MkdirAll(filepath.Dir(abs), 0755); err != nil { - return errors.New(err, "TarInstaller: creating directory for file "+abs, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, abs)) - } - - wf, err := os.OpenFile(abs, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode.Perm()) - if err != nil { - return errors.New(err, "TarInstaller: creating file "+abs, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, abs)) - } - - _, err = io.Copy(wf, tr) - - if err == nil { - // sometimes we try executing binary too fast and run into text file busy after unpacking - // syncing prevents this - if syncErr := wf.Sync(); syncErr != nil { - err = syncErr - } - } - - if closeErr := wf.Close(); closeErr != nil && err == nil { - err = closeErr - } - - if err != nil { - return fmt.Errorf("TarInstaller: error writing to %s: %w", abs, err) - } - case mode.IsDir(): - if err := os.MkdirAll(abs, 0755); err != nil { - return errors.New(err, "TarInstaller: creating directory for file "+abs, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, abs)) - } - default: - return errors.New(fmt.Sprintf("tar file entry %s contained unsupported file type %v", f.Name, mode), errors.TypeFilesystem, errors.M(errors.MetaKeyPath, f.Name)) - } - } - - return nil -} - -func validFileName(p string) bool { - if p == "" || strings.Contains(p, `\`) || strings.HasPrefix(p, "/") || strings.Contains(p, "../") { - return false - } - return true -} diff --git a/internal/pkg/artifact/install/zip/zip_installer.go b/internal/pkg/artifact/install/zip/zip_installer.go deleted file mode 100644 index e5ce9f7a41e..00000000000 --- a/internal/pkg/artifact/install/zip/zip_installer.go +++ /dev/null @@ -1,163 +0,0 @@ -// 
Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package zip - -import ( - "archive/zip" - "context" - "io" - "os" - "path/filepath" - - "github.com/hashicorp/go-multierror" - - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/artifact" -) - -// Installer or zip packages -type Installer struct { - config *artifact.Config -} - -// NewInstaller creates an installer able to install zip packages -func NewInstaller(config *artifact.Config) (*Installer, error) { - return &Installer{ - config: config, - }, nil -} - -// Install performs installation of program in a specific version. -// It expects package to be already downloaded. -func (i *Installer) Install(ctx context.Context, spec program.Spec, version, installDir string) error { - artifactPath, err := artifact.GetArtifactPath(spec, version, i.config.OS(), i.config.Arch(), i.config.TargetDirectory) - if err != nil { - return err - } - - // cleanup install directory before unzip - _, err = os.Stat(installDir) - if err == nil || os.IsExist(err) { - os.RemoveAll(installDir) - } - - if err := i.unzip(ctx, artifactPath); err != nil { - return err - } - - rootDir, err := i.getRootDir(artifactPath) - if err != nil { - return err - } - - // if root directory is not the same as desired directory rename - // e.g contains `-windows-` or `-SNAPSHOT-` - if rootDir != installDir { - defer syncDir(rootDir) - defer syncDir(installDir) - - if err := os.Rename(rootDir, installDir); err != nil { - return errors.New(err, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, installDir)) - } - - } - - return nil -} - -func (i *Installer) unzip(_ context.Context, artifactPath string) error { - r, err := zip.OpenReader(artifactPath) - if err != nil { - return err - } - defer r.Close() - - if err := os.MkdirAll(i.config.InstallPath, 0755); err != nil && !os.IsExist(err) { - // failed to create install dir - return err - } - - unpackFile := func(f *zip.File) (err error) { - rc, err := f.Open() - if err != nil { - return err - } - defer func() { - if cerr := rc.Close(); cerr != nil { - err = multierror.Append(err, cerr) - } - }() - - path := filepath.Join(i.config.InstallPath, f.Name) - - if f.FileInfo().IsDir() { - os.MkdirAll(path, f.Mode()) - } else { - os.MkdirAll(filepath.Dir(path), f.Mode()) - f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode()) - if err != nil { - return err - } - defer func() { - if closeErr := f.Close(); closeErr != nil { - err = multierror.Append(err, closeErr) - } - }() - - if _, err = io.Copy(f, rc); err != nil { - return err - } - - // sometimes we try executing binary too fast and run into text file busy after unpacking - // syncing prevents this - f.Sync() - } - return nil - } - - for _, f := range r.File { - if err := unpackFile(f); err != nil { - return err - } - } - - return nil -} - -// retrieves root directory from zip archive -func (i *Installer) getRootDir(zipPath string) (dir string, err error) { - defer func() { - if dir != "" { - dir = filepath.Join(i.config.InstallPath, dir) - } - }() - - zipReader, err := zip.OpenReader(zipPath) - if err != nil { - return "", err - } - defer zipReader.Close() - - var rootDir string - for _, f := range zipReader.File { - if 
filepath.Base(f.Name) == filepath.Dir(f.Name) { - return f.Name, nil - } - - if currentDir := filepath.Dir(f.Name); rootDir == "" || len(currentDir) < len(rootDir) { - rootDir = currentDir - } - } - - return rootDir, nil -} - -func syncDir(dir string) { - if f, err := os.OpenFile(dir, os.O_RDWR, 0777); err == nil { - f.Sync() - f.Close() - } -} diff --git a/internal/pkg/artifact/uninstall/hooks/hooks_uninstaller.go b/internal/pkg/artifact/uninstall/hooks/hooks_uninstaller.go deleted file mode 100644 index 461d64b4476..00000000000 --- a/internal/pkg/artifact/uninstall/hooks/hooks_uninstaller.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package hooks - -import ( - "context" - - "github.com/elastic/elastic-agent/internal/pkg/agent/program" -) - -type embeddedUninstaller interface { - Uninstall(ctx context.Context, spec program.Spec, version, installDir string) error -} - -// Uninstaller that executes PreUninstallSteps -type Uninstaller struct { - uninstaller embeddedUninstaller -} - -// NewUninstaller creates an uninstaller that executes PreUninstallSteps -func NewUninstaller(i embeddedUninstaller) (*Uninstaller, error) { - return &Uninstaller{ - uninstaller: i, - }, nil -} - -// Uninstall performs the execution of the PreUninstallSteps -func (i *Uninstaller) Uninstall(ctx context.Context, spec program.Spec, version, installDir string) error { - if spec.PreUninstallSteps != nil { - return spec.PreUninstallSteps.Execute(ctx, installDir) - } - return i.uninstaller.Uninstall(ctx, spec, version, installDir) -} diff --git a/internal/pkg/artifact/uninstall/uninstaller.go b/internal/pkg/artifact/uninstall/uninstaller.go deleted file mode 100644 index a5eb73f669f..00000000000 --- a/internal/pkg/artifact/uninstall/uninstaller.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package uninstall - -import ( - "context" - - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/artifact/uninstall/hooks" -) - -// Uninstaller is an interface allowing un-installation of an artifact -type Uninstaller interface { - // Uninstall uninstalls an artifact. - Uninstall(ctx context.Context, spec program.Spec, version, installDir string) error -} - -// NewUninstaller returns a correct uninstaller. -func NewUninstaller() (Uninstaller, error) { - return hooks.NewUninstaller(&nilUninstaller{}) -} - -type nilUninstaller struct{} - -func (*nilUninstaller) Uninstall(_ context.Context, _ program.Spec, _, _ string) error { - return nil -} diff --git a/internal/pkg/core/app/descriptor.go b/internal/pkg/core/app/descriptor.go deleted file mode 100644 index 84c7c1019ae..00000000000 --- a/internal/pkg/core/app/descriptor.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package app - -import ( - "path/filepath" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/artifact" -) - -// Descriptor defines a program which needs to be run. -// Is passed around operator operations. -type Descriptor struct { - spec program.Spec - executionCtx ExecutionContext - directory string - process ProcessSpec -} - -// NewDescriptor creates a program which satisfies Program interface and can be used with Operator. -func NewDescriptor(spec program.Spec, version string, config *artifact.Config, tags map[Tag]string) *Descriptor { - dir := paths.Components() - return NewDescriptorWithPath(dir, spec, version, config, tags) -} - -// NewDescriptorOnPath creates a program which satisfies Program interface and can be used with Operator. -func NewDescriptorWithPath(path string, spec program.Spec, version string, config *artifact.Config, tags map[Tag]string) *Descriptor { - servicePort := 0 - if spec.ServicePort > 0 { - servicePort = spec.ServicePort - } - - return &Descriptor{ - spec: spec, - directory: path, - executionCtx: NewExecutionContext(servicePort, spec.CommandName(), version, tags), - process: specification(path, spec), - } -} - -// ServicePort is the port the service will connect to gather GRPC information. When this is not -// 0 then the application is ran using the `service` application type, versus a `process` application. -func (p *Descriptor) ServicePort() int { - return p.executionCtx.ServicePort -} - -// BinaryName is the name of the binary. E.g filebeat. -func (p *Descriptor) BinaryName() string { - return p.executionCtx.BinaryName -} - -// Version specifies a version of the applications e.g '7.2.0'. -func (p *Descriptor) Version() string { return p.executionCtx.Version } - -// Tags is a collection of tags used to specify application more precisely. -// Two descriptor with same binary name and version but with different tags will -// result in two different instances of the application. -func (p *Descriptor) Tags() map[Tag]string { return p.executionCtx.Tags } - -// ID is a unique representation of the application. -func (p *Descriptor) ID() string { return p.executionCtx.ID } - -// ExecutionContext returns execution context of the application. -func (p *Descriptor) ExecutionContext() ExecutionContext { return p.executionCtx } - -// Spec returns a program specification with resolved binary path. -func (p *Descriptor) Spec() program.Spec { - return p.spec -} - -// ProcessSpec returns a process specification with resolved binary path. -func (p *Descriptor) ProcessSpec() ProcessSpec { - return p.process -} - -// Directory specifies the root directory of the application within an install path. -func (p *Descriptor) Directory() string { - return p.directory -} - -func specification(dir string, spec program.Spec) ProcessSpec { - return ProcessSpec{ - BinaryPath: filepath.Join(dir, spec.Command()), - Args: spec.Args, - Configuration: nil, - } -} diff --git a/internal/pkg/core/app/execution_context.go b/internal/pkg/core/app/execution_context.go deleted file mode 100644 index 48479403aa8..00000000000 --- a/internal/pkg/core/app/execution_context.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
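The descriptor above turns a program spec plus an install directory into a runnable process specification; the binary path is simply the directory joined with the command declared by the spec. A simplified standalone sketch of that resolution (the types and names here are stand-ins for program.Spec and app.ProcessSpec, not the removed definitions):

// Sketch of resolving a runnable process spec from an install directory
// and a program spec, as specification() above does.
package main

import (
	"fmt"
	"path/filepath"
)

type spec struct {
	name    string
	command string
	args    []string
}

type processSpec struct {
	binaryPath string
	args       []string
}

// resolve joins the install directory with the spec's command to produce
// the absolute binary path and carries the spec's arguments along.
func resolve(dir string, s spec) processSpec {
	return processSpec{
		binaryPath: filepath.Join(dir, s.command),
		args:       s.args,
	}
}

func main() {
	p := resolve("/opt/agent/components", spec{name: "filebeat", command: "filebeat", args: []string{"-e"}})
	fmt.Println(p.binaryPath, p.args)
}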
- -package app - -import ( - "crypto/sha256" - "fmt" -) - -const ( - hashLen = 16 -) - -// ExecutionContext describes runnable binary -type ExecutionContext struct { - ServicePort int - BinaryName string - Version string - Tags map[Tag]string - ID string -} - -// NewExecutionContext creates an execution context and generates an ID for this context -func NewExecutionContext(servicePort int, binaryName, version string, tags map[Tag]string) ExecutionContext { - id := fmt.Sprintf("%s--%s", binaryName, version) - if len(tags) > 0 { - hash := fmt.Sprintf("%x", sha256.New().Sum([]byte(fmt.Sprint(tags)))) - if len(hash) > hashLen { - hash = hash[:hashLen] - } - id += fmt.Sprintf("--%x", hash) - } - - return ExecutionContext{ - ServicePort: servicePort, - BinaryName: binaryName, - Version: version, - Tags: tags, - ID: id, - } -} diff --git a/internal/pkg/core/app/process_cred.go b/internal/pkg/core/app/process_cred.go deleted file mode 100644 index ee6f4f0e2a4..00000000000 --- a/internal/pkg/core/app/process_cred.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -//go:build linux || darwin -// +build linux darwin - -package app - -import ( - "os" - "os/user" - "strconv" - - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" -) - -// UserGroup returns the uid and gid for the process specification. -func (spec ProcessSpec) UserGroup() (int, int, error) { - if spec.User.Uid == "" && spec.Group.Gid == "" { - // use own level - return os.Geteuid(), os.Getegid(), nil - } - - // check if user/group exists - usedUID := spec.User.Uid - userGID := "" - if u, err := user.LookupId(spec.User.Uid); err != nil { - u, err := user.Lookup(spec.User.Name) - if err != nil { - return 0, 0, err - } - usedUID = u.Uid - userGID = u.Gid - } else { - userGID = u.Gid - } - - usedGID := spec.Group.Gid - if spec.Group.Gid != "" || spec.Group.Name != "" { - if _, err := user.LookupGroupId(spec.Group.Gid); err != nil { - g, err := user.LookupGroup(spec.Group.Name) - if err != nil { - return 0, 0, err - } - - usedGID = g.Gid - } - } else { - // if group is not specified and user is found, use users group - usedGID = userGID - } - - uid, err := strconv.Atoi(usedUID) - if err != nil { - return 0, 0, errors.New(err, "invalid user") - } - - gid, _ := strconv.Atoi(usedGID) - if err != nil { - return 0, 0, errors.New(err, "invalid group") - } - - return uid, gid, nil -} diff --git a/internal/pkg/core/app/process_cred_other.go b/internal/pkg/core/app/process_cred_other.go deleted file mode 100644 index 49aa0ccd613..00000000000 --- a/internal/pkg/core/app/process_cred_other.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -//go:build !linux && !darwin -// +build !linux,!darwin - -package app - -// UserGroup returns the uid and gid for the process specification. -func (spec ProcessSpec) UserGroup() (int, int, error) { - return 0, 0, nil -} diff --git a/internal/pkg/core/app/spec.go b/internal/pkg/core/app/spec.go deleted file mode 100644 index 6f09c52e34b..00000000000 --- a/internal/pkg/core/app/spec.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package app - -import ( - "os/user" -) - -// ProcessSpec specifies a way of running a process -type ProcessSpec struct { - // Binary path. - BinaryPath string - - // Set of arguments. - Args []string - Configuration map[string]interface{} - - // Under what user we can run the program. (example: apm-server is not running as root, isolation and cgroup) - User user.User - Group user.Group - - // TODO: mapping transformation rules for configuration between elastic-agent.yml and to the beats. -} diff --git a/internal/pkg/core/app/tag.go b/internal/pkg/core/app/tag.go deleted file mode 100644 index e289190ad81..00000000000 --- a/internal/pkg/core/app/tag.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package app - -// Tag is a tag for specifying metadata related -// to a process. -type Tag string - -// TagSidecar tags a sidecar process -const TagSidecar = "sidecar" - -// Taggable is an object containing tags. -type Taggable interface { - Tags() map[Tag]string -} - -// IsSidecar returns true if tags contains sidecar flag. -func IsSidecar(descriptor Taggable) bool { - tags := descriptor.Tags() - _, isSidecar := tags[TagSidecar] - return isSidecar -} diff --git a/internal/pkg/core/monitoring/beats/beats_monitor.go b/internal/pkg/core/monitoring/beats/beats_monitor.go index d70878eb8a0..b795c5ecb58 100644 --- a/internal/pkg/core/monitoring/beats/beats_monitor.go +++ b/internal/pkg/core/monitoring/beats/beats_monitor.go @@ -13,9 +13,10 @@ import ( "strings" "unicode" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" + "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/artifact" "github.com/elastic/elastic-agent/internal/pkg/config" monitoringConfig "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config" ) diff --git a/internal/pkg/core/monitoring/server/process.go b/internal/pkg/core/monitoring/server/process.go index cbd4ddcf3df..1d1d9c80806 100644 --- a/internal/pkg/core/monitoring/server/process.go +++ b/internal/pkg/core/monitoring/server/process.go @@ -15,11 +15,12 @@ import ( "syscall" "time" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" + "github.com/gorilla/mux" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/artifact" "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/beats" ) diff --git a/internal/pkg/core/plugin/common.go b/internal/pkg/core/plugin/common.go deleted file mode 100644 index 145ff574b75..00000000000 --- a/internal/pkg/core/plugin/common.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. 
Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package plugin - -import ( - "gopkg.in/yaml.v2" - - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -type configFetcher interface { - Config() string -} - -// IsRestartNeeded returns true if -// - spec is configured to support restart on change -// - output changes in between configs -func IsRestartNeeded(log *logger.Logger, spec program.Spec, cfgFetch configFetcher, newCfg map[string]interface{}) bool { - if !spec.RestartOnOutputChange { - // early exit if restart is not needed anyway - return false - } - - // compare outputs - curCfgStr := cfgFetch.Config() - if curCfgStr == "" { - // no config currently applied - return false - } - - currentOutput, err := getOutputConfigFromString(curCfgStr) - if err != nil { - log.Errorf("failed to retrieve output config from current state: %v", err) - return false - } - - newOutput, err := getOutputConfigFromMap(newCfg) - if err != nil { - log.Errorf("failed to retrieve output config from new state: %v", err) - return false - } - - // restart needed only if output changed - return currentOutput != newOutput -} - -func getOutputConfigFromString(cfgString string) (string, error) { - cfg, err := config.NewConfigFrom(cfgString) - if err != nil { - return "", err - } - - cfgMap, err := cfg.ToMapStr() - if err != nil { - return "", err - } - - return getOutputConfigFromMap(cfgMap) -} - -func getOutputConfigFromMap(cfgMap map[string]interface{}) (string, error) { - outputCfgIface, found := cfgMap["output"] - if !found { - // output not found not an error - return "", nil - } - - outputCfg, ok := outputCfgIface.(map[string]interface{}) - if !ok { - return "", errors.New("not a map") - } - - cfgStr, err := yaml.Marshal(outputCfg) - if err != nil { - return "", errors.New(err, errors.TypeApplication) - } - - return string(cfgStr), nil -} diff --git a/internal/pkg/core/plugin/common_test.go b/internal/pkg/core/plugin/common_test.go deleted file mode 100644 index 03f0306f145..00000000000 --- a/internal/pkg/core/plugin/common_test.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
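IsRestartNeeded above only forces a restart when the rendered output section differs between the current and the new configuration: each config is reduced to the YAML of its "output" block and the two strings are compared. A self-contained sketch of that reduction (error handling trimmed, same gopkg.in/yaml.v2 dependency as the removed code):

// Sketch of the output-only comparison used to decide whether a config
// change requires restarting the application.
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// outputYAML renders only the "output" section of a config; an empty
// string means the config has no output block.
func outputYAML(cfg map[string]interface{}) (string, error) {
	out, ok := cfg["output"].(map[string]interface{})
	if !ok {
		return "", nil
	}
	b, err := yaml.Marshal(out)
	if err != nil {
		return "", err
	}
	return string(b), nil
}

func main() {
	oldCfg := map[string]interface{}{"output": map[string]interface{}{"hosts": []string{"https://localhost:9200"}}}
	newCfg := map[string]interface{}{"output": map[string]interface{}{"hosts": []string{"https://example:9200"}}}

	a, _ := outputYAML(oldCfg)
	b, _ := outputYAML(newCfg)
	fmt.Println("restart needed:", a != b)
}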
- -package plugin - -import ( - "testing" - - "github.com/stretchr/testify/require" - "gopkg.in/yaml.v2" - - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -func TestRestartNeeded(t *testing.T) { - tt := []struct { - Name string - OldOutput map[string]interface{} - NewOutput map[string]interface{} - ShouldRestart bool - - ExpectedRestart bool - }{ - { - "same empty output", - map[string]interface{}{}, - map[string]interface{}{}, - true, - false, - }, - { - "same not empty output", - map[string]interface{}{"output": map[string]interface{}{"username": "user", "password": "123456"}}, - map[string]interface{}{"output": map[string]interface{}{"username": "user", "password": "123456"}}, - true, - false, - }, - { - "different empty output", - map[string]interface{}{}, - map[string]interface{}{"output": map[string]interface{}{"username": "user", "password": "123456"}}, - true, - false, - }, - { - "different not empty output", - map[string]interface{}{"output": map[string]interface{}{"username": "user", "password": "123456"}}, - map[string]interface{}{"output": map[string]interface{}{"username": "user", "password": "s3cur3_Pa55;"}}, - true, - true, - }, - { - "different not empty output no restart required", - map[string]interface{}{"output": map[string]interface{}{"username": "user", "password": "123456"}}, - map[string]interface{}{"output": map[string]interface{}{"username": "user", "password": "s3cur3_Pa55;"}}, - false, - false, - }, - } - - for _, tc := range tt { - t.Run(tc.Name, func(t *testing.T) { - cf, err := newTestConfigFetcher(tc.OldOutput) - require.NoError(t, err) - s := testProgramSpec(tc.ShouldRestart) - l, _ := logger.New("tst", false) - - IsRestartNeeded(l, s, cf, tc.NewOutput) - }) - } -} - -func newTestConfigFetcher(cfg map[string]interface{}) (*testConfigFetcher, error) { - cfgStr, err := yaml.Marshal(cfg) - if err != nil { - return nil, errors.New(err, errors.TypeApplication) - } - - return &testConfigFetcher{cfg: string(cfgStr)}, nil -} - -type testConfigFetcher struct { - cfg string -} - -func (f testConfigFetcher) Config() string { return f.cfg } - -func testProgramSpec(restartOnOutput bool) program.Spec { - return program.Spec{ - RestartOnOutputChange: restartOnOutput, - } -} diff --git a/internal/pkg/core/plugin/process/app.go b/internal/pkg/core/plugin/process/app.go deleted file mode 100644 index c184cefe397..00000000000 --- a/internal/pkg/core/plugin/process/app.go +++ /dev/null @@ -1,319 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package process - -import ( - "context" - "fmt" - "os" - "reflect" - "sync" - "time" - - "github.com/elastic/elastic-agent-client/v7/pkg/proto" - - "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/core/app" - "github.com/elastic/elastic-agent/internal/pkg/core/monitoring" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/internal/pkg/core/status" - "github.com/elastic/elastic-agent/internal/pkg/tokenbucket" - "github.com/elastic/elastic-agent/pkg/core/logger" - "github.com/elastic/elastic-agent/pkg/core/process" - "github.com/elastic/elastic-agent/pkg/core/server" -) - -var ( - // ErrAppNotRunning is returned when configuration is performed on not running application. - ErrAppNotRunning = errors.New("application is not running", errors.TypeApplication) - procExitTimeout = 10 * time.Second -) - -// Application encapsulates a concrete application ran by elastic-agent e.g Beat. -type Application struct { - bgContext context.Context - id string - name string - pipelineID string - logLevel string - desc *app.Descriptor - srv *server.Server - srvState *server.ApplicationState - limiter *tokenbucket.Bucket - startContext context.Context - tag app.Taggable - state state.State - reporter state.Reporter - watchClosers map[int]context.CancelFunc - - uid int - gid int - - monitor monitoring.Monitor - statusReporter status.Reporter - - processConfig *process.Config - - logger *logger.Logger - - appLock sync.Mutex - restartCanceller context.CancelFunc - restartConfig map[string]interface{} -} - -// ArgsDecorator decorates arguments before calling an application -type ArgsDecorator func([]string) []string - -// NewApplication creates a new instance of an applications. It will not automatically start -// the application. -func NewApplication( - ctx context.Context, - id, appName, pipelineID, logLevel string, - desc *app.Descriptor, - srv *server.Server, - cfg *configuration.SettingsConfig, - logger *logger.Logger, - reporter state.Reporter, - monitor monitoring.Monitor, - statusController status.Controller) (*Application, error) { - - s := desc.ProcessSpec() - uid, gid, err := s.UserGroup() - if err != nil { - return nil, err - } - - b, _ := tokenbucket.NewTokenBucket(ctx, 3, 3, 1*time.Second) - return &Application{ - bgContext: ctx, - id: id, - name: appName, - pipelineID: pipelineID, - logLevel: logLevel, - desc: desc, - srv: srv, - processConfig: cfg.ProcessConfig, - logger: logger, - limiter: b, - state: state.State{ - Status: state.Stopped, - }, - reporter: reporter, - monitor: monitor, - uid: uid, - gid: gid, - statusReporter: statusController.RegisterApp(id, appName), - watchClosers: make(map[int]context.CancelFunc), - }, nil -} - -// Monitor returns monitoring handler of this app. -func (a *Application) Monitor() monitoring.Monitor { - return a.monitor -} - -// Spec returns the program spec of this app. -func (a *Application) Spec() program.Spec { - return a.desc.Spec() -} - -// State returns the application state. 
-func (a *Application) State() state.State { - a.appLock.Lock() - defer a.appLock.Unlock() - return a.state -} - -// Name returns application name -func (a *Application) Name() string { - return a.name -} - -// Started returns true if the application is started. -func (a *Application) Started() bool { - return a.state.Status != state.Stopped && a.state.Status != state.Crashed && a.state.Status != state.Failed -} - -// Stop stops the current application. -func (a *Application) Stop() { - a.appLock.Lock() - status := a.state.Status - srvState := a.srvState - a.appLock.Unlock() - - if status == state.Stopped { - return - } - - if srvState != nil { - // signal stop through GRPC, wait and kill is performed later in gracefulKill - if err := srvState.Stop(a.processConfig.StopTimeout); err != nil { - err := fmt.Errorf("failed to stop after %s: %w", a.processConfig.StopTimeout, err) - a.setState(state.Failed, err.Error(), nil) - - a.logger.Error(err) - } - - } - - a.appLock.Lock() - defer a.appLock.Unlock() - - a.srvState = nil - if a.state.ProcessInfo != nil { - // stop and clean watcher - a.stopWatcher(a.state.ProcessInfo) - a.gracefulKill(a.state.ProcessInfo) - - a.state.ProcessInfo = nil - - // cleanup drops - a.cleanUp() - } - a.setState(state.Stopped, "Stopped", nil) -} - -// Shutdown stops the application (aka. subprocess). -func (a *Application) Shutdown() { - a.logger.Infof("Signaling application to stop because of shutdown: %s", a.id) - a.Stop() -} - -// SetState sets the status of the application. -func (a *Application) SetState(s state.Status, msg string, payload map[string]interface{}) { - a.appLock.Lock() - defer a.appLock.Unlock() - a.setState(s, msg, payload) -} - -func (a *Application) watch(ctx context.Context, p app.Taggable, proc *process.Info, cfg map[string]interface{}) { - go func() { - var procState *os.ProcessState - - select { - case ps := <-a.waitProc(proc.Process): - procState = ps - case <-a.bgContext.Done(): - return - case <-ctx.Done(): - // closer called - return - } - - a.appLock.Lock() - defer a.appLock.Unlock() - if a.state.ProcessInfo != proc { - // already another process started, another watcher is watching instead - a.gracefulKill(proc) - return - } - - // stop the watcher - a.stopWatcher(a.state.ProcessInfo) - - // was already stopped by Stop, do not restart - if a.state.Status == state.Stopped { - return - } - - a.state.ProcessInfo = nil - srvState := a.srvState - - if srvState == nil || srvState.Expected() == proto.StateExpected_STOPPING { - return - } - - msg := fmt.Sprintf("exited with code: %d", procState.ExitCode()) - a.setState(state.Restarting, msg, nil) - - // it was a crash - a.start(ctx, p, cfg, true) - }() -} - -func (a *Application) stopWatcher(procInfo *process.Info) { - if procInfo != nil { - if closer, ok := a.watchClosers[procInfo.PID]; ok { - closer() - delete(a.watchClosers, procInfo.PID) - } - } -} - -func (a *Application) waitProc(proc *os.Process) <-chan *os.ProcessState { - resChan := make(chan *os.ProcessState) - - go func() { - procState, err := proc.Wait() - if err != nil { - // process is not a child - some OSs requires process to be child - a.externalProcess(proc) - } - - resChan <- procState - }() - - return resChan -} - -func (a *Application) setState(s state.Status, msg string, payload map[string]interface{}) { - if a.state.Status != s || a.state.Message != msg || !reflect.DeepEqual(a.state.Payload, payload) { - if state.IsStateFiltered(msg, payload) { - return - } - - a.state.Status = s - a.state.Message = msg - a.state.Payload 
= payload - if a.reporter != nil { - go a.reporter.OnStateChange(a.id, a.name, a.state) - } - a.statusReporter.Update(s, msg, payload) - } -} - -func (a *Application) cleanUp() { - a.monitor.Cleanup(a.desc.Spec(), a.pipelineID) -} - -func (a *Application) gracefulKill(proc *process.Info) { - if proc == nil || proc.Process == nil { - return - } - - // send stop signal to request stop - if err := proc.Stop(); err != nil { - a.logger.Errorf("failed to stop %s: %v", a.Name(), err) - } - - var wg sync.WaitGroup - doneChan := make(chan struct{}) - wg.Add(1) - go func() { - wg.Done() - - if _, err := proc.Process.Wait(); err != nil { - // process is not a child - some OSs requires process to be child - a.externalProcess(proc.Process) - } - close(doneChan) - }() - - // wait for awaiter - wg.Wait() - - // kill in case it's still running after timeout - t := time.NewTimer(procExitTimeout) - defer t.Stop() - select { - case <-doneChan: - case <-t.C: - a.logger.Infof("gracefulKill timed out after %d, killing %s", - procExitTimeout, a.Name()) - _ = proc.Process.Kill() - } -} diff --git a/internal/pkg/core/plugin/process/configure.go b/internal/pkg/core/plugin/process/configure.go deleted file mode 100644 index 57f12e191de..00000000000 --- a/internal/pkg/core/plugin/process/configure.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package process - -import ( - "context" - - "gopkg.in/yaml.v2" - - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/core/plugin" - "github.com/elastic/elastic-agent/internal/pkg/core/state" -) - -// Configure configures the application with the passed configuration. -func (a *Application) Configure(ctx context.Context, config map[string]interface{}) (err error) { - defer func() { - if err != nil { - // inject App metadata - err = errors.New(err, errors.M(errors.MetaKeyAppName, a.name), errors.M(errors.MetaKeyAppName, a.id)) - a.statusReporter.Update(state.Degraded, err.Error(), nil) - } - }() - - a.appLock.Lock() - defer a.appLock.Unlock() - - if a.state.Status == state.Stopped { - return errors.New(ErrAppNotRunning) - } - if a.srvState == nil { - return errors.New(ErrAppNotRunning) - } - - cfgStr, err := yaml.Marshal(config) - if err != nil { - return errors.New(err, errors.TypeApplication) - } - - isRestartNeeded := plugin.IsRestartNeeded(a.logger, a.Spec(), a.srvState, config) - - err = a.srvState.UpdateConfig(string(cfgStr)) - if err != nil { - return errors.New(err, errors.TypeApplication) - } - - if isRestartNeeded { - a.logger.Infof("initiating restart of '%s' due to config change", a.Name()) - a.appLock.Unlock() - a.Stop() - err = a.Start(ctx, a.desc, config) - // lock back so it won't panic on deferred unlock - a.appLock.Lock() - } - - return err -} diff --git a/internal/pkg/core/plugin/process/start.go b/internal/pkg/core/plugin/process/start.go deleted file mode 100644 index 55081d9977d..00000000000 --- a/internal/pkg/core/plugin/process/start.go +++ /dev/null @@ -1,190 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
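gracefulKill above follows the usual request-then-force sequence: ask the process to stop, wait up to procExitTimeout for it to exit on its own, and only then kill it. A POSIX-only standalone sketch of that sequence driven directly through os/exec (the removed code drives it through process.Info instead):

// Sketch of graceful shutdown with a hard-kill fallback after a timeout.
package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

// stopGracefully asks cmd's process to stop and force-kills it if it has
// not exited within timeout.
func stopGracefully(cmd *exec.Cmd, timeout time.Duration) error {
	_ = cmd.Process.Signal(syscall.SIGTERM) // polite stop request

	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()

	select {
	case err := <-done:
		return err // exited on its own
	case <-time.After(timeout):
		_ = cmd.Process.Kill() // still running after the timeout: force it
		return <-done
	}
}

func main() {
	cmd := exec.Command("sleep", "60")
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	fmt.Println(stopGracefully(cmd, 2*time.Second))
}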
- -package process - -import ( - "context" - "fmt" - "io" - "os/exec" - "path/filepath" - - "gopkg.in/yaml.v2" - - "github.com/elastic/elastic-agent-client/v7/pkg/proto" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/core/app" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/pkg/core/process" - "github.com/elastic/elastic-agent/pkg/core/server" -) - -// Start starts the application with a specified config. -func (a *Application) Start(ctx context.Context, t app.Taggable, cfg map[string]interface{}) error { - a.appLock.Lock() - defer a.appLock.Unlock() - - return a.start(ctx, t, cfg, false) -} - -// Start starts the application without grabbing the lock. -func (a *Application) start(ctx context.Context, t app.Taggable, cfg map[string]interface{}, isRestart bool) (err error) { - defer func() { - if err != nil { - // inject App metadata - err = errors.New(err, errors.M(errors.MetaKeyAppName, a.name), errors.M(errors.MetaKeyAppName, a.id)) - } - }() - - // starting only if it's not running - // or if it is, then only in case it's a restart and this call initiates from restart call - if a.Started() && a.state.Status != state.Restarting { - if a.state.ProcessInfo == nil { - // already started if not stopped or crashed - return nil - } - - // in case app reported status it might still be running and failure timer - // in progress. Stop timer and stop failing process - a.stopFailedTimer() - a.stopWatcher(a.state.ProcessInfo) - - // kill the process - _ = a.state.ProcessInfo.Process.Kill() - a.state.ProcessInfo = nil - } - - if a.state.Status == state.Restarting && !isRestart { - return nil - } - - cfgStr, err := yaml.Marshal(cfg) - if err != nil { - return fmt.Errorf("%q could not unmarshal config from yaml: %w", a.Name(), err) - } - - a.startContext = ctx - a.tag = t - srvState := a.srvState - - // Failed applications can be started again. 
- if srvState != nil { - a.setState(state.Starting, "Starting", nil) - srvState.SetStatus(proto.StateObserved_STARTING, a.state.Message, a.state.Payload) - srvState.UpdateConfig(srvState.Config()) - } else { - a.srvState, err = a.srv.Register(a, string(cfgStr)) - if err != nil { - return err - } - // Set input types from the spec - a.srvState.SetInputTypes(a.desc.Spec().ActionInputTypes) - } - - if a.state.Status != state.Stopped { - // restarting as it was previously in a different state - a.setState(state.Restarting, "Restarting", nil) - } else if a.state.Status != state.Restarting { - // keep restarting state otherwise it's starting - a.setState(state.Starting, "Starting", nil) - } - - defer func() { - if err != nil { - if a.srvState != nil { - a.srvState.Destroy() - a.srvState = nil - } - if a.state.ProcessInfo != nil { - _ = a.state.ProcessInfo.Process.Kill() - a.state.ProcessInfo = nil - } - } - }() - - if err := a.monitor.Prepare(a.desc.Spec(), a.pipelineID, a.uid, a.gid); err != nil { - return fmt.Errorf("%q failed to prepare monitor for %q: %w", - a.Name(), a.desc.Spec().Name, err) - } - - if a.limiter != nil { - a.limiter.Add() - } - - spec := a.desc.ProcessSpec() - spec.Args = injectLogLevel(a.logLevel, spec.Args) - - // use separate file - isSidecar := app.IsSidecar(t) - spec.Args = a.monitor.EnrichArgs(a.desc.Spec(), a.pipelineID, spec.Args, isSidecar) - - // specify beat name to avoid data lock conflicts - // as for https://github.com/elastic/beats/v7/pull/14030 more than one instance - // of the beat with same data path fails to start - spec.Args = injectDataPath(spec.Args, a.pipelineID, a.id) - - a.state.ProcessInfo, err = process.Start( - spec.BinaryPath, - a.uid, - a.gid, - spec.Args, nil, func(c *exec.Cmd) error { - c.Stdout = newLoggerWriter(a.Name(), logStdOut, a.logger) - c.Stderr = newLoggerWriter(a.Name(), logStdErr, a.logger) - return nil - }) - if err != nil { - return fmt.Errorf("%q failed to start %q: %w", - a.Name(), spec.BinaryPath, err) - } - - // write connect info to stdin - go a.writeToStdin(a.srvState, a.state.ProcessInfo.Stdin) - - // create closer for watcher, used to terminate watcher without - // side effect of restarting process during shutdown - cancelCtx, cancel := context.WithCancel(ctx) - a.watchClosers[a.state.ProcessInfo.PID] = cancel - // setup watcher - a.watch(cancelCtx, t, a.state.ProcessInfo, cfg) - - return nil -} - -func (a *Application) writeToStdin(as *server.ApplicationState, wc io.WriteCloser) { - err := as.WriteConnInfo(wc) - if err != nil { - err = errors.New(err, errors.M(errors.MetaKeyAppName, a.name), errors.M(errors.MetaKeyAppName, a.id)) - a.logger.Errorf("%q failed writing connection info to spawned application: %v", a.Name(), err) - } - _ = wc.Close() -} - -func injectLogLevel(logLevel string, args []string) []string { - var level string - // Translate to level beat understands - switch logLevel { - case "info": - level = "info" - case "debug": - level = "debug" - case "warning": - level = "warning" - case "error": - level = "error" - } - - if args == nil || level == "" { - return args - } - - return append(args, "-E", "logging.level="+level) -} - -func injectDataPath(args []string, pipelineID, id string) []string { - dataPath := filepath.Join(paths.Home(), "run", pipelineID, id) - return append(args, "-E", "path.data="+dataPath) -} diff --git a/internal/pkg/core/plugin/process/status.go b/internal/pkg/core/plugin/process/status.go deleted file mode 100644 index 50488dfd77b..00000000000 --- 
a/internal/pkg/core/plugin/process/status.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package process - -import ( - "context" - "fmt" - "time" - - "gopkg.in/yaml.v2" - - "github.com/elastic/elastic-agent-client/v7/pkg/proto" - - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/pkg/core/process" - "github.com/elastic/elastic-agent/pkg/core/server" -) - -// OnStatusChange is the handler called by the GRPC server code. -// -// It updates the status of the application and handles restarting the application if needed. -func (a *Application) OnStatusChange(s *server.ApplicationState, status proto.StateObserved_Status, msg string, payload map[string]interface{}) { - a.appLock.Lock() - defer a.appLock.Unlock() - - // If the application is stopped, do not update the state. Stopped is a final state - // and should not be overridden. - if a.state.Status == state.Stopped { - return - } - - a.setState(state.FromProto(status), msg, payload) - if status == proto.StateObserved_FAILED { - // ignore when expected state is stopping - if s.Expected() == proto.StateExpected_STOPPING { - return - } - - // it was marshalled to pass into the state, so unmarshall will always succeed - var cfg map[string]interface{} - _ = yaml.Unmarshal([]byte(s.Config()), &cfg) - - // start the failed timer - // pass process info to avoid killing new process spun up in a meantime - a.startFailedTimer(cfg, a.state.ProcessInfo) - } else { - a.stopFailedTimer() - } -} - -// startFailedTimer starts a timer that will restart the application if it doesn't exit failed after a period of time. -// -// This does not grab the appLock, that must be managed by the caller. -func (a *Application) startFailedTimer(cfg map[string]interface{}, proc *process.Info) { - if a.restartCanceller != nil { - // already have running failed timer; just update config - a.restartConfig = cfg - return - } - - ctx, cancel := context.WithCancel(a.startContext) - a.restartCanceller = cancel - a.restartConfig = cfg - t := time.NewTimer(a.processConfig.FailureTimeout) - go func() { - defer func() { - a.appLock.Lock() - a.restartCanceller = nil - a.restartConfig = nil - a.appLock.Unlock() - }() - - select { - case <-ctx.Done(): - return - case <-t.C: - a.restart(proc) - } - }() -} - -// stopFailedTimer stops the timer that would restart the application from reporting failure. -// -// This does not grab the appLock, that must be managed by the caller. 
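startFailedTimer above is a cancellable deadline: when an application reports FAILED a timer is armed, and unless it is cancelled first (the application recovered or was stopped) the process is restarted when the timer fires. A reduced sketch of that mechanism:

// Sketch of a cancellable failure timer that triggers a restart callback
// unless it is cancelled before the deadline.
package main

import (
	"context"
	"fmt"
	"time"
)

// failureTimer arms restart to fire after d; the returned cancel func
// disarms it, which is the equivalent of the app leaving the FAILED state.
func failureTimer(parent context.Context, d time.Duration, restart func()) context.CancelFunc {
	ctx, cancel := context.WithCancel(parent)
	t := time.NewTimer(d)
	go func() {
		defer t.Stop()
		select {
		case <-ctx.Done():
			return // recovered or stopped: no restart
		case <-t.C:
			restart()
		}
	}()
	return cancel
}

func main() {
	cancel := failureTimer(context.Background(), 100*time.Millisecond, func() {
		fmt.Println("restarting failed application")
	})
	defer cancel()
	time.Sleep(200 * time.Millisecond) // let the timer fire for the demo
}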
-func (a *Application) stopFailedTimer() { - if a.restartCanceller == nil { - return - } - a.restartCanceller() - a.restartCanceller = nil -} - -// restart restarts the application -func (a *Application) restart(proc *process.Info) { - a.appLock.Lock() - defer a.appLock.Unlock() - - // stop the watcher - a.stopWatcher(proc) - - // kill the process - if proc != nil && proc.Process != nil { - _ = proc.Process.Kill() - } - - if proc != a.state.ProcessInfo { - // we're restarting different process than actually running - // no need to start another one - return - } - - a.state.ProcessInfo = nil - - ctx := a.startContext - tag := a.tag - - a.setState(state.Restarting, "", nil) - err := a.start(ctx, tag, a.restartConfig, true) - if err != nil { - a.setState(state.Crashed, fmt.Sprintf("failed to restart: %s", err), nil) - } -} diff --git a/internal/pkg/core/plugin/process/stdlogger.go b/internal/pkg/core/plugin/process/stdlogger.go deleted file mode 100644 index 4c7d8625216..00000000000 --- a/internal/pkg/core/plugin/process/stdlogger.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package process - -import ( - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -type logStd int - -const ( - agentConsoleName = "agent.console.name" - agentConsoleType = "agent.console.type" - - logStdOut logStd = iota - logStdErr -) - -func (l logStd) String() string { - switch l { - case logStdOut: - return "stdout" - case logStdErr: - return "stderr" - } - - return "unknown" -} - -type loggerWriter struct { - format string - logf func(format string, args ...interface{}) -} - -func newLoggerWriter(appName string, std logStd, log *logger.Logger) loggerWriter { - log = log.With( - agentConsoleName, appName, - agentConsoleType, std.String()) - - logf := log.Infof - if std == logStdErr { - logf = log.Errorf - } - - return loggerWriter{ - format: appName + " " + std.String() + ": %q", - logf: logf, - } -} - -func (l loggerWriter) Write(p []byte) (n int, err error) { - l.logf(l.format, string(p)) - return len(p), nil -} diff --git a/internal/pkg/core/plugin/process/stdlogger_test.go b/internal/pkg/core/plugin/process/stdlogger_test.go deleted file mode 100644 index 959f387c32a..00000000000 --- a/internal/pkg/core/plugin/process/stdlogger_test.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
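The loggerWriter above turns subprocess stdout and stderr into structured log entries tagged with the application name and the stream they came from. A standalone sketch of the same io.Writer idea, using the standard library logger in place of the agent's zap-based logger:

// Sketch of forwarding a child process's stdout/stderr into a logger.
package main

import (
	"log"
	"os"
	"os/exec"
)

// logWriter forwards everything written to it to a logger, tagged with the
// application name and which stream it came from.
type logWriter struct {
	app    string
	stream string
	l      *log.Logger
}

func (w logWriter) Write(p []byte) (int, error) {
	w.l.Printf("%s %s: %q", w.app, w.stream, string(p))
	return len(p), nil
}

func main() {
	l := log.New(os.Stdout, "", log.LstdFlags)
	cmd := exec.Command("echo", "hello from the child")
	cmd.Stdout = logWriter{app: "somebeat", stream: "stdout", l: l}
	cmd.Stderr = logWriter{app: "somebeat", stream: "stderr", l: l}
	if err := cmd.Run(); err != nil {
		log.Fatal(err)
	}
}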
- -package process - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zapcore" - - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -func Test_loggerWriter(t *testing.T) { - tc := []struct { - name string - args struct { - appName string - logTo logStd - } - logMsg string - logLevel zapcore.Level - }{ - { - name: "capture stdout", - args: struct { - appName string - logTo logStd - }{ - appName: "somebeats", - logTo: logStdOut, - }, - logMsg: "stdout log", - logLevel: zapcore.InfoLevel, - }, - { - name: "capture stderr", - args: struct { - appName string - logTo logStd - }{ - appName: "somebeats", - logTo: logStdErr, - }, - logMsg: "stderr log", - logLevel: zapcore.ErrorLevel, - }, - } - - for _, tt := range tc { - logg, obs := logger.NewTesting("test-loggerWriter") - logg = logg.With("previous-field", "previous-value") - - l := newLoggerWriter(tt.args.appName, tt.args.logTo, logg) - _, _ = l.Write([]byte(tt.logMsg)) - - logs := obs.All() - require.Equal(t, 1, len(logs)) - - log := logs[0] - assert.Equal(t, log.Level, tt.logLevel) - assert.Contains(t, log.Message, tt.logMsg) - assert.Equal(t, log.ContextMap()[agentConsoleName], tt.args.appName) - assert.Equal(t, log.ContextMap()[agentConsoleType], tt.args.logTo.String()) - assert.Equal(t, log.ContextMap()["previous-field"], "previous-value") - } -} diff --git a/internal/pkg/core/plugin/process/watch_posix.go b/internal/pkg/core/plugin/process/watch_posix.go deleted file mode 100644 index 7e3e809e7bc..00000000000 --- a/internal/pkg/core/plugin/process/watch_posix.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -//go:build !windows -// +build !windows - -package process - -import ( - "os" - "syscall" - "time" -) - -// externalProcess is a watch mechanism used in cases where OS requires -// a process to be a child for waiting for process. We need to be able -// await any process. -// This operation is long running. -func (a *Application) externalProcess(proc *os.Process) { - if proc == nil { - return - } - - for { - <-time.After(1 * time.Second) - if proc.Signal(syscall.Signal(0)) != nil { - // failed to contact process, return - return - } - } -} diff --git a/internal/pkg/core/plugin/process/watch_windows.go b/internal/pkg/core/plugin/process/watch_windows.go deleted file mode 100644 index d5baeb1e895..00000000000 --- a/internal/pkg/core/plugin/process/watch_windows.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -//go:build windows -// +build windows - -package process - -import ( - "os" - "syscall" - "time" -) - -const ( - // exitCodeStillActive according to docs.microsoft.com/en-us/windows/desktop/api/processthreadsapi/nf-processthreadsapi-getexitcodeprocess - exitCodeStillActive = 259 -) - -// externalProcess is a watch mechanism used in cases where OS requires -// a process to be a child for waiting for process. 
We need to be able -// await any process -func (a *Application) externalProcess(proc *os.Process) { - if proc == nil { - return - } - - for { - <-time.After(1 * time.Second) - if isWindowsProcessExited(proc.Pid) { - return - } - } -} - -func isWindowsProcessExited(pid int) bool { - const desiredAccess = syscall.STANDARD_RIGHTS_READ | syscall.PROCESS_QUERY_INFORMATION | syscall.SYNCHRONIZE - h, err := syscall.OpenProcess(desiredAccess, false, uint32(pid)) - if err != nil { - // failed to open handle, report exited - return true - } - - // get exit code, this returns immediately in case it is still running - // it returns exitCodeStillActive - var ec uint32 - if err := syscall.GetExitCodeProcess(h, &ec); err != nil { - // failed to contact, report exited - return true - } - - return ec != exitCodeStillActive -} diff --git a/internal/pkg/core/plugin/service/app.go b/internal/pkg/core/plugin/service/app.go deleted file mode 100644 index ab3631f12c0..00000000000 --- a/internal/pkg/core/plugin/service/app.go +++ /dev/null @@ -1,373 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package service - -import ( - "context" - "fmt" - "io" - "net" - "reflect" - "sync" - "time" - - "gopkg.in/yaml.v2" - - "github.com/elastic/elastic-agent-client/v7/pkg/proto" - - "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/core/app" - "github.com/elastic/elastic-agent/internal/pkg/core/monitoring" - "github.com/elastic/elastic-agent/internal/pkg/core/plugin" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/internal/pkg/core/status" - "github.com/elastic/elastic-agent/internal/pkg/tokenbucket" - "github.com/elastic/elastic-agent/pkg/core/logger" - "github.com/elastic/elastic-agent/pkg/core/process" - "github.com/elastic/elastic-agent/pkg/core/server" -) - -var ( - // ErrAppNotInstalled is returned when configuration is performed on not installed application. - ErrAppNotInstalled = errors.New("application is not installed", errors.TypeApplication) -) - -// Application encapsulates an application that is ran as a service by the system service manager. -type Application struct { - bgContext context.Context - id string - name string - pipelineID string - logLevel string - desc *app.Descriptor - srv *server.Server - srvState *server.ApplicationState - limiter *tokenbucket.Bucket - state state.State - reporter state.Reporter - - uid int - gid int - - monitor monitoring.Monitor - statusReporter status.Reporter - - processConfig *process.Config - - logger *logger.Logger - - credsPort int - credsWG sync.WaitGroup - credsListener net.Listener - - appLock sync.Mutex -} - -// NewApplication creates a new instance of an applications. 
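The externalProcess watchers above exist because the agent cannot Wait on a process it did not spawn, so liveness is polled instead: signal 0 once per second on POSIX, GetExitCodeProcess against the still-active code (259) on Windows. A POSIX-only sketch of that polling loop (the child command is only there to give the demo something to watch):

//go:build !windows

// Sketch of awaiting a process that is not our child by polling signal 0.
package main

import (
	"fmt"
	"os"
	"os/exec"
	"syscall"
	"time"
)

// waitExternal polls proc once per second and returns when it can no
// longer be signalled, i.e. when the process has gone away.
func waitExternal(proc *os.Process) {
	for {
		time.Sleep(1 * time.Second)
		if proc.Signal(syscall.Signal(0)) != nil {
			return
		}
	}
}

func main() {
	cmd := exec.Command("sleep", "2")
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	go func() { _ = cmd.Wait() }() // reap, so the PID is released on exit
	waitExternal(cmd.Process)
	fmt.Println("external process exited")
}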
-func NewApplication( - ctx context.Context, - id, appName, pipelineID, logLevel string, - credsPort int, - desc *app.Descriptor, - srv *server.Server, - cfg *configuration.SettingsConfig, - logger *logger.Logger, - reporter state.Reporter, - monitor monitoring.Monitor, - statusController status.Controller) (*Application, error) { - - s := desc.ProcessSpec() - uid, gid, err := s.UserGroup() - if err != nil { - return nil, err - } - - b, _ := tokenbucket.NewTokenBucket(ctx, 3, 3, 1*time.Second) - return &Application{ - bgContext: ctx, - id: id, - name: appName, - pipelineID: pipelineID, - logLevel: logLevel, - desc: desc, - srv: srv, - processConfig: cfg.ProcessConfig, - logger: logger, - limiter: b, - state: state.State{ - Status: state.Stopped, - }, - reporter: reporter, - monitor: monitor, - uid: uid, - gid: gid, - credsPort: credsPort, - statusReporter: statusController.RegisterApp(id, appName), - }, nil -} - -// Monitor returns monitoring handler of this app. -func (a *Application) Monitor() monitoring.Monitor { - return a.monitor -} - -// Spec returns the program spec of this app. -func (a *Application) Spec() program.Spec { - return a.desc.Spec() -} - -// State returns the application state. -func (a *Application) State() state.State { - a.appLock.Lock() - defer a.appLock.Unlock() - return a.state -} - -// Name returns application name -func (a *Application) Name() string { - return a.name -} - -// Started returns true if the application is started. -func (a *Application) Started() bool { - return a.srvState != nil -} - -// SetState sets the status of the application. -func (a *Application) SetState(s state.Status, msg string, payload map[string]interface{}) { - a.appLock.Lock() - defer a.appLock.Unlock() - a.setState(s, msg, payload) -} - -// Start starts the application with a specified config. -func (a *Application) Start(ctx context.Context, _ app.Taggable, cfg map[string]interface{}) (err error) { - defer func() { - if err != nil { - // inject App metadata - err = errors.New(err, errors.M(errors.MetaKeyAppName, a.name), errors.M(errors.MetaKeyAppName, a.id)) - } - }() - - a.appLock.Lock() - defer a.appLock.Unlock() - - cfgStr, err := yaml.Marshal(cfg) - if err != nil { - return err - } - - // already started - if a.srvState != nil { - a.setState(state.Starting, "Starting", nil) - a.srvState.SetStatus(proto.StateObserved_STARTING, a.state.Message, a.state.Payload) - a.srvState.UpdateConfig(a.srvState.Config()) - } else { - a.setState(state.Starting, "Starting", nil) - - a.srvState, err = a.srv.Register(a, string(cfgStr)) - if err != nil { - return err - } - - // Set input types from the spec - a.srvState.SetInputTypes(a.desc.Spec().ActionInputTypes) - } - - defer func() { - if err != nil { - if a.srvState != nil { - a.srvState.Destroy() - a.srvState = nil - } - } - }() - - if err := a.monitor.Prepare(a.desc.Spec(), a.pipelineID, a.uid, a.gid); err != nil { - return err - } - - if a.limiter != nil { - a.limiter.Add() - } - - // start the credentials listener for the service - if err := a.startCredsListener(); err != nil { - return err - } - - // allow the service manager to ensure that the application is started, currently this does not start/stop - // the actual service in the system service manager - - return nil -} - -// Configure configures the application with the passed configuration. 
-func (a *Application) Configure(ctx context.Context, config map[string]interface{}) (err error) { - defer func() { - if err != nil { - // inject App metadata - err = errors.New(err, errors.M(errors.MetaKeyAppName, a.name), errors.M(errors.MetaKeyAppName, a.id)) - a.statusReporter.Update(state.Degraded, err.Error(), nil) - } - }() - - a.appLock.Lock() - defer a.appLock.Unlock() - - if a.srvState == nil { - return errors.New(ErrAppNotInstalled) - } - - cfgStr, err := yaml.Marshal(config) - if err != nil { - return errors.New(err, errors.TypeApplication) - } - - isRestartNeeded := plugin.IsRestartNeeded(a.logger, a.Spec(), a.srvState, config) - - err = a.srvState.UpdateConfig(string(cfgStr)) - if err != nil { - return errors.New(err, errors.TypeApplication) - } - - if isRestartNeeded { - a.logger.Infof("initiating restart of '%s' due to config change", a.Name()) - a.appLock.Unlock() - a.Stop() - err = a.Start(ctx, a.desc, config) - // lock back so it wont panic on deferred unlock - a.appLock.Lock() - } - - return err -} - -// Stop stops the current application. -func (a *Application) Stop() { - a.appLock.Lock() - srvState := a.srvState - a.appLock.Unlock() - - if srvState == nil { - return - } - - if err := srvState.Stop(a.processConfig.StopTimeout); err != nil { - a.appLock.Lock() - a.setState( - state.Failed, - fmt.Errorf("failed to stop after %s: %w", a.processConfig.StopTimeout, err).Error(), - nil) - } else { - a.appLock.Lock() - a.setState(state.Stopped, "Stopped", nil) - } - a.srvState = nil - - a.cleanUp() - a.stopCredsListener() - a.appLock.Unlock() -} - -// Shutdown disconnects the service, but doesn't signal it to stop. -func (a *Application) Shutdown() { - a.appLock.Lock() - defer a.appLock.Unlock() - a.logger.Infof("signaling service to stop because of shutdown: %s", a.id) - - if a.srvState == nil { - return - } - - // destroy the application in the server, this skips sending - // the expected stopping state to the service - a.setState(state.Stopped, "Stopped", nil) - a.srvState.Destroy() - a.srvState = nil - - a.cleanUp() - a.stopCredsListener() -} - -// OnStatusChange is the handler called by the GRPC server code. -// -// It updates the status of the application and handles restarting the application when needed. -func (a *Application) OnStatusChange(s *server.ApplicationState, status proto.StateObserved_Status, msg string, payload map[string]interface{}) { - a.appLock.Lock() - defer a.appLock.Unlock() - - // If the application is stopped, do not update the state. Stopped is a final state - // and should not be overridden. 
- if a.state.Status == state.Stopped { - return - } - - a.setState(state.FromProto(status), msg, payload) -} - -func (a *Application) setState(s state.Status, msg string, payload map[string]interface{}) { - if a.state.Status != s || a.state.Message != msg || !reflect.DeepEqual(a.state.Payload, payload) { - if state.IsStateFiltered(msg, payload) { - return - } - - a.state.Status = s - a.state.Message = msg - a.state.Payload = payload - if a.reporter != nil { - go a.reporter.OnStateChange(a.id, a.name, a.state) - } - a.statusReporter.Update(s, msg, payload) - } -} - -func (a *Application) cleanUp() { - a.monitor.Cleanup(a.desc.Spec(), a.pipelineID) -} - -func (a *Application) startCredsListener() error { - lis, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", a.credsPort)) - if err != nil { - return errors.New(err, "failed to start connection credentials listener") - } - a.credsListener = lis - a.credsWG.Add(1) - go func() { - for { - conn, err := lis.Accept() - if err != nil { - break - } - a.appLock.Lock() - srvState := a.srvState - a.appLock.Unlock() - if srvState == nil { - // application stopped - _ = conn.Close() - continue - } - if err := srvState.WriteConnInfo(conn); err != nil { - _ = conn.Close() - if !errors.Is(err, io.EOF) { - a.logger.Errorf("failed to write connection credentials: %s", err) - } - continue - } - _ = conn.Close() - } - a.credsWG.Done() - }() - - return nil -} - -func (a *Application) stopCredsListener() { - a.credsListener.Close() - a.credsWG.Wait() - a.credsListener = nil -} diff --git a/internal/pkg/core/state/state.go b/internal/pkg/core/state/state.go deleted file mode 100644 index 57dfb639b72..00000000000 --- a/internal/pkg/core/state/state.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package state - -import ( - "context" - "strings" - - "github.com/elastic/elastic-agent-client/v7/pkg/proto" - - "github.com/elastic/elastic-agent/pkg/core/process" -) - -// Status describes the current status of the application process. -type Status int - -const ( - // Stopped is status describing not running application. - Stopped Status = -4 - // Crashed is status describing application is crashed. - Crashed Status = -3 - // Restarting is status describing application is restarting. - Restarting Status = -2 - // Updating is status describing application is updating. - Updating Status = -1 - - // Starting is status describing application is starting. - Starting = Status(proto.StateObserved_STARTING) - // Configuring is status describing application is configuring. - Configuring = Status(proto.StateObserved_CONFIGURING) - // Healthy is status describing application is running. - Healthy = Status(proto.StateObserved_HEALTHY) - // Degraded is status describing application is degraded. - Degraded = Status(proto.StateObserved_DEGRADED) - // Failed is status describing application is failed. - Failed = Status(proto.StateObserved_FAILED) - // Stopping is status describing application is stopping. - Stopping = Status(proto.StateObserved_STOPPING) -) - -var filteredErrors = []string{ - context.Canceled.Error(), -} - -// IsInternal returns true if the status is an internal status and not something that should be reported -// over the protocol as an actual status. 
-func (s Status) IsInternal() bool { - return s < Starting -} - -// ToProto converts the status to status that is compatible with the protocol. -func (s Status) ToProto() proto.StateObserved_Status { - if !s.IsInternal() { - return proto.StateObserved_Status(s) - } - if s == Updating || s == Restarting { - return proto.StateObserved_STARTING - } - if s == Crashed { - return proto.StateObserved_FAILED - } - if s == Stopped { - return proto.StateObserved_STOPPING - } - // fallback to degraded - return proto.StateObserved_DEGRADED -} - -// FromProto converts the status from protocol to status Agent representation. -func FromProto(s proto.StateObserved_Status) Status { - return Status(s) -} - -// State wraps the process state and application status. -type State struct { - ProcessInfo *process.Info - Status Status - Message string - Payload map[string]interface{} -} - -// Reporter is interface that is called when a state is changed. -type Reporter interface { - // OnStateChange is called when state changes. - OnStateChange(id string, name string, state State) -} - -// IsStateFiltered returns true if state message contains error out of predefined -// collection of ignored errors. -func IsStateFiltered(msg string, payload map[string]interface{}) bool { - for _, e := range filteredErrors { - if strings.Contains(msg, e) { - return true - } - } - return false -} diff --git a/internal/pkg/core/status/reporter.go b/internal/pkg/core/status/reporter.go deleted file mode 100644 index 04c8251fa92..00000000000 --- a/internal/pkg/core/status/reporter.go +++ /dev/null @@ -1,302 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package status - -import ( - "reflect" - "sync" - - "github.com/google/uuid" - - "github.com/elastic/elastic-agent-client/v7/pkg/proto" - - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -// AgentStatusCode is the status code for the Elastic Agent overall. -type AgentStatusCode int - -const ( - // Healthy status means everything is fine. - Healthy AgentStatusCode = iota - // Degraded status means something minor is preventing agent to work properly. - Degraded - // Failed status means agent is unable to work properly. - Failed -) - -// String returns the string value for the agent code. -func (s AgentStatusCode) String() string { - return []string{"online", "degraded", "error"}[s] -} - -// AgentApplicationStatus returns the status of specific application. -type AgentApplicationStatus struct { - ID string - Name string - Status state.Status - Message string - Payload map[string]interface{} -} - -// AgentStatus returns the overall status of the Elastic Agent. -type AgentStatus struct { - Status AgentStatusCode - Message string - Applications []AgentApplicationStatus -} - -// Controller takes track of component statuses. 
-type Controller interface { - RegisterComponent(string) Reporter - RegisterComponentWithPersistance(string, bool) Reporter - RegisterApp(id string, name string) Reporter - Status() AgentStatus - StatusCode() AgentStatusCode - StatusString() string - UpdateStateID(string) -} - -type controller struct { - mx sync.Mutex - status AgentStatusCode - reporters map[string]*reporter - appReporters map[string]*reporter - log *logger.Logger - stateID string -} - -// NewController creates a new reporter. -func NewController(log *logger.Logger) Controller { - return &controller{ - status: Healthy, - reporters: make(map[string]*reporter), - appReporters: make(map[string]*reporter), - log: log, - } -} - -// UpdateStateID cleans health when new configuration is received. -// To prevent reporting failures from previous configuration. -func (r *controller) UpdateStateID(stateID string) { - if stateID == r.stateID { - return - } - - r.mx.Lock() - - r.stateID = stateID - // cleanup status for component reporters - // the status of app reports remain the same - for _, rep := range r.reporters { - if !rep.isRegistered { - continue - } - - rep.mx.Lock() - if !rep.isPersistent { - rep.status = state.Configuring - rep.message = "" - } - rep.mx.Unlock() - } - r.mx.Unlock() - - r.updateStatus() -} - -// Register registers new component for status updates. -func (r *controller) RegisterComponent(componentIdentifier string) Reporter { - return r.RegisterComponentWithPersistance(componentIdentifier, false) -} - -// Register registers new component for status updates. -func (r *controller) RegisterComponentWithPersistance(componentIdentifier string, persistent bool) Reporter { - id := componentIdentifier + "-" + uuid.New().String()[:8] - rep := &reporter{ - name: componentIdentifier, - isRegistered: true, - unregisterFunc: func() { - r.mx.Lock() - delete(r.reporters, id) - r.mx.Unlock() - }, - notifyChangeFunc: r.updateStatus, - isPersistent: persistent, - } - - r.mx.Lock() - r.reporters[id] = rep - r.mx.Unlock() - - return rep -} - -// RegisterApp registers new component for status updates. -func (r *controller) RegisterApp(componentIdentifier string, name string) Reporter { - id := componentIdentifier + "-" + uuid.New().String()[:8] - rep := &reporter{ - name: name, - status: state.Stopped, - isRegistered: true, - unregisterFunc: func() { - r.mx.Lock() - delete(r.appReporters, id) - r.mx.Unlock() - }, - notifyChangeFunc: r.updateStatus, - } - - r.mx.Lock() - r.appReporters[id] = rep - r.mx.Unlock() - - return rep -} - -// Status retrieves current agent status. -func (r *controller) Status() AgentStatus { - r.mx.Lock() - defer r.mx.Unlock() - apps := make([]AgentApplicationStatus, 0, len(r.appReporters)) - for key, rep := range r.appReporters { - rep.mx.Lock() - apps = append(apps, AgentApplicationStatus{ - ID: key, - Name: rep.name, - Status: rep.status, - Message: rep.message, - Payload: rep.payload, - }) - rep.mx.Unlock() - } - return AgentStatus{ - Status: r.status, - Message: "", - Applications: apps, - } -} - -// StatusCode retrieves current agent status code. 
-func (r *controller) StatusCode() AgentStatusCode { - r.mx.Lock() - defer r.mx.Unlock() - return r.status -} - -func (r *controller) updateStatus() { - status := Healthy - - r.mx.Lock() - for id, rep := range r.reporters { - s := statusToAgentStatus(rep.status) - if s > status { - status = s - } - - r.log.Debugf("'%s' has status '%s'", id, s) - if status == Failed { - break - } - } - if status != Failed { - for id, rep := range r.appReporters { - s := statusToAgentStatus(rep.status) - if s > status { - status = s - } - - r.log.Debugf("'%s' has status '%s'", id, s) - if status == Failed { - break - } - } - } - - if r.status != status { - r.logStatus(status) - r.status = status - } - - r.mx.Unlock() - -} - -func (r *controller) logStatus(status AgentStatusCode) { - logFn := r.log.Infof - if status == Degraded { - logFn = r.log.Warnf - } else if status == Failed { - logFn = r.log.Errorf - } - - logFn("Elastic Agent status changed to: '%s'", status) -} - -// StatusString retrieves human readable string of current agent status. -func (r *controller) StatusString() string { - return r.StatusCode().String() -} - -// Reporter reports status of component -type Reporter interface { - Update(state.Status, string, map[string]interface{}) - Unregister() -} - -type reporter struct { - name string - mx sync.Mutex - isPersistent bool - isRegistered bool - status state.Status - message string - payload map[string]interface{} - unregisterFunc func() - notifyChangeFunc func() -} - -// Update updates the status of a component. -func (r *reporter) Update(s state.Status, message string, payload map[string]interface{}) { - r.mx.Lock() - defer r.mx.Unlock() - - if !r.isRegistered { - return - } - if state.IsStateFiltered(message, payload) { - return - } - - if r.status != s || r.message != message || !reflect.DeepEqual(r.payload, payload) { - r.status = s - r.message = message - r.payload = payload - r.notifyChangeFunc() - } -} - -// Unregister unregisters status from reporter. Reporter will no longer be taken into consideration -// for overall status computation. -func (r *reporter) Unregister() { - r.mx.Lock() - defer r.mx.Unlock() - - r.isRegistered = false - r.unregisterFunc() - r.notifyChangeFunc() -} - -func statusToAgentStatus(status state.Status) AgentStatusCode { - s := status.ToProto() - if s == proto.StateObserved_DEGRADED { - return Degraded - } - if s == proto.StateObserved_FAILED { - return Failed - } - return Healthy -} diff --git a/internal/pkg/core/status/reporter_test.go b/internal/pkg/core/status/reporter_test.go deleted file mode 100644 index 0d44e402798..00000000000 --- a/internal/pkg/core/status/reporter_test.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package status - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -func TestReporter(t *testing.T) { - l, _ := logger.New("", false) - t.Run("healthy by default", func(t *testing.T) { - r := NewController(l) - assert.Equal(t, Healthy, r.StatusCode()) - assert.Equal(t, "online", r.StatusString()) - }) - - t.Run("healthy when all healthy", func(t *testing.T) { - r := NewController(l) - r1 := r.RegisterComponent("r1") - r2 := r.RegisterComponent("r2") - r3 := r.RegisterComponent("r3") - a1 := r.RegisterApp("app-1", "app") - a2 := r.RegisterApp("app-2", "app") - a3 := r.RegisterApp("other-1", "other") - - r1.Update(state.Healthy, "", nil) - r2.Update(state.Healthy, "", nil) - r3.Update(state.Healthy, "", nil) - a1.Update(state.Healthy, "", nil) - a2.Update(state.Healthy, "", nil) - a3.Update(state.Healthy, "", nil) - - assert.Equal(t, Healthy, r.StatusCode()) - assert.Equal(t, "online", r.StatusString()) - }) - - t.Run("degraded when one degraded", func(t *testing.T) { - r := NewController(l) - r1 := r.RegisterComponent("r1") - r2 := r.RegisterComponent("r2") - r3 := r.RegisterComponent("r3") - - r1.Update(state.Healthy, "", nil) - r2.Update(state.Degraded, "degraded", nil) - r3.Update(state.Healthy, "", nil) - - assert.Equal(t, Degraded, r.StatusCode()) - assert.Equal(t, "degraded", r.StatusString()) - }) - - t.Run("failed when one failed", func(t *testing.T) { - r := NewController(l) - r1 := r.RegisterComponent("r1") - r2 := r.RegisterComponent("r2") - r3 := r.RegisterComponent("r3") - - r1.Update(state.Healthy, "", nil) - r2.Update(state.Failed, "failed", nil) - r3.Update(state.Healthy, "", nil) - - assert.Equal(t, Failed, r.StatusCode()) - assert.Equal(t, "error", r.StatusString()) - }) - - t.Run("failed when one failed and one degraded", func(t *testing.T) { - r := NewController(l) - r1 := r.RegisterComponent("r1") - r2 := r.RegisterComponent("r2") - r3 := r.RegisterComponent("r3") - - r1.Update(state.Healthy, "", nil) - r2.Update(state.Failed, "failed", nil) - r3.Update(state.Degraded, "degraded", nil) - - assert.Equal(t, Failed, r.StatusCode()) - assert.Equal(t, "error", r.StatusString()) - }) - - t.Run("degraded when degraded and healthy, failed unregistered", func(t *testing.T) { - r := NewController(l) - r1 := r.RegisterComponent("r1") - r2 := r.RegisterComponent("r2") - r3 := r.RegisterComponent("r3") - - r1.Update(state.Healthy, "", nil) - r2.Update(state.Failed, "failed", nil) - r3.Update(state.Degraded, "degraded", nil) - - r2.Unregister() - - assert.Equal(t, Degraded, r.StatusCode()) - assert.Equal(t, "degraded", r.StatusString()) - }) -} diff --git a/internal/spec/apm-server.yml b/internal/spec/apm-server.yml deleted file mode 100644 index 0258eb9fb0f..00000000000 --- a/internal/spec/apm-server.yml +++ /dev/null @@ -1,38 +0,0 @@ -name: APM-Server -cmd: apm-server -artifact: apm-server -args: [ - "-E", "management.enabled=true", - "-E", "gc_percent=${APMSERVER_GOGC:100}" -] -exported_metrics: [ - "apm-server", -] -rules: - - copy_to_list: - item: fleet - to: inputs - on_conflict: noop - - map: - path: fleet - rules: - - remove_key: - key: access_api_key - - remove_key: - key: reporting - - remove_key: - key: agent - - fix_stream: {} - - filter_values: - selector: inputs - key: type - values: - - apm - - filter: - selectors: - - inputs - - output - - fleet - - inject_headers: {} -when: length(${inputs}) > 0 and 
hasKey(${output}, 'elasticsearch', 'redis', - 'kafka', 'logstash') diff --git a/internal/spec/auditbeat.yml b/internal/spec/auditbeat.yml deleted file mode 100644 index be5d6246706..00000000000 --- a/internal/spec/auditbeat.yml +++ /dev/null @@ -1,98 +0,0 @@ -name: Auditbeat -cmd: auditbeat -args: [ - "-c", "auditbeat.elastic-agent.yml", - "-E", "setup.ilm.enabled=false", - "-E", "setup.template.enabled=false", - "-E", "management.enabled=true", - "-E", "logging.level=debug", - "-E", "gc_percent=${AUDITBEAT_GOGC:100}", - "-E", "auditbeat.config.modules.enabled=false" -] -artifact: beats/auditbeat -restart_on_output_change: true - -rules: -- fix_stream: {} - -# All Auditbeat input types begin with 'audit/'. -- filter_values_with_regexp: - key: type - re: '^audit/.+' - selector: inputs - -# Adds 'index: logs-{data_stream.dataset}-{data_stream.namespace}' to each input. -- inject_index: - type: logs - -# Adds two add_fields processors - one for event.dataset and one for -# data_stream.dataset, data_stream.type, and data_stream.namespace. -- inject_stream_processor: - on_conflict: insert_after - type: logs - -# Convert input[].streams[] into inputs[]. -- extract_list_items: - path: inputs - item: streams - to: inputs - -- map: - path: inputs - rules: - # Input types for Auditbeat begin with 'audit/'. Everything after that is - # treated as the module name. - - translate_with_regexp: - path: type - re: '^audit/(.+)' - with: $1 - - rename: - from: type - to: module - # If a dataset is specified convert that into 'datasets: [$item]'. - - make_array: - item: dataset - to: datasets - - remove_key: - key: dataset - - remove_key: - key: enabled - - remove_key: - key: data_stream - - remove_key: - key: condition - # Require all config to come through the Agent (no local files). - - remove_key: - key: audit_rule_files - -- filter_values: - selector: inputs - key: module - values: - - auditd - - file_integrity - - system - -# Adds two add_fields processors - one for agent.id and one for -# elastic_agent.id, elastic_agent.snapshot, elastic_agent.version. -- inject_agent_info: {} - -- copy: - from: inputs - to: auditbeat - -- rename: - from: auditbeat.inputs - to: modules - -- filter: - selectors: - - auditbeat - - output - - keystore - -# Inject headers into the output configuration. 
-- inject_headers: {} - -when: length(${auditbeat.modules}) > 0 and hasKey(${output}, 'elasticsearch', - 'redis', 'kafka', 'logstash') diff --git a/internal/spec/cloudbeat.yml b/internal/spec/cloudbeat.yml deleted file mode 100644 index 9cfda42344d..00000000000 --- a/internal/spec/cloudbeat.yml +++ /dev/null @@ -1,35 +0,0 @@ -name: Cloudbeat -cmd: cloudbeat -args: [ - "-E", "setup.ilm.enabled=false", - "-E", "setup.template.enabled=false", - "-E", "management.enabled=true", -] -restart_on_output_change: true -artifact: cloudbeat -action_input_types: - - cloudbeat - -rules: - - fix_stream: {} - - inject_index: - type: logs - - - inject_stream_processor: - on_conflict: insert_after - type: logs - - - filter_values: - selector: inputs - key: type - values: - - cloudbeat - - - inject_agent_info: {} - - - filter: - selectors: - - inputs - - output - -when: length(${inputs}) > 0 and hasKey(${output}, 'elasticsearch', 'redis', 'kafka', 'logstash') diff --git a/internal/spec/endpoint.yml b/internal/spec/endpoint.yml deleted file mode 100644 index 5f452ba1943..00000000000 --- a/internal/spec/endpoint.yml +++ /dev/null @@ -1,67 +0,0 @@ -name: Endpoint Security -cmd: endpoint-security -artifact: endpoint-dev -service: 6788 -action_input_types: -- endpoint -log_paths: - darwin: "/Library/Elastic/Endpoint/state/log/endpoint-*.log" - linux: "/opt/Elastic/Endpoint/state/log/endpoint-*.log" - windows: "C:\\Program Files\\Elastic\\Endpoint\\state\\log\\endpoint-*.log" -check_install: -- exec_file: - path: "endpoint-security" - args: - - "verify" - - "--log" - - "stderr" - timeout: 30 -post_install: -- exec_file: - path: "endpoint-security" - args: - - "install" - - "--log" - - "stderr" - - "--upgrade" - - "--resources" - - "endpoint-security-resources.zip" - timeout: 600 -pre_uninstall: -- exec_file: - path: "endpoint-security" - args: - - "uninstall" - - "--log" - - "stderr" - timeout: 600 -rules: -- fix_stream: {} - -- filter_values: - selector: inputs - key: type - values: - - endpoint - -- filter_values: - selector: inputs - key: enabled - values: - - true - -- map: - path: fleet - rules: - - remove_key: - key: server - -- filter: - selectors: - - fleet - - inputs - - output - - revision - -when: length(${fleet}) > 0 and length(${inputs}) > 0 and hasKey(${output}, 'elasticsearch', 'logstash') -constraints: not (${runtime.arch} == 'arm64' and ${runtime.family} == 'redhat' and ${runtime.major} == '7') diff --git a/internal/spec/filebeat.yml b/internal/spec/filebeat.yml deleted file mode 100644 index 10f8ee4493b..00000000000 --- a/internal/spec/filebeat.yml +++ /dev/null @@ -1,115 +0,0 @@ -name: Filebeat -cmd: filebeat -args: [ - "-E", "setup.ilm.enabled=false", - "-E", "setup.template.enabled=false", - "-E", "management.enabled=true", - "-E", "logging.level=debug", - "-E", "gc_percent=${FILEBEAT_GOGC:100}", - "-E", "filebeat.config.modules.enabled=false" -] -artifact: beats/filebeat -restart_on_output_change: true -rules: -- fix_stream: {} -- inject_index: - type: logs - -- inject_stream_processor: - on_conflict: insert_after - type: logs - -- map: - path: inputs - rules: - - copy_all_to_list: - to: streams - on_conflict: noop - except: ["streams", "enabled", "processors"] - - copy_to_list: - item: processors - to: streams - on_conflict: insert_before - -- rename: - from: inputs - to: inputsstreams - -- extract_list_items: - path: inputsstreams - item: streams - to: inputs - -- map: - path: inputs - rules: - - translate: - path: type - mapper: - logfile: log - event/file: log - event/stdin: stdin - 
event/tcp: tcp - event/udp: udp - log/docker: docker - log/redis_slowlog: redis - log/syslog: syslog - - remove_key: - key: use_output - - remove_key: - key: data_stream - - remove_key: - key: data_stream.namespace - - remove_key: - key: data_stream.dataset - -- filter_values: - selector: inputs - key: type - values: - - aws-cloudwatch - - aws-s3 - - azure-eventhub - - cloudfoundry - - container - - docker - - gcp-pubsub - - http_endpoint - - httpjson - - journald - - kafka - - log - - mqtt - - netflow - - o365audit - - redis - - stdin - - syslog - - tcp - - udp - - unix - - winlog - - filestream - -- filter_values: - selector: inputs - key: enabled - values: - - true - -- inject_agent_info: {} - -- copy: - from: inputs - to: filebeat - -- filter: - selectors: - - filebeat - - output - - keystore - -- inject_headers: {} - -when: length(${filebeat.inputs}) > 0 and hasKey(${output}, 'elasticsearch', 'redis', - 'kafka', 'logstash') diff --git a/internal/spec/fleet-server.yml b/internal/spec/fleet-server.yml deleted file mode 100644 index ea7af0e3b89..00000000000 --- a/internal/spec/fleet-server.yml +++ /dev/null @@ -1,69 +0,0 @@ -name: Fleet Server -cmd: fleet-server -args: ["--agent-mode"] -artifact: fleet-server -rules: - - fix_stream: {} - - - filter_values: - selector: inputs - key: type - values: - - fleet-server - - - filter_values: - selector: inputs - key: enabled - values: - - true - - - remove_key: - key: output - - - select_into: - selectors: [ fleet.server.output.elasticsearch ] - path: output - - - select_into: - selectors: [ fleet.server.policy.id ] - path: inputs.0.policy - - - insert_defaults: - selectors: - - fleet.server.host - - fleet.server.port - - fleet.server.internal_port - - fleet.server.ssl - path: inputs.0.server - - - map: - path: fleet - rules: - - filter: - selectors: - - agent - - host - - - map: - path: inputs - rules: - - remove_key: - key: use_output - - remove_key: - key: data_stream - - remove_key: - key: data_stream.namespace - - remove_key: - key: data_stream.dataset - - remove_key: - key: streams - - - filter: - selectors: - - fleet - - inputs - - output - - - inject_headers: {} - -when: length(${fleet}) > 0 and length(${inputs}) > 0 and hasKey(${output}, 'elasticsearch') diff --git a/internal/spec/heartbeat.yml b/internal/spec/heartbeat.yml deleted file mode 100644 index ecb373cf791..00000000000 --- a/internal/spec/heartbeat.yml +++ /dev/null @@ -1,24 +0,0 @@ -name: Heartbeat -cmd: heartbeat -args: ["-E", "setup.ilm.enabled=false", "-E", "setup.template.enabled=false", "-E", "management.enabled=true", "-E", "logging.level=debug"] -artifact: beats/heartbeat -restart_on_output_change: true -rules: - - fix_stream: {} - - filter_values_with_regexp: - key: type - re: ^synthetics/.+ - selector: inputs - - filter_values: - selector: inputs - key: enabled - values: - - true - - inject_agent_info: {} - - filter: - selectors: - - inputs - - output - - keystore -when: length(${inputs}) > 0 and hasKey(${output}, 'elasticsearch', 'redis', - 'kafka', 'logstash') diff --git a/internal/spec/metricbeat.yml b/internal/spec/metricbeat.yml deleted file mode 100644 index 3a6f3a0b8f4..00000000000 --- a/internal/spec/metricbeat.yml +++ /dev/null @@ -1,98 +0,0 @@ -name: Metricbeat -cmd: metricbeat -args: [ - "-E", "setup.ilm.enabled=false", - "-E", "setup.template.enabled=false", - "-E", "management.enabled=true", - "-E", "logging.level=debug", - "-E", "gc_percent=${METRICBEAT_GOGC:100}", - "-E", "metricbeat.config.modules.enabled=false" -] -artifact: beats/metricbeat 
-restart_on_output_change: true -rules: -- fix_stream: {} -- inject_index: - type: metrics - -- inject_stream_processor: - on_conflict: insert_after - type: metrics - -- rename: - from: inputs - to: inputsstreams - -- map: - path: inputsstreams - rules: - - copy_all_to_list: - to: streams - on_conflict: noop - except: ["streams", "id", "enabled", "processors"] - - copy_to_list: - item: processors - to: streams - on_conflict: insert_before - -- extract_list_items: - path: inputsstreams - item: streams - to: inputs - -- filter_values_with_regexp: - key: type - re: ^.+/metrics$ - selector: inputs - -- filter_values: - selector: inputs - key: enabled - values: - - true - -- map: - path: inputs - rules: - - translate_with_regexp: - path: type - re: ^(?P.+)/metrics$ - with: $type - - rename: - from: type - to: module - - make_array: - item: metricset - to: metricsets - - remove_key: - key: metricset - - remove_key: - key: enabled - - remove_key: - key: data_stream - - remove_key: - key: data_stream.dataset - - remove_key: - key: data_stream.namespace - - remove_key: - key: use_output - -- inject_agent_info: {} - -- copy: - from: inputs - to: metricbeat - -- rename: - from: metricbeat.inputs - to: modules - -- filter: - selectors: - - metricbeat - - output - - keystore -- inject_headers: {} - -when: length(${metricbeat.modules}) > 0 and hasKey(${output}, 'elasticsearch', - 'redis', 'kafka', 'logstash') diff --git a/internal/spec/osquerybeat.yml b/internal/spec/osquerybeat.yml deleted file mode 100644 index 36e60901a34..00000000000 --- a/internal/spec/osquerybeat.yml +++ /dev/null @@ -1,39 +0,0 @@ -name: Osquerybeat -cmd: osquerybeat -args: ["-E", "setup.ilm.enabled=false", "-E", "setup.template.enabled=false", "-E", "management.enabled=true", "-E", "logging.level=debug"] -restart_on_output_change: true -artifact: beats/osquerybeat -action_input_types: -- osquery - -check_install: -- exec_file: - path: "osquerybeat" - args: - - "verify" - timeout: 10 - -rules: -- fix_stream: {} -- inject_index: - type: logs - -- inject_stream_processor: - on_conflict: insert_after - type: logs - -- filter_values: - selector: inputs - key: type - values: - - osquery - -- inject_agent_info: {} - -- filter: - selectors: - - inputs - - output - -when: length(${inputs}) > 0 and hasKey(${output}, 'elasticsearch') -constraints: ${runtime.arch} != '386' diff --git a/internal/spec/packetbeat.yml b/internal/spec/packetbeat.yml deleted file mode 100644 index 37c2629f130..00000000000 --- a/internal/spec/packetbeat.yml +++ /dev/null @@ -1,22 +0,0 @@ -name: Packetbeat -cmd: packetbeat -args: ['-E', 'setup.ilm.enabled=false', '-E', 'setup.template.enabled=false', '-E', 'management.enabled=true', '-E', 'logging.level=debug'] -artifact: beats/packetbeat -restart_on_output_change: true -rules: - - filter_values: - selector: inputs - key: type - values: - - packet - - - inject_agent_info: {} - - - filter: - selectors: - - inputs - - output - -when: length(${inputs}) > 0 and hasKey(${output}, 'elasticsearch', 'redis', - 'kafka', 'logstash') - diff --git a/magefile.go b/magefile.go index 721071b2a0f..9ddfe7bb7fb 100644 --- a/magefile.go +++ b/magefile.go @@ -270,24 +270,17 @@ func (Build) Clean() { // TestBinaries build the required binaries for the test suite. 
func (Build) TestBinaries() error { - p := filepath.Join("internal", "pkg", "agent", "operation", "tests", "scripts") - p2 := filepath.Join("internal", "pkg", "agent", "transpiler", "tests") - p3 := filepath.Join("pkg", "component") - configurableName := "configurable" - serviceableName := "serviceable" + p := filepath.Join("internal", "pkg", "agent", "transpiler", "tests") + p2 := filepath.Join("pkg", "component") execName := "exec" fakeName := "fake" if runtime.GOOS == "windows" { - configurableName += ".exe" - serviceableName += ".exe" execName += ".exe" fakeName += ".exe" } return combineErr( - RunGo("build", "-o", filepath.Join(p, configurableName), filepath.Join(p, "configurable-1.0-darwin-x86_64", "main.go")), - RunGo("build", "-o", filepath.Join(p, serviceableName), filepath.Join(p, "serviceable-1.0-darwin-x86_64", "main.go")), - RunGo("build", "-o", filepath.Join(p2, "exec-1.0-darwin-x86_64", execName), filepath.Join(p2, "exec-1.0-darwin-x86_64", "main.go")), - RunGo("build", "-o", filepath.Join(p3, "fake", fakeName), filepath.Join(p3, "fake", "main.go")), + RunGo("build", "-o", filepath.Join(p, "exec-1.0-darwin-x86_64", execName), filepath.Join(p, "exec-1.0-darwin-x86_64", "main.go")), + RunGo("build", "-o", filepath.Join(p2, "fake", fakeName), filepath.Join(p2, "fake", "main.go")), ) } @@ -487,7 +480,7 @@ func commitID() string { // Update is an alias for executing control protocol, configs, and specs. func Update() { - mg.SerialDeps(Config, BuildSpec, BuildPGP, BuildFleetCfg) + mg.SerialDeps(Config, BuildPGP, BuildFleetCfg) } // CrossBuild cross-builds the beat for all target platforms. @@ -514,19 +507,6 @@ func ControlProto() error { "control.proto") } -// BuildSpec make sure that all the suppported program spec are built into the binary. -func BuildSpec() error { - // go run dev-tools/cmd/buildspec/buildspec.go --in internal/agent/spec/*.yml --out internal/pkg/agent/program/supported.go - goF := filepath.Join("dev-tools", "cmd", "buildspec", "buildspec.go") - in := filepath.Join("internal", "spec", "*.yml") - out := filepath.Join("internal", "pkg", "agent", "program", "supported.go") - - fmt.Printf(">> Buildspec from %s to %s\n", in, out) - return RunGo("run", goF, "--in", in, "--out", out) - - return nil -} - func BuildPGP() error { // go run elastic-agent/dev-tools/cmd/buildpgp/build_pgp.go --in agent/spec/GPG-KEY-elasticsearch --out elastic-agent/pkg/release/pgp.go goF := filepath.Join("dev-tools", "cmd", "buildpgp", "build_pgp.go") diff --git a/pkg/component/platforms.go b/pkg/component/platforms.go index 98e5bf21cd6..552adde716c 100644 --- a/pkg/component/platforms.go +++ b/pkg/component/platforms.go @@ -6,7 +6,11 @@ package component import ( "fmt" + goruntime "runtime" + "strconv" "strings" + + "github.com/elastic/go-sysinfo" ) const ( @@ -103,3 +107,29 @@ type PlatformDetail struct { Major string Minor string } + +// PlatformModifier can modify the platform details before the runtime specifications are loaded. +type PlatformModifier func(detail PlatformDetail) PlatformDetail + +// LoadPlatformDetail loads the platform details for the current system. 
+func LoadPlatformDetail(modifiers ...PlatformModifier) (PlatformDetail, error) { + info, err := sysinfo.Host() + if err != nil { + return PlatformDetail{}, err + } + os := info.Info().OS + detail := PlatformDetail{ + Platform: Platform{ + OS: goruntime.GOOS, + Arch: goruntime.GOARCH, + GOOS: goruntime.GOOS, + }, + Family: os.Family, + Major: strconv.Itoa(os.Major), + Minor: strconv.Itoa(os.Minor), + } + for _, modifier := range modifiers { + detail = modifier(detail) + } + return detail, nil +} From d636633b3125a8eff57da2a848a660626f44060f Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Thu, 21 Jul 2022 10:42:09 -0400 Subject: [PATCH 14/19] Remove more. --- internal/pkg/agent/stateresolver/resolve.go | 177 -------- .../pkg/agent/stateresolver/resolve_test.go | 396 ------------------ .../agent/stateresolver/statechange_string.go | 30 -- .../pkg/agent/stateresolver/stateresolver.go | 68 --- .../agent/stateresolver/stateresolver_test.go | 63 --- internal/pkg/tokenbucket/token_bucket.go | 85 ---- internal/pkg/tokenbucket/token_bucket_test.go | 109 ----- 7 files changed, 928 deletions(-) delete mode 100644 internal/pkg/agent/stateresolver/resolve.go delete mode 100644 internal/pkg/agent/stateresolver/resolve_test.go delete mode 100644 internal/pkg/agent/stateresolver/statechange_string.go delete mode 100644 internal/pkg/agent/stateresolver/stateresolver.go delete mode 100644 internal/pkg/agent/stateresolver/stateresolver_test.go delete mode 100644 internal/pkg/tokenbucket/token_bucket.go delete mode 100644 internal/pkg/tokenbucket/token_bucket_test.go diff --git a/internal/pkg/agent/stateresolver/resolve.go b/internal/pkg/agent/stateresolver/resolve.go deleted file mode 100644 index 5afe2256cb6..00000000000 --- a/internal/pkg/agent/stateresolver/resolve.go +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package stateresolver - -import ( - "sort" - "strings" - "time" - - "github.com/elastic/elastic-agent/internal/pkg/agent/configrequest" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/release" -) - -//go:generate stringer -type=stateChange -linecomment=true - -const shortID = 8 - -// stateChange represent a how a process is modified between configuration change. -type stateChange uint8 - -const ( - startState stateChange = iota + 1 // START - updateState // UPDATE - unchangedState // UNCHANGED -) - -// state represent the SHOULD state of the system, contains a reference to the actual bundle of -// configuration received by the upstream call and keep track of the last change executed on a program. -// -// The list of change are the following: -// start: first time to see that configuration and decide to start a new process. -// update: need to update the process switch a new configuration. -// unchanged: keep running the process with the actual configuration. 
-type state struct { - ID string - LastModified time.Time - Active map[string]active -} - -func (s *state) ShortID() string { - if len(s.ID) <= shortID { - return s.ID - } - return s.ID[0:shortID] -} - -func (s *state) String() string { - var str strings.Builder - str.WriteString("ID:" + s.ID + ", LastModified: " + s.LastModified.String()) - str.WriteString("Active Process [\n") - for _, a := range s.Active { - str.WriteString(a.String()) - } - str.WriteString("]") - - return str.String() -} - -type active struct { - LastChange stateChange - LastModified time.Time - Identifier string - Program program.Program -} - -func (s *active) String() string { - return "Identifier: " + s.Identifier + - ", LastChange: " + s.LastChange.String() + - ", LastModified: " + s.LastModified.String() + - ", Checksum: " + s.Program.Checksum() -} - -type cfgReq interface { - ID() string - CreatedAt() time.Time - Programs() []program.Program -} - -// Converge converges the system, take the current sate and create a new should state and all the steps -// required to go from current state to the new state. -func converge(s state, cfg cfgReq) (state, []configrequest.Step) { - newState := state{ - ID: cfg.ID(), - LastModified: cfg.CreatedAt(), - Active: make(map[string]active, len(cfg.Programs())), - } - - steps := make([]configrequest.Step, 0) - - // Find process that must be stopped. - activeKeys := getActiveKeys(s.Active) - for _, id := range activeKeys { - active := s.Active[id] - - var found bool - for _, p := range cfg.Programs() { - // Still need to run the process. - if id == p.Identifier() { - found = true - break - } - } - - if !found { - steps = append(steps, configrequest.Step{ - ID: configrequest.StepRemove, - ProgramSpec: active.Program.Spec, - Version: release.Version(), - }) - } - } - - // What need to be started or updated. - for _, p := range cfg.Programs() { - a, found := s.Active[p.Identifier()] - if !found { - newState.Active[p.Identifier()] = active{ - LastChange: startState, - LastModified: cfg.CreatedAt(), - Identifier: p.Identifier(), - Program: p, - } - - steps = append(steps, configrequest.Step{ - ID: configrequest.StepRun, - ProgramSpec: p.Spec, - Version: release.Version(), - Meta: map[string]interface{}{ - configrequest.MetaConfigKey: p.Configuration(), - }, - }) - - // Complete new process, skip to the next process. - continue - } - - // Checksum doesn't match and we force an update of the process. - if a.Program.Checksum() != p.Checksum() { - newState.Active[p.Identifier()] = active{ - LastChange: updateState, - LastModified: cfg.CreatedAt(), - Identifier: p.Identifier(), - Program: p, - } - steps = append(steps, configrequest.Step{ - ID: configrequest.StepRun, - ProgramSpec: p.Spec, - Version: release.Version(), - Meta: map[string]interface{}{ - configrequest.MetaConfigKey: p.Configuration(), - }, - }) - } else { - // Configuration did not change in this loop so we keep - // the last configuration as is. - a.LastChange = unchangedState - newState.Active[p.Identifier()] = a - } - } - - // What need to be updated. 
- return newState, steps -} - -func getActiveKeys(aa map[string]active) []string { - keys := make([]string, 0, len(aa)) - for k := range aa { - keys = append(keys, k) - } - - sort.Strings(keys) - - return keys -} diff --git a/internal/pkg/agent/stateresolver/resolve_test.go b/internal/pkg/agent/stateresolver/resolve_test.go deleted file mode 100644 index 4276ca39639..00000000000 --- a/internal/pkg/agent/stateresolver/resolve_test.go +++ /dev/null @@ -1,396 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package stateresolver - -import ( - "regexp" - "testing" - "time" - - "github.com/google/go-cmp/cmp/cmpopts" - - "github.com/google/go-cmp/cmp" - "github.com/stretchr/testify/require" - - "github.com/elastic/elastic-agent/internal/pkg/agent/configrequest" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" - "github.com/elastic/elastic-agent/internal/pkg/release" -) - -func TestResolver(t *testing.T) { - fb1 := fb("1") - fb2 := fb("2") - mb1 := mb("2") - tn := time.Now() - tn2 := time.Now().Add(time.Minute * 5) - - testcases := map[string]struct { - submit cfgReq - cur state - should state - steps []configrequest.Step - }{ - "from no programs to running program": { - submit: &cfg{ - id: "config-1", - createdAt: tn, - programs: []program.Program{ - fb1, mb1, - }, - }, - cur: state{}, // empty state - should: state{ - ID: "config-1", - LastModified: tn, - Active: map[string]active{ - "filebeat": { - LastChange: startState, - LastModified: tn, - Identifier: "filebeat", - Program: fb1, - }, - "metricbeat": { - LastChange: startState, - LastModified: tn, - Identifier: "metricbeat", - Program: mb1, - }, - }, - }, - steps: []configrequest.Step{ - { - ID: configrequest.StepRun, - ProgramSpec: fb1.Spec, - Version: release.Version(), - Meta: withMeta(fb1), - }, - { - ID: configrequest.StepRun, - ProgramSpec: mb1.Spec, - Version: release.Version(), - Meta: withMeta(mb1), - }, - }, - }, - "adding a program to an already running system": { - submit: &cfg{ - id: "config-2", - createdAt: tn2, - programs: []program.Program{ - fb1, mb1, - }, - }, - cur: state{ - ID: "config-1", - LastModified: tn, - Active: map[string]active{ - "filebeat": { - LastChange: startState, - LastModified: tn, - Identifier: "filebeat", - Program: fb1, - }, - }, - }, - should: state{ - ID: "config-2", - LastModified: tn2, - Active: map[string]active{ - "filebeat": { - LastChange: unchangedState, - LastModified: tn, - Identifier: "filebeat", - Program: fb1, - }, - "metricbeat": { - LastChange: startState, - LastModified: tn2, - Identifier: "metricbeat", - Program: mb1, - }, - }, - }, - steps: []configrequest.Step{ - { - ID: configrequest.StepRun, - ProgramSpec: mb1.Spec, - Version: release.Version(), - Meta: withMeta(mb1), - }, - }, - }, - "updating an already running program": { - submit: &cfg{ - id: "config-2", - createdAt: tn2, - programs: []program.Program{ - fb2, mb1, - }, - }, - cur: state{ - ID: "config-1", - LastModified: tn, - Active: map[string]active{ - "filebeat": { - LastChange: startState, - LastModified: tn, - Identifier: "filebeat", - Program: fb1, - }, - }, - }, - should: state{ - ID: "config-2", - LastModified: tn2, - Active: map[string]active{ - "filebeat": { - LastChange: 
updateState, - LastModified: tn2, - Identifier: "filebeat", - Program: fb2, - }, - "metricbeat": { - LastChange: startState, - LastModified: tn2, - Identifier: "metricbeat", - Program: mb1, - }, - }, - }, - steps: []configrequest.Step{ - { - ID: configrequest.StepRun, - ProgramSpec: fb2.Spec, - Version: release.Version(), - Meta: withMeta(fb2), - }, - { - ID: configrequest.StepRun, - ProgramSpec: mb1.Spec, - Version: release.Version(), - Meta: withMeta(mb1), - }, - }, - }, - "remove a running program and start a new one": { - submit: &cfg{ - id: "config-2", - createdAt: tn2, - programs: []program.Program{ - mb1, - }, - }, - cur: state{ - ID: "config-1", - LastModified: tn, - Active: map[string]active{ - "filebeat": { - LastChange: startState, - LastModified: tn, - Identifier: "filebeat", - Program: fb1, - }, - }, - }, - should: state{ - ID: "config-2", - LastModified: tn2, - Active: map[string]active{ - "metricbeat": { - LastChange: startState, - LastModified: tn2, - Identifier: "metricbeat", - Program: mb1, - }, - }, - }, - steps: []configrequest.Step{ - { - ID: configrequest.StepRemove, - ProgramSpec: fb1.Spec, - Version: release.Version(), - }, - { - ID: configrequest.StepRun, - ProgramSpec: mb1.Spec, - Version: release.Version(), - Meta: withMeta(mb1), - }, - }, - }, - "stops all runnings programs": { - submit: &cfg{ - id: "config-2", - createdAt: tn2, - programs: []program.Program{}, - }, - cur: state{ - ID: "config-1", - LastModified: tn, - Active: map[string]active{ - "filebeat": { - LastChange: startState, - LastModified: tn, - Identifier: "filebeat", - Program: fb1, - }, - "metricbeat": { - LastChange: startState, - LastModified: tn, - Identifier: "metricbeat", - Program: mb1, - }, - }, - }, - should: state{ - ID: "config-2", - LastModified: tn2, - Active: map[string]active{}, - }, - steps: []configrequest.Step{ - { - ID: configrequest.StepRemove, - ProgramSpec: fb1.Spec, - Version: release.Version(), - }, - { - ID: configrequest.StepRemove, - ProgramSpec: mb1.Spec, - Version: release.Version(), - }, - }, - }, - "no changes detected": { - submit: &cfg{ - id: "config-1", - createdAt: tn, - programs: []program.Program{ - fb1, mb1, - }, - }, - cur: state{ - ID: "config-1", - LastModified: tn, - Active: map[string]active{ - "filebeat": { - LastChange: startState, - LastModified: tn, - Identifier: "filebeat", - Program: fb1, - }, - "metricbeat": { - LastChange: startState, - LastModified: tn, - Identifier: "metricbeat", - Program: mb1, - }, - }, - }, - should: state{ - ID: "config-1", - LastModified: tn, - Active: map[string]active{ - "filebeat": { - LastChange: unchangedState, - LastModified: tn, - Identifier: "filebeat", - Program: fb1, - }, - "metricbeat": { - LastChange: unchangedState, - LastModified: tn, - Identifier: "metricbeat", - Program: mb1, - }, - }, - }, - steps: []configrequest.Step{}, - }, - } - - for name, test := range testcases { - t.Run(name, func(t *testing.T) { - should, steps := converge(test.cur, test.submit) - - require.Equal(t, test.should.ID, should.ID) - require.Equal(t, test.should.LastModified, should.LastModified) - - require.Equal(t, len(test.steps), len(steps), "steps count don't match") - require.Equal(t, len(test.should.Active), len(should.Active), "active count don't match") - - for id, a := range test.should.Active { - compare := should.Active[id] - require.Equal(t, a.LastModified, compare.LastModified) - require.Equal(t, a.Identifier, compare.Identifier) - require.Equal(t, a.LastChange, compare.LastChange) - require.Equal(t, a.Program.Checksum(), 
compare.Program.Checksum()) - } - - if diff := cmp.Diff(test.steps, steps, cmpopts.IgnoreUnexported(regexp.Regexp{})); diff != "" { - t.Errorf("converge() mismatch (-want +got):\n%s", diff) - } - }) - } -} - -type cfg struct { - id string - createdAt time.Time - programs []program.Program -} - -func (c *cfg) ID() string { - return c.id -} - -func (c *cfg) ShortID() string { - return c.id -} - -func (c *cfg) Programs() []program.Program { - return c.programs -} - -func (c *cfg) CreatedAt() time.Time { - return c.createdAt -} - -func (c *cfg) ProgramNames() []string { - names := make([]string, 0, len(c.programs)) - for _, name := range c.programs { - names = append(names, name.Spec.Name) - } - return names -} - -func p(identifier, checksum string) program.Program { - s, ok := program.FindSpecByName(identifier) - if !ok { - panic("can't find spec with identifier " + identifier) - } - return program.Program{ - Spec: s, - Config: transpiler.MustNewAST(map[string]interface{}{ - s.Name: map[string]interface{}{ - "checksum": checksum, // make sure checksum is different between configuration change. - }, - }), - } -} - -func fb(checksum string) program.Program { - return p("Filebeat", checksum) -} - -func mb(checksum string) program.Program { - return p("Metricbeat", checksum) -} - -func withMeta(prog program.Program) map[string]interface{} { - return map[string]interface{}{ - configrequest.MetaConfigKey: prog.Configuration(), - } -} diff --git a/internal/pkg/agent/stateresolver/statechange_string.go b/internal/pkg/agent/stateresolver/statechange_string.go deleted file mode 100644 index 53175471789..00000000000 --- a/internal/pkg/agent/stateresolver/statechange_string.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -// Code generated by "stringer -type=stateChange -linecomment=true"; DO NOT EDIT. - -package stateresolver - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[startState-1] - _ = x[updateState-2] - _ = x[unchangedState-3] -} - -const _stateChange_name = "STARTUPDATEUNCHANGE" - -var _stateChange_index = [...]uint8{0, 5, 11, 19} - -func (i stateChange) String() string { - i -= 1 - if i >= stateChange(len(_stateChange_index)-1) { - return "stateChange(" + strconv.FormatInt(int64(i+1), 10) + ")" - } - return _stateChange_name[_stateChange_index[i]:_stateChange_index[i+1]] -} diff --git a/internal/pkg/agent/stateresolver/stateresolver.go b/internal/pkg/agent/stateresolver/stateresolver.go deleted file mode 100644 index d63cc482cde..00000000000 --- a/internal/pkg/agent/stateresolver/stateresolver.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package stateresolver - -import ( - "sync" - - "github.com/elastic/elastic-agent/internal/pkg/agent/configrequest" - uid "github.com/elastic/elastic-agent/internal/pkg/id" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -// Acker allow to ack the should state from a converge operation. 
-type Acker func() - -// StateResolver is a resolver of a config state change -// it subscribes to Config event and publishes StateChange events based on that/ -// Based on StateChange event operator know what to do. -type StateResolver struct { - l *logger.Logger - curState state - mu sync.Mutex -} - -// NewStateResolver allow to modify default event names. -func NewStateResolver(log *logger.Logger) (*StateResolver, error) { - return &StateResolver{ - l: log, - }, nil -} - -// Resolve resolves passed config into one or multiple steps -func (s *StateResolver) Resolve( - cfg configrequest.Request, -) (uid.ID, string, []configrequest.Step, Acker, error) { - s.mu.Lock() - defer s.mu.Unlock() - - newState, steps := converge(s.curState, cfg) - newStateID := newState.ShortID() - id, err := uid.Generate() - if err != nil { - return id, newStateID, nil, nil, err - } - - s.l.Infof("New State ID is %s", newStateID) - s.l.Infof("Converging state requires execution of %d step(s)", len(steps)) - for i, step := range steps { - // more detailed debug log - s.l.Debugf("step %d: %s", i, step.String()) - } - - // Allow the operator to ack the should state when applying the steps is done correctly. - ack := func() { - s.ack(newState) - } - - return id, newStateID, steps, ack, nil -} - -func (s *StateResolver) ack(newState state) { - s.mu.Lock() - defer s.mu.Unlock() - s.l.Info("Updating internal state") - s.curState = newState -} diff --git a/internal/pkg/agent/stateresolver/stateresolver_test.go b/internal/pkg/agent/stateresolver/stateresolver_test.go deleted file mode 100644 index ad67725e6a7..00000000000 --- a/internal/pkg/agent/stateresolver/stateresolver_test.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package stateresolver - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -func TestStateResolverAcking(t *testing.T) { - submit := &cfg{ - id: "config-1", - createdAt: time.Now(), - programs: []program.Program{ - fb("1"), mb("1"), - }, - } - - t.Run("when we ACK the should state", func(t *testing.T) { - log, _ := logger.New("", false) - r, err := NewStateResolver(log) - require.NoError(t, err) - - // Current state is empty. - _, _, steps, ack, err := r.Resolve(submit) - require.NoError(t, err) - require.Equal(t, 2, len(steps)) - - // Ack the should state. - ack() - - // Current sate is not empty lets try to resolve the same configuration. - _, _, steps, _, err = r.Resolve(submit) - require.NoError(t, err) - require.Equal(t, 0, len(steps)) - }) - - t.Run("when we don't ACK the should state", func(t *testing.T) { - log, _ := logger.New("", false) - r, err := NewStateResolver(log) - require.NoError(t, err) - - // Current state is empty. - _, _, steps1, _, err := r.Resolve(submit) - require.NoError(t, err) - require.Equal(t, 2, len(steps1)) - - // We didn't ACK the should state, verify that resolve produce the same output. 
- _, _, steps2, _, err := r.Resolve(submit) - require.NoError(t, err) - require.Equal(t, 2, len(steps2)) - - assert.Equal(t, steps1, steps2) - }) -} diff --git a/internal/pkg/tokenbucket/token_bucket.go b/internal/pkg/tokenbucket/token_bucket.go deleted file mode 100644 index b530d238e3b..00000000000 --- a/internal/pkg/tokenbucket/token_bucket.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package tokenbucket - -import ( - "context" - "fmt" - "time" - - "github.com/elastic/elastic-agent/internal/pkg/scheduler" -) - -// Bucket is a Token Bucket for rate limiting -type Bucket struct { - dropAmount int - rateChan chan struct{} - closeChan chan struct{} - scheduler scheduler.Scheduler -} - -// NewTokenBucket creates a bucket and starts it. -// size: total size of the bucket -// dropAmount: amount which is dropped per every specified interval -// dropRate: specified interval when drop will happen -func NewTokenBucket(ctx context.Context, size, dropAmount int, dropRate time.Duration) (*Bucket, error) { - s := scheduler.NewPeriodic(dropRate) - return newTokenBucketWithScheduler(ctx, size, dropAmount, s) -} - -func newTokenBucketWithScheduler( - ctx context.Context, - size, dropAmount int, - s scheduler.Scheduler, -) (*Bucket, error) { - if dropAmount > size { - return nil, fmt.Errorf( - "TokenBucket: invalid configuration, size '%d' is lower than drop amount '%d'", - size, - dropAmount, - ) - } - - b := &Bucket{ - dropAmount: dropAmount, - rateChan: make(chan struct{}, size), - closeChan: make(chan struct{}), - scheduler: s, - } - go b.run(ctx) - - return b, nil -} - -// Add adds item into a bucket. Add blocks until it is able to add item into a bucket. -func (b *Bucket) Add() { - b.rateChan <- struct{}{} -} - -// Close stops the rate limiting and does not let pass anything anymore. -func (b *Bucket) Close() { - close(b.closeChan) - close(b.rateChan) - b.scheduler.Stop() -} - -// run runs basic loop and consumes configured tokens per every configured period. -func (b *Bucket) run(ctx context.Context) { - for { - select { - case <-b.scheduler.WaitTick(): - for i := 0; i < b.dropAmount; i++ { - select { - case <-b.rateChan: - default: // do not cumulate drops - } - } - case <-b.closeChan: - return - case <-ctx.Done(): - return - } - } -} diff --git a/internal/pkg/tokenbucket/token_bucket_test.go b/internal/pkg/tokenbucket/token_bucket_test.go deleted file mode 100644 index e38a4f3f149..00000000000 --- a/internal/pkg/tokenbucket/token_bucket_test.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package tokenbucket - -import ( - "context" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - "github.com/elastic/elastic-agent/internal/pkg/scheduler" -) - -func TestTokenBucket(t *testing.T) { - dropAmount := 1 - bucketSize := 3 - - t.Run("when way below the bucket size it should not block", func(t *testing.T) { - stepper := scheduler.NewStepper() - - b, err := newTokenBucketWithScheduler( - context.Background(), - bucketSize, - dropAmount, - stepper, - ) - - assert.NoError(t, err, "initiating a bucket failed") - - // Below the bucket size and should not block. - b.Add() - }) - - t.Run("when below the bucket size it should not block", func(t *testing.T) { - stepper := scheduler.NewStepper() - - b, err := newTokenBucketWithScheduler( - context.Background(), - bucketSize, - dropAmount, - stepper, - ) - - assert.NoError(t, err, "initiating a bucket failed") - - // Below the bucket size and should not block. - b.Add() - b.Add() - }) - - t.Run("when we hit the bucket size it should block", func(t *testing.T) { - stepper := scheduler.NewStepper() - - b, err := newTokenBucketWithScheduler( - context.Background(), - bucketSize, - dropAmount, - stepper, - ) - - assert.NoError(t, err, "initiating a bucket failed") - - // Same as the bucket size and should block. - b.Add() - b.Add() - b.Add() - - // Out of bound unblock calls - unblock := func() { - var wg sync.WaitGroup - wg.Add(1) - go func(wg *sync.WaitGroup) { - wg.Done() - - // will unblock the next Add after a second. - <-time.After(1 * time.Second) - stepper.Next() - }(&wg) - wg.Wait() - } - - unblock() - b.Add() // Should block and be unblocked, if not unblock test will timeout. - unblock() - b.Add() // Should block and be unblocked, if not unblock test will timeout. - }) - - t.Run("When we use a timer scheduler we can unblock", func(t *testing.T) { - d := 1 * time.Second - b, err := NewTokenBucket( - context.Background(), - bucketSize, - dropAmount, - d, - ) - - assert.NoError(t, err, "initiating a bucket failed") - - // Same as the bucket size and should block. - b.Add() - b.Add() - b.Add() - b.Add() // Should block and be unblocked, if not unblock test will timeout. - }) -} From b32ea2acb2faba46b0d33dab58c06ea26636fbcf Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Thu, 21 Jul 2022 10:50:47 -0400 Subject: [PATCH 15/19] Delete more unused code. 
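Removes the internal retry and reporter packages, which are no longer referenced,
together with the configuration fields that carried their settings (the fleet
"reporting" block and the settings "retry" block), and drops the unused
specer/specInfo helpers from the control server.

For reviewers, a rough sketch of the call shape that goes away with
internal/pkg/core/retry (signatures taken from the deleted files below; the
caller and the config literal are illustrative only, no such call sites remain):

    cfg := &retry.Config{Enabled: true, RetriesCount: 3, Delay: 30 * time.Second}
    err := retry.Do(ctx, cfg, func(ctx context.Context) error {
        // hypothetical flaky operation, for illustration only
        return doSomethingFlaky(ctx)
    })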
---
 dev-tools/mage/common.go                      |   2 +
 dev-tools/mage/pkgtypes.go                    |   1 +
 internal/pkg/agent/configuration/fleet.go     |  19 +-
 internal/pkg/agent/configuration/settings.go  |   3 -
 internal/pkg/agent/control/server/server.go   |  11 -
 internal/pkg/core/retry/config.go             |  41 ---
 internal/pkg/core/retry/error.go              |  30 ---
 internal/pkg/core/retry/retrystrategy.go      | 123 ---------
 internal/pkg/core/retry/retrystrategy_test.go | 183 -------------
 internal/pkg/reporter/backend.go              |  13 -
 internal/pkg/reporter/event.go                |  30 ---
 internal/pkg/reporter/fleet/config/config.go  |  19 --
 internal/pkg/reporter/fleet/reporter.go       | 175 -------------
 internal/pkg/reporter/fleet/reporter_test.go  | 241 ------------------
 internal/pkg/reporter/log/format.go           |  59 -----
 internal/pkg/reporter/log/reporter.go         |  57 -----
 internal/pkg/reporter/log/reporter_test.go    |  98 -------
 internal/pkg/reporter/noop/reporter.go        |  28 --
 internal/pkg/reporter/reporter.go             | 157 ------------
 internal/pkg/reporter/reporter_test.go        | 120 ---------
 20 files changed, 11 insertions(+), 1399 deletions(-)
 delete mode 100644 internal/pkg/core/retry/config.go
 delete mode 100644 internal/pkg/core/retry/error.go
 delete mode 100644 internal/pkg/core/retry/retrystrategy.go
 delete mode 100644 internal/pkg/core/retry/retrystrategy_test.go
 delete mode 100644 internal/pkg/reporter/backend.go
 delete mode 100644 internal/pkg/reporter/event.go
 delete mode 100644 internal/pkg/reporter/fleet/config/config.go
 delete mode 100644 internal/pkg/reporter/fleet/reporter.go
 delete mode 100644 internal/pkg/reporter/fleet/reporter_test.go
 delete mode 100644 internal/pkg/reporter/log/format.go
 delete mode 100644 internal/pkg/reporter/log/reporter.go
 delete mode 100644 internal/pkg/reporter/log/reporter_test.go
 delete mode 100644 internal/pkg/reporter/noop/reporter.go
 delete mode 100644 internal/pkg/reporter/reporter.go
 delete mode 100644 internal/pkg/reporter/reporter_test.go

diff --git a/dev-tools/mage/common.go b/dev-tools/mage/common.go
index 4cee3270fe1..52152856389 100644
--- a/dev-tools/mage/common.go
+++ b/dev-tools/mage/common.go
@@ -763,6 +763,8 @@ func CreateSHA512File(file string) error {
 	//nolint:gosec // permissions are correct
 	return os.WriteFile(file+".sha512", []byte(out), 0644)
 }
+
+// GetSHA512Hash returns SHA512 hash of file.
 func GetSHA512Hash(file string) (string, error) {
 	f, err := os.Open(file)
 	if err != nil {
diff --git a/dev-tools/mage/pkgtypes.go b/dev-tools/mage/pkgtypes.go
index 1ae6fc9e148..a4213fee784 100644
--- a/dev-tools/mage/pkgtypes.go
+++ b/dev-tools/mage/pkgtypes.go
@@ -3,6 +3,7 @@
 // you may not use this file except in compliance with the Elastic License.
 
 //nolint:goconst // avoiding const check for Deb/Zip
+
 package mage
 
 import (
diff --git a/internal/pkg/agent/configuration/fleet.go b/internal/pkg/agent/configuration/fleet.go
index 5bc9c115a63..0ae59c8f4e8 100644
--- a/internal/pkg/agent/configuration/fleet.go
+++ b/internal/pkg/agent/configuration/fleet.go
@@ -7,18 +7,16 @@ package configuration
 import (
 	"github.com/elastic/elastic-agent/internal/pkg/agent/errors"
 	"github.com/elastic/elastic-agent/internal/pkg/remote"
-	fleetreporterConfig "github.com/elastic/elastic-agent/internal/pkg/reporter/fleet/config"
 )
 
 // FleetAgentConfig is the internal configuration of the agent after the enrollment is done,
 // this configuration is not exposed in anyway in the elastic-agent.yml and is only internal configuration.
type FleetAgentConfig struct { - Enabled bool `config:"enabled" yaml:"enabled"` - AccessAPIKey string `config:"access_api_key" yaml:"access_api_key"` - Client remote.Config `config:",inline" yaml:",inline"` - Reporting *fleetreporterConfig.Config `config:"reporting" yaml:"reporting"` - Info *AgentInfo `config:"agent" yaml:"agent"` - Server *FleetServerConfig `config:"server" yaml:"server,omitempty"` + Enabled bool `config:"enabled" yaml:"enabled"` + AccessAPIKey string `config:"access_api_key" yaml:"access_api_key"` + Client remote.Config `config:",inline" yaml:",inline"` + Info *AgentInfo `config:"agent" yaml:"agent"` + Server *FleetServerConfig `config:"server" yaml:"server,omitempty"` } // Valid validates the required fields for accessing the API. @@ -44,9 +42,8 @@ func (e *FleetAgentConfig) Valid() error { // DefaultFleetAgentConfig creates a default configuration for fleet. func DefaultFleetAgentConfig() *FleetAgentConfig { return &FleetAgentConfig{ - Enabled: false, - Client: remote.DefaultClientConfig(), - Reporting: fleetreporterConfig.DefaultConfig(), - Info: &AgentInfo{}, + Enabled: false, + Client: remote.DefaultClientConfig(), + Info: &AgentInfo{}, } } diff --git a/internal/pkg/agent/configuration/settings.go b/internal/pkg/agent/configuration/settings.go index 8f515d21138..eab16a8177d 100644 --- a/internal/pkg/agent/configuration/settings.go +++ b/internal/pkg/agent/configuration/settings.go @@ -10,7 +10,6 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" monitoringCfg "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config" - "github.com/elastic/elastic-agent/internal/pkg/core/retry" "github.com/elastic/elastic-agent/pkg/core/logger" "github.com/elastic/elastic-agent/pkg/core/process" ) @@ -23,7 +22,6 @@ type SettingsConfig struct { DownloadConfig *artifact.Config `yaml:"download" config:"download" json:"download"` ProcessConfig *process.Config `yaml:"process" config:"process" json:"process"` GRPC *GRPCConfig `yaml:"grpc" config:"grpc" json:"grpc"` - RetryConfig *retry.Config `yaml:"retry" config:"retry" json:"retry"` MonitoringConfig *monitoringCfg.MonitoringConfig `yaml:"monitoring" config:"monitoring" json:"monitoring"` LoggingConfig *logger.Config `yaml:"logging,omitempty" config:"logging,omitempty" json:"logging,omitempty"` @@ -36,7 +34,6 @@ type SettingsConfig struct { func DefaultSettingsConfig() *SettingsConfig { return &SettingsConfig{ ProcessConfig: process.DefaultConfig(), - RetryConfig: retry.DefaultConfig(), DownloadConfig: artifact.DefaultConfig(), LoggingConfig: logger.DefaultLoggingConfig(), MonitoringConfig: monitoringCfg.DefaultConfig(), diff --git a/internal/pkg/agent/control/server/server.go b/internal/pkg/agent/control/server/server.go index 620a5b7b024..6aeb37e3541 100644 --- a/internal/pkg/agent/control/server/server.go +++ b/internal/pkg/agent/control/server/server.go @@ -19,7 +19,6 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/control" "github.com/elastic/elastic-agent/internal/pkg/agent/control/cproto" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" monitoringCfg "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config" "github.com/elastic/elastic-agent/internal/pkg/release" "github.com/elastic/elastic-agent/pkg/core/logger" @@ -40,16 +39,6 @@ type Server struct { lock sync.RWMutex } -type specer interface { - Specs() map[string]program.Spec -} - -type 
specInfo struct { - spec program.Spec - app string - rk string -} - // New creates a new control protocol server. func New(log *logger.Logger, cfg *monitoringCfg.MonitoringConfig, coord *coordinator.Coordinator, tracer *apm.Tracer) *Server { return &Server{ diff --git a/internal/pkg/core/retry/config.go b/internal/pkg/core/retry/config.go deleted file mode 100644 index 11cd1e7b418..00000000000 --- a/internal/pkg/core/retry/config.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package retry - -import "time" - -const ( - defaultRetriesCount = 3 - defaultDelay = 30 * time.Second - defaultMaxDelay = 5 * time.Minute -) - -// Config is a configuration of a strategy -type Config struct { - // Enabled determines whether retry is possible. Default is false. - Enabled bool `yaml:"enabled" config:"enabled"` - // RetriesCount specifies number of retries. Default is 3. - // Retry count of 1 means it will be retried one time after one failure. - RetriesCount int `yaml:"retriesCount" config:"retriesCount"` - // Delay specifies delay in ms between retries. Default is 30s - Delay time.Duration `yaml:"delay" config:"delay"` - // MaxDelay specifies maximum delay in ms between retries. Default is 300s (5min) - MaxDelay time.Duration `yaml:"maxDelay" config:"maxDelay"` - // Exponential determines whether delay is treated as exponential. - // With 30s delay and 3 retries: 30, 60, 120s - // Default is false - Exponential bool `yaml:"exponential" config:"exponential"` -} - -// DefaultConfig creates a config with pre-set default values. -func DefaultConfig() *Config { - return &Config{ - Enabled: false, - RetriesCount: 3, - Delay: 30 * time.Second, - MaxDelay: 5 * time.Minute, - Exponential: false, - } -} diff --git a/internal/pkg/core/retry/error.go b/internal/pkg/core/retry/error.go deleted file mode 100644 index b5ef3bda746..00000000000 --- a/internal/pkg/core/retry/error.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package retry - -// Fatal in retry package is an interface each error needs to implement -// in order to say whether or not it is fatal. -type Fatal interface { - Fatal() bool -} - -// FatalError wraps an error and is always fatal -type FatalError struct { - error -} - -// Fatal determines whether or not error is fatal -func (*FatalError) Fatal() bool { - return true -} - -// ErrorMakeFatal is a shorthand for making an error fatal -func ErrorMakeFatal(err error) error { - if err == nil { - return err - } - - return FatalError{err} -} diff --git a/internal/pkg/core/retry/retrystrategy.go b/internal/pkg/core/retry/retrystrategy.go deleted file mode 100644 index 4dd19c7c5d8..00000000000 --- a/internal/pkg/core/retry/retrystrategy.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package retry - -import ( - "context" - "errors" - "time" - - "github.com/elastic/elastic-agent/internal/pkg/core/backoff" -) - -// DoWithBackoff ignores retry config of delays and lets backoff decide how much time it needs. -func DoWithBackoff(config *Config, b backoff.Backoff, fn func() error, fatalErrors ...error) error { - retryCount := getRetryCount(config) - var err error - - for retryNo := 0; retryNo <= retryCount; retryNo++ { - err = fn() - if err == nil || isFatal(err, fatalErrors...) { - b.Reset() - return err - } - - if retryNo < retryCount { - b.Wait() - } - } - - return err -} - -// Do runs provided function in a manner specified in retry configuration -func Do(ctx context.Context, config *Config, fn func(ctx context.Context) error, fatalErrors ...error) error { - retryCount := getRetryCount(config) - var err error - -RETRY_LOOP: - for retryNo := 0; retryNo <= retryCount; retryNo++ { - if ctx.Err() != nil { - break - } - - err = fn(ctx) - if err == nil { - return nil - } - - if isFatal(err, fatalErrors...) { - return err - } - - if retryNo < retryCount { - t := time.NewTimer(getDelayDuration(config, retryNo)) - select { - case <-t.C: - case <-ctx.Done(): - t.Stop() - break RETRY_LOOP - } - } - } - - return err -} - -func getRetryCount(config *Config) int { - if config == nil { - return defaultRetriesCount - } - - if !config.Enabled { - return 0 - } - - if config.RetriesCount > 0 { - return config.RetriesCount - } - - return defaultRetriesCount -} - -func getDelayDuration(config *Config, retryNo int) time.Duration { - delay := defaultDelay - - if config != nil { - if config.Delay > 0 { - delay = config.Delay - } - - if config.Exponential { - delay = time.Duration(delay.Nanoseconds() * int64(retryNo+1)) - } - } - - maxDelay := config.MaxDelay - if maxDelay == 0 { - maxDelay = defaultMaxDelay - } - if delay > maxDelay { - delay = maxDelay - } - return time.Duration(delay) -} - -// Error is fatal either if it implements Error interface and says so -// or if it is equal to one of the fatal values provided -func isFatal(err error, fatalErrors ...error) bool { - if fatalerr, ok := err.(Fatal); ok { // nolint:errorlint // Non obvious handling, we will refactor that module for v2. - return fatalerr.Fatal() - } - - for _, e := range fatalErrors { - if errors.Is(e, err) { - return true - } - } - - // What does not match criteria is considered transient - return false -} diff --git a/internal/pkg/core/retry/retrystrategy_test.go b/internal/pkg/core/retry/retrystrategy_test.go deleted file mode 100644 index f6df5258ad2..00000000000 --- a/internal/pkg/core/retry/retrystrategy_test.go +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package retry - -import ( - "context" - "errors" - "testing" - "time" - - "github.com/elastic/elastic-agent/internal/pkg/core/backoff" -) - -func TestRetry(t *testing.T) { - type testCase struct { - Fn func(context.Context) error - ExpectedExecutions int64 - IsErrExpected bool - Enabled bool - Exponential bool - } - - errFatal := errors.New("fatal") - var executions int64 - - testCases := map[string]testCase{ - "not-failing": testCase{Fn: func(_ context.Context) error { executions++; return nil }, ExpectedExecutions: 1, Enabled: true}, - "failing": testCase{Fn: func(_ context.Context) error { executions++; return errors.New("fail") }, ExpectedExecutions: 4, IsErrExpected: true, Enabled: true}, - "fatal-by-enum": testCase{Fn: func(_ context.Context) error { executions++; return errFatal }, ExpectedExecutions: 1, IsErrExpected: true, Enabled: true}, - "fatal-by-iface": testCase{Fn: func(_ context.Context) error { executions++; return ErrFatal{} }, ExpectedExecutions: 1, IsErrExpected: true, Enabled: true}, - "not-fatal-by-iface": testCase{Fn: func(_ context.Context) error { executions++; return ErrNotFatal{} }, ExpectedExecutions: 4, IsErrExpected: true, Enabled: true}, - - "dis-not-failing": testCase{Fn: func(_ context.Context) error { executions++; return nil }, ExpectedExecutions: 1, Enabled: false}, - "dis-failing": testCase{Fn: func(_ context.Context) error { executions++; return errors.New("fail") }, ExpectedExecutions: 1, IsErrExpected: true, Enabled: false}, - "dis-fatal-by-enum": testCase{Fn: func(_ context.Context) error { executions++; return errFatal }, ExpectedExecutions: 1, IsErrExpected: true, Enabled: false}, - "dis-fatal-by-iface": testCase{Fn: func(_ context.Context) error { executions++; return ErrFatal{} }, ExpectedExecutions: 1, IsErrExpected: true, Enabled: false}, - "dis-not-fatal-by-iface": testCase{Fn: func(_ context.Context) error { executions++; return ErrNotFatal{} }, ExpectedExecutions: 1, IsErrExpected: true, Enabled: false}, - - "failing-exp": testCase{Fn: func(_ context.Context) error { executions++; return errors.New("fail") }, ExpectedExecutions: 4, IsErrExpected: true, Enabled: true, Exponential: true}, - } - - config := &Config{ - RetriesCount: 3, - Delay: 500 * time.Millisecond, - } - - for n, tc := range testCases { - testFn := tc.Fn - executions = 0 - config.Enabled = tc.Enabled - config.Exponential = tc.Exponential - - startTime := time.Now() - err := Do(context.Background(), config, testFn, errFatal) - - executionTime := time.Since(startTime) - minExecutionTime := getMinExecutionTime(config.Delay, tc.ExpectedExecutions, tc.Exponential) - maxExecutionTime := getMaxExecutionTime(config.Delay, tc.ExpectedExecutions, tc.Exponential) - if tc.ExpectedExecutions > 1 && (executionTime < minExecutionTime || executionTime > maxExecutionTime) { - t.Fatalf("[%s]: expecting execution time between %d-%d ns, got: %v", n, minExecutionTime, maxExecutionTime, executionTime) - } - - if (err == nil) == tc.IsErrExpected { - t.Fatalf("[%s]: expecting error, got: %v", n, err) - } - - if executions != tc.ExpectedExecutions { - t.Fatalf("[%s]: expecting %d executions, got: %d", n, tc.ExpectedExecutions, executions) - } - } -} - -func TestRetryWithBackoff(t *testing.T) { - type testCase struct { - Fn func() error - ExpectedExecutions int - IsErrExpected bool - Enabled bool - } - - errFatal := errors.New("fatal") - executions := 0 - - testCases := map[string]testCase{ - "not-failing": testCase{Fn: func() error { executions++; return nil }, ExpectedExecutions: 1, 
Enabled: true}, - "failing": testCase{Fn: func() error { executions++; return errors.New("fail") }, ExpectedExecutions: 4, IsErrExpected: true, Enabled: true}, - "fatal-by-enum": testCase{Fn: func() error { executions++; return errFatal }, ExpectedExecutions: 1, IsErrExpected: true, Enabled: true}, - "fatal-by-iface": testCase{Fn: func() error { executions++; return ErrFatal{} }, ExpectedExecutions: 1, IsErrExpected: true, Enabled: true}, - "not-fatal-by-iface": testCase{Fn: func() error { executions++; return ErrNotFatal{} }, ExpectedExecutions: 4, IsErrExpected: true, Enabled: true}, - } - - config := &Config{ - RetriesCount: 3, - Delay: 5000, - } - maxDelay := time.Duration(config.Delay) * time.Millisecond - - done := make(chan struct{}) - maxWaitTime := 200 * time.Millisecond - minWaitTime := 50 * time.Millisecond - backoff := backoff.NewEqualJitterBackoff(done, minWaitTime, maxWaitTime) - - for n, tc := range testCases { - testFn := tc.Fn - executions = 0 - config.Enabled = tc.Enabled - - startTime := time.Now() - err := DoWithBackoff(config, backoff, testFn, errFatal) - - executionTime := time.Since(startTime) - minExecTime := getBackoffMinTime(minWaitTime, maxWaitTime, tc.ExpectedExecutions) - if tc.ExpectedExecutions > 1 && (executionTime < minExecTime || executionTime > maxDelay) { - t.Fatalf("[%s]: expecting execution time between %d-%d ns, got: %v", n, minExecTime, maxDelay, executionTime) - } - - if (err == nil) == tc.IsErrExpected { - t.Fatalf("[%s]: expecting error, got: %v", n, err) - } - - if executions != tc.ExpectedExecutions { - t.Fatalf("[%s]: expecting %d executions, got: %d", n, tc.ExpectedExecutions, executions) - } - } -} - -type ErrFatal struct{ error } - -func (ErrFatal) Fatal() bool { - return true -} - -type ErrNotFatal struct{ error } - -func (ErrNotFatal) Fatal() bool { - return false -} - -func getMaxExecutionTime(delayDuration time.Duration, executions int64, exponential bool) time.Duration { - delay := delayDuration.Nanoseconds() - execTime := (executions)*delay + (delay / 2) - if exponential { - execTime = 0 - for i := int64(0); i < executions; i++ { - execTime += i * delay - } - execTime += (delay / 2) * executions - } - - return time.Duration(execTime) -} - -func getMinExecutionTime(delayDuration time.Duration, executions int64, exponential bool) time.Duration { - delay := delayDuration.Nanoseconds() - execTime := (executions-1)*delay - (delay / 2) - if exponential { - execTime = 0 - for i := int64(0); i < executions; i++ { - execTime += i * delay - } - execTime -= (delay / 2) - } - - if execTime < 0 { - execTime = 0 - } - return time.Duration(execTime) -} - -func getBackoffMinTime(delay time.Duration, maxWaitTime time.Duration, executions int) time.Duration { - var duration time.Duration - for i := 1; i < executions; i++ { - duration += delay - delay *= 2 - if delay > maxWaitTime { - delay = maxWaitTime - } - } - - return duration -} diff --git a/internal/pkg/reporter/backend.go b/internal/pkg/reporter/backend.go deleted file mode 100644 index 39ee2bcda5b..00000000000 --- a/internal/pkg/reporter/backend.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package reporter - -import "context" - -// Backend defines a actual implementation of reporting. 
-type Backend interface { - Report(context.Context, Event) error - Close() error -} diff --git a/internal/pkg/reporter/event.go b/internal/pkg/reporter/event.go deleted file mode 100644 index dff0c1a89a2..00000000000 --- a/internal/pkg/reporter/event.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package reporter - -import "time" - -// Event is a reported event. -type Event interface { - Type() string - SubType() string - Time() time.Time - Message() string - Payload() map[string]interface{} -} - -type event struct { - eventype string - subType string - timestamp time.Time - message string - payload map[string]interface{} -} - -func (e event) Type() string { return e.eventype } -func (e event) SubType() string { return e.subType } -func (e event) Time() time.Time { return e.timestamp } -func (e event) Message() string { return e.message } -func (e event) Payload() map[string]interface{} { return e.payload } diff --git a/internal/pkg/reporter/fleet/config/config.go b/internal/pkg/reporter/fleet/config/config.go deleted file mode 100644 index 1e42b956ee8..00000000000 --- a/internal/pkg/reporter/fleet/config/config.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package config - -// Config is a configuration describing fleet connected parts -type Config struct { - Threshold int `yaml:"threshold" config:"threshold" validate:"min=1"` - ReportingCheckFrequency int `yaml:"check_frequency_sec" config:"check_frequency_sec" validate:"min=1"` -} - -// DefaultConfig initiates FleetManagementConfig with default values -func DefaultConfig() *Config { - return &Config{ - Threshold: 10000, - ReportingCheckFrequency: 30, - } -} diff --git a/internal/pkg/reporter/fleet/reporter.go b/internal/pkg/reporter/fleet/reporter.go deleted file mode 100644 index d334a9b45cd..00000000000 --- a/internal/pkg/reporter/fleet/reporter.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package fleet - -import ( - "context" - "sync" - "time" - - "github.com/elastic/elastic-agent/internal/pkg/fleetapi" - "github.com/elastic/elastic-agent/internal/pkg/reporter" - "github.com/elastic/elastic-agent/internal/pkg/reporter/fleet/config" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -type event struct { - AgentID string `json:"agent_id"` - EventType string `json:"type"` - Ts fleetapi.Time `json:"timestamp"` - SubType string `json:"subtype"` - Msg string `json:"message"` - Payload map[string]interface{} `json:"payload,omitempty"` -} - -func (e *event) Type() string { - return e.EventType -} - -func (e *event) Timestamp() time.Time { - return time.Time(e.Ts) -} - -func (e *event) Message() string { - return e.Msg -} - -// Reporter is a reporter without any effects, serves just as a showcase for further implementations. 
-type Reporter struct { - info agentInfo - logger *logger.Logger - queue []fleetapi.SerializableEvent - qlock sync.Mutex - threshold int - lastAck time.Time -} - -type agentInfo interface { - AgentID() string -} - -// NewReporter creates a new fleet reporter. -func NewReporter(agentInfo agentInfo, l *logger.Logger, c *config.Config) (*Reporter, error) { - r := &Reporter{ - info: agentInfo, - queue: make([]fleetapi.SerializableEvent, 0), - logger: l, - threshold: c.Threshold, - } - - return r, nil -} - -// Report enqueue event into reporter queue. -func (r *Reporter) Report(ctx context.Context, e reporter.Event) error { - r.qlock.Lock() - defer r.qlock.Unlock() - - r.queue = append(r.queue, &event{ - AgentID: r.info.AgentID(), - EventType: e.Type(), - Ts: fleetapi.Time(e.Time()), - SubType: e.SubType(), - Msg: e.Message(), - Payload: e.Payload(), - }) - - if r.threshold > 0 && len(r.queue) > r.threshold { - // drop some low importance event if needed - r.dropEvent() - } - - return nil -} - -// Events returns a list of event from a queue and a ack function -// which clears those events once caller is done with processing. -func (r *Reporter) Events() ([]fleetapi.SerializableEvent, func()) { - r.qlock.Lock() - defer r.qlock.Unlock() - - cp := r.queueCopy() - - ackFn := func() { - // as time is monotonic and this is on single machine this should be ok. - r.clear(cp, time.Now()) - } - - return cp, ackFn -} - -func (r *Reporter) clear(items []fleetapi.SerializableEvent, ackTime time.Time) { - r.qlock.Lock() - defer r.qlock.Unlock() - - if ackTime.Sub(r.lastAck) <= 0 || - len(r.queue) == 0 || - items == nil || - len(items) == 0 { - return - } - - var dropIdx int - r.lastAck = ackTime - itemsLen := len(items) - -OUTER: - for idx := itemsLen - 1; idx >= 0; idx-- { - for i, v := range r.queue { - if v == items[idx] { - dropIdx = i - break OUTER - } - } - } - - r.queue = r.queue[dropIdx+1:] -} - -// Close stops all the background jobs reporter is running. -// Guards against panic of closing channel multiple times. -func (r *Reporter) Close() error { - return nil -} - -func (r *Reporter) queueCopy() []fleetapi.SerializableEvent { - size := len(r.queue) - batch := make([]fleetapi.SerializableEvent, size) - - copy(batch, r.queue) - return batch -} - -func (r *Reporter) dropEvent() { - if dropped := r.tryDropInfo(); !dropped { - r.dropFirst() - } -} - -// tryDropInfo returns true if info was found and dropped. -func (r *Reporter) tryDropInfo() bool { - for i, e := range r.queue { - if e.Type() != reporter.EventTypeError { - r.queue = append(r.queue[:i], r.queue[i+1:]...) - r.logger.Infof("fleet reporter dropped event because threshold[%d] was reached: %v", r.threshold, e) - return true - } - } - - return false -} - -func (r *Reporter) dropFirst() { - if len(r.queue) == 0 { - return - } - - first := r.queue[0] - r.logger.Infof("fleet reporter dropped event because threshold[%d] was reached: %v", r.threshold, first) - r.queue = r.queue[1:] -} - -// Check it is reporter.Backend. -var _ reporter.Backend = &Reporter{} diff --git a/internal/pkg/reporter/fleet/reporter_test.go b/internal/pkg/reporter/fleet/reporter_test.go deleted file mode 100644 index c5160168a98..00000000000 --- a/internal/pkg/reporter/fleet/reporter_test.go +++ /dev/null @@ -1,241 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package fleet - -import ( - "context" - "testing" - "time" - - "github.com/elastic/elastic-agent/internal/pkg/fleetapi" - "github.com/elastic/elastic-agent/internal/pkg/reporter" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -func TestEventsHaveAgentID(t *testing.T) { - // setup client - threshold := 10 - r := newTestReporter(1*time.Second, threshold) - - // report events - firstBatchSize := 5 - ee := getEvents(firstBatchSize) - for _, e := range ee { - r.Report(context.Background(), e) - } - - // check after delay for output - reportedEvents, _ := r.Events() - if reportedCount := len(reportedEvents); reportedCount != firstBatchSize { - t.Fatalf("expected %v events got %v", firstBatchSize, reportedCount) - } - - for _, e := range reportedEvents { - re, ok := e.(*event) - - if !ok { - t.Fatal("reported event is not an event") - } - - if re.AgentID != "agentID" { - t.Fatalf("reported event id incorrect, expected: 'agentID', got: '%v'", re.AgentID) - } - } - -} - -func TestReporting(t *testing.T) { - // setup client - threshold := 10 - r := newTestReporter(1*time.Second, threshold) - - // report events - firstBatchSize := 5 - ee := getEvents(firstBatchSize) - for _, e := range ee { - r.Report(context.Background(), e) - } - - // check after delay for output - reportedEvents, ack := r.Events() - if reportedCount := len(reportedEvents); reportedCount != firstBatchSize { - t.Fatalf("expected %v events got %v", firstBatchSize, reportedCount) - } - - // reset reported events - ack() - - // report events > threshold - secondBatchSize := threshold + 1 - ee = getEvents(secondBatchSize) - for _, e := range ee { - r.Report(context.Background(), e) - } - - // check events are dropped - reportedEvents, _ = r.Events() - if reportedCount := len(reportedEvents); reportedCount != threshold { - t.Fatalf("expected %v events got %v", secondBatchSize, reportedCount) - } -} - -func TestInfoDrop(t *testing.T) { - // setup client - threshold := 2 - r := newTestReporter(2*time.Second, threshold) - - // report 1 info and 1 error - ee := []reporter.Event{testStateEvent{}, testErrorEvent{}, testErrorEvent{}} - - for _, e := range ee { - r.Report(context.Background(), e) - } - - // check after delay for output - reportedEvents, _ := r.Events() - if reportedCount := len(reportedEvents); reportedCount != 2 { - t.Fatalf("expected %v events got %v", 2, reportedCount) - } - - // check both are errors - if reportedEvents[0].Type() != reportedEvents[1].Type() || reportedEvents[0].Type() != reporter.EventTypeError { - t.Fatalf("expected ERROR events got [1]: '%v', [2]: '%v'", reportedEvents[0].Type(), reportedEvents[1].Type()) - } -} - -func TestOutOfOrderAck(t *testing.T) { - // setup client - threshold := 100 - r := newTestReporter(1*time.Second, threshold) - - // report events - firstBatchSize := 5 - ee := getEvents(firstBatchSize) - for _, e := range ee { - r.Report(context.Background(), e) - } - - // check after delay for output - reportedEvents1, ack1 := r.Events() - if reportedCount := len(reportedEvents1); reportedCount != firstBatchSize { - t.Fatalf("expected %v events got %v", firstBatchSize, reportedCount) - } - - // report events > threshold - secondBatchSize := threshold + 1 - ee = getEvents(secondBatchSize) - for _, e := range ee { - r.Report(context.Background(), e) - } - - // check all events are returned - reportedEvents2, ack2 := r.Events() - if reportedCount := len(reportedEvents2); reportedCount == firstBatchSize+secondBatchSize { - t.Fatalf("expected %v events got %v", 
secondBatchSize, reportedCount) - } - - // ack second batch - ack2() - - reportedEvents, _ := r.Events() - if reportedCount := len(reportedEvents); reportedCount != 0 { - t.Fatalf("expected all events are removed after second batch ack, got %v events", reportedCount) - } - - defer func() { - r := recover() - if r != nil { - t.Fatalf("expected ack is ignored but it paniced: %v", r) - } - }() - - ack1() - reportedEvents, _ = r.Events() - if reportedCount := len(reportedEvents); reportedCount != 0 { - t.Fatalf("expected all events are still removed after first batch ack, got %v events", reportedCount) - } -} - -func TestAfterDrop(t *testing.T) { - // setup client - threshold := 7 - r := newTestReporter(1*time.Second, threshold) - - // report events - firstBatchSize := 5 - ee := getEvents(firstBatchSize) - for _, e := range ee { - r.Report(context.Background(), e) - } - - // check after delay for output - reportedEvents1, ack1 := r.Events() - if reportedCount := len(reportedEvents1); reportedCount != firstBatchSize { - t.Fatalf("expected %v events got %v", firstBatchSize, reportedCount) - } - - // report events > threshold - secondBatchSize := 5 - ee = getEvents(secondBatchSize) - for _, e := range ee { - r.Report(context.Background(), e) - } - - // check all events are returned - reportedEvents2, _ := r.Events() - if reportedCount := len(reportedEvents2); reportedCount != threshold { - t.Fatalf("expected %v events got %v", secondBatchSize, reportedCount) - } - - // remove first batch from queue - ack1() - - reportedEvents, _ := r.Events() - if reportedCount := len(reportedEvents); reportedCount != secondBatchSize { - t.Fatalf("expected all events from first batch are removed, got %v events", reportedCount) - } - -} - -func getEvents(count int) []reporter.Event { - ee := make([]reporter.Event, 0, count) - for i := 0; i < count; i++ { - ee = append(ee, testStateEvent{}) - } - - return ee -} - -func newTestReporter(frequency time.Duration, threshold int) *Reporter { - log, _ := logger.New("", false) - r := &Reporter{ - info: &testInfo{}, - queue: make([]fleetapi.SerializableEvent, 0), - logger: log, - threshold: threshold, - } - - return r -} - -type testInfo struct{} - -func (*testInfo) AgentID() string { return "agentID" } - -type testStateEvent struct{} - -func (testStateEvent) Type() string { return reporter.EventTypeState } -func (testStateEvent) SubType() string { return reporter.EventSubTypeInProgress } -func (testStateEvent) Time() time.Time { return time.Unix(0, 1) } -func (testStateEvent) Message() string { return "hello" } -func (testStateEvent) Payload() map[string]interface{} { return map[string]interface{}{"key": 1} } - -type testErrorEvent struct{} - -func (testErrorEvent) Type() string { return reporter.EventTypeError } -func (testErrorEvent) SubType() string { return "PATH" } -func (testErrorEvent) Time() time.Time { return time.Unix(0, 1) } -func (testErrorEvent) Message() string { return "hello" } -func (testErrorEvent) Payload() map[string]interface{} { return map[string]interface{}{"key": 1} } diff --git a/internal/pkg/reporter/log/format.go b/internal/pkg/reporter/log/format.go deleted file mode 100644 index 3cac93aa0cf..00000000000 --- a/internal/pkg/reporter/log/format.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package log - -import ( - "fmt" - "time" -) - -// Format used for logging [DefaultFormat, JSONFormat] -type Format bool - -const ( - // DefaultFormat is a log format, resulting in: "2006-01-02T15:04:05: type: 'STATE': event type: 'STARTING' message: Application 'filebeat' is starting." - DefaultFormat Format = true - // JSONFormat is a log format, resulting in: {"timestamp": "2006-01-02T15:04:05", "type": "STATE", "event": {"type": "STARTING", "message": "Application 'filebeat' is starting."} - JSONFormat Format = false -) - -const ( - // e.g "2006-01-02T15:04:05 - message: Application 'filebeat' is starting. - type: 'STATE' - event type: 'STARTING'" - defaultLogFormat = "%s - message: %s - type: '%s' - sub_type: '%s'" - timeFormat = time.RFC3339 -) - -var formatMap = map[string]Format{ - "default": DefaultFormat, - "json": JSONFormat, -} - -var reverseMap = map[bool]string{ - true: "default", - false: "json", -} - -// Unpack enables using of string values in config -func (m *Format) Unpack(v string) error { - mgt, ok := formatMap[v] - if !ok { - return fmt.Errorf( - "unknown format, received '%s' and valid values are default or json", - v, - ) - } - *m = mgt - return nil -} - -// MarshalYAML marshal into a string. -func (m Format) MarshalYAML() (interface{}, error) { - s, ok := reverseMap[bool(m)] - if !ok { - return nil, fmt.Errorf("cannot marshal value of %+v", m) - } - - return s, nil -} diff --git a/internal/pkg/reporter/log/reporter.go b/internal/pkg/reporter/log/reporter.go deleted file mode 100644 index 394544a75f1..00000000000 --- a/internal/pkg/reporter/log/reporter.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package log - -import ( - "context" - "fmt" - - "github.com/elastic/elastic-agent/internal/pkg/reporter" -) - -type logger interface { - Error(...interface{}) - Info(...interface{}) -} - -// Reporter is a reporter without any effects, serves just as a showcase for further implementations. -type Reporter struct { - logger logger - formatFunc func(record reporter.Event) string -} - -// NewReporter creates a new noop reporter -func NewReporter(l logger) *Reporter { - return &Reporter{ - logger: l, - formatFunc: defaultFormatFunc, - } -} - -// Report in noop reporter does nothing -func (r *Reporter) Report(ctx context.Context, record reporter.Event) error { - if record.Type() == reporter.EventTypeError { - r.logger.Error(r.formatFunc(record)) - return nil - } - - r.logger.Info(r.formatFunc(record)) - return nil -} - -// Close stops all the background jobs reporter is running. -func (r *Reporter) Close() error { return nil } - -func defaultFormatFunc(e reporter.Event) string { - return fmt.Sprintf(defaultLogFormat, - e.Time().Format(timeFormat), - e.Message(), - e.Type(), - e.SubType(), - ) -} - -// Check it is reporter.Backend -var _ reporter.Backend = &Reporter{} diff --git a/internal/pkg/reporter/log/reporter_test.go b/internal/pkg/reporter/log/reporter_test.go deleted file mode 100644 index 5453c11c674..00000000000 --- a/internal/pkg/reporter/log/reporter_test.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package log - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/elastic/elastic-agent/internal/pkg/reporter" -) - -type testCase struct { - event reporter.Event - expectedInfo string - expectedError string -} - -func TestReport(t *testing.T) { - infoEvent := generateEvent(reporter.EventTypeState, reporter.EventSubTypeStarting) - errorEvent := generateEvent(reporter.EventTypeError, reporter.EventSubTypeConfig) - - testCases := []testCase{ - {infoEvent, DefaultString(infoEvent), ""}, - {errorEvent, "", DefaultString(errorEvent)}, - } - - for _, tc := range testCases { - log := newTestLogger() - rep := NewReporter(log) - - rep.Report(context.Background(), tc.event) - - if got := log.info(); tc.expectedInfo != got { - t.Errorf("[%s(%v)] expected info '%s' got '%s'", tc.event.Type(), tc.event.SubType(), tc.expectedInfo, got) - } - - if got := log.error(); tc.expectedError != got { - t.Errorf("[%s(%v)] expected error '%s' got '%s'", tc.event.Type(), tc.event.SubType(), tc.expectedError, got) - } - } -} - -type testLogger struct { - errorLog string - infoLog string -} - -func newTestLogger() *testLogger { - t := &testLogger{} - return t -} - -func (t *testLogger) Error(args ...interface{}) { - t.errorLog = fmt.Sprint(args...) -} - -func (t *testLogger) Info(args ...interface{}) { - t.infoLog = fmt.Sprint(args...) -} - -func (t *testLogger) error() string { - return t.errorLog -} - -func (t *testLogger) info() string { - return t.infoLog -} - -func generateEvent(eventype, subType string) testEvent { - return testEvent{ - eventtype: eventype, - subType: subType, - timestamp: time.Unix(0, 1), - message: "message", - } -} - -type testEvent struct { - eventtype string - subType string - timestamp time.Time - message string -} - -func (t testEvent) Type() string { return t.eventtype } -func (t testEvent) SubType() string { return t.subType } -func (t testEvent) Time() time.Time { return t.timestamp } -func (t testEvent) Message() string { return t.message } -func (testEvent) Payload() map[string]interface{} { return map[string]interface{}{} } - -func DefaultString(event testEvent) string { - timestamp := event.timestamp.Format(timeFormat) - return fmt.Sprintf("%s - message: message - type: '%s' - sub_type: '%s'", timestamp, event.Type(), event.SubType()) -} diff --git a/internal/pkg/reporter/noop/reporter.go b/internal/pkg/reporter/noop/reporter.go deleted file mode 100644 index 5effde4a396..00000000000 --- a/internal/pkg/reporter/noop/reporter.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package noop - -import ( - "context" - - "github.com/elastic/elastic-agent/internal/pkg/reporter" -) - -// Reporter is a reporter without any effects, serves just as a showcase for further implementations. -type Reporter struct{} - -// NewReporter creates a new noop reporter -func NewReporter() *Reporter { - return &Reporter{} -} - -// Report in noop reporter does nothing -func (*Reporter) Report(_ context.Context, _ reporter.Event) error { return nil } - -// Close stops all the background jobs reporter is running. 
-func (*Reporter) Close() error { return nil } - -// Check it is reporter.Backend -var _ reporter.Backend = &Reporter{} diff --git a/internal/pkg/reporter/reporter.go b/internal/pkg/reporter/reporter.go deleted file mode 100644 index 8c2a6c12ccb..00000000000 --- a/internal/pkg/reporter/reporter.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package reporter - -import ( - "context" - "fmt" - "time" - - "github.com/hashicorp/go-multierror" - - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -const ( - // EventTypeState is an record type describing application state change - EventTypeState = "STATE" - // EventTypeError is an record type describing application error - EventTypeError = "ERROR" - // EventTypeActionResult is an record type describing applications result of an action - EventTypeActionResult = "ACTION_RESULT" - - // EventSubTypeStopped is an event type indicating application is stopped. - EventSubTypeStopped = "STOPPED" - // EventSubTypeStarting is an event type indicating application is starting. - EventSubTypeStarting = "STARTING" - // EventSubTypeInProgress is an event type indicating application is in progress. - EventSubTypeInProgress = "IN_PROGRESS" - // EventSubTypeConfig is an event indicating application config related event. - EventSubTypeConfig = "CONFIG" - // EventSubTypeRunning is an event indicating application running related event. - EventSubTypeRunning = "RUNNING" - // EventSubTypeFailed is an event type indicating application is failed. - EventSubTypeFailed = "FAILED" - // EventSubTypeStopping is an event type indicating application is stopping. - EventSubTypeStopping = "STOPPING" - // EventSubTypeUpdating is an event type indicating update process in progress. - EventSubTypeUpdating = "UPDATING" -) - -type agentInfo interface { - AgentID() string -} - -// Reporter uses multiple backends which needs to be non-blocking -// to report various events. -type Reporter struct { - ctx context.Context - info agentInfo - backends []Backend - - l *logger.Logger -} - -// NewReporter creates a new reporter with provided set of Backends. -func NewReporter(ctx context.Context, logger *logger.Logger, info agentInfo, backends ...Backend) *Reporter { - return &Reporter{ - ctx: ctx, - info: info, - backends: backends, - l: logger, - } -} - -// Close stops the reporter. For further reporting new reporter needs to be created. -func (r *Reporter) Close() { - for _, c := range r.backends { - c.Close() - } -} - -// OnStateChange called when state of an application changes. 
-func (r *Reporter) OnStateChange(id string, name string, state state.State) { - rec := generateRecord(r.info.AgentID(), id, name, state) - r.report(r.ctx, rec) -} - -func (r *Reporter) report(ctx context.Context, e event) { - var err error - - for _, b := range r.backends { - if er := b.Report(ctx, e); er != nil { - err = multierror.Append(err, er) - } - } - - if err != nil { - r.l.Error(errors.New(err, "failed reporting event")) - } -} - -func generateRecord(agentID string, id string, name string, s state.State) event { - eventType := EventTypeState - - var subType string - var subTypeText string - switch s.Status { - case state.Stopped: - subType = EventSubTypeStopped - subTypeText = EventSubTypeStopped - case state.Starting: - subType = EventSubTypeStarting - subTypeText = EventSubTypeStarting - case state.Configuring: - subType = EventSubTypeConfig - subTypeText = EventSubTypeConfig - case state.Healthy: - subType = EventSubTypeRunning - subTypeText = EventSubTypeRunning - case state.Degraded: - // Fleet doesn't understand degraded - subType = EventSubTypeRunning - subTypeText = "DEGRADED" - case state.Failed: - eventType = EventTypeError - subType = EventSubTypeFailed - subTypeText = EventSubTypeFailed - case state.Crashed: - eventType = EventTypeError - subType = EventSubTypeFailed - subTypeText = "CRASHED" - case state.Stopping: - subType = EventSubTypeStopping - subTypeText = EventSubTypeStopping - case state.Restarting: - subType = EventSubTypeStarting - subTypeText = "RESTARTING" - case state.Updating: - subType = EventSubTypeUpdating - subTypeText = EventSubTypeUpdating - - } - - err := errors.New( - fmt.Errorf(s.Message), - fmt.Sprintf("Application: %s[%s]: State changed to %s", id, agentID, subTypeText), - errors.TypeApplication, - errors.M(errors.MetaKeyAppID, id), - errors.M(errors.MetaKeyAppName, name)) - var payload map[string]interface{} - if s.Payload != nil { - payload = map[string]interface{}{ - name: s.Payload, - } - } - return event{ - eventype: eventType, - subType: subType, - timestamp: time.Now(), - message: err.Error(), - payload: payload, - } -} diff --git a/internal/pkg/reporter/reporter_test.go b/internal/pkg/reporter/reporter_test.go deleted file mode 100644 index d5f3cf4ef51..00000000000 --- a/internal/pkg/reporter/reporter_test.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package reporter - -import ( - "context" - "fmt" - "strings" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/elastic/elastic-agent/internal/pkg/core/state" -) - -var result Event - -type testReporter struct{} - -func (t *testReporter) Close() error { return nil } -func (t *testReporter) Report(_ context.Context, r Event) error { - result = r - return nil -} - -type info struct{} - -func (*info) AgentID() string { return "id" } - -type testScenario struct { - Status state.Status - StatusMessage string - EventType string - EventSubType string - EventMessage string -} - -func TestTypes(t *testing.T) { - rep := NewReporter(context.Background(), nil, &info{}, &testReporter{}) - scenarios := []testScenario{ - { - Status: state.Stopped, - StatusMessage: "Stopped", - EventType: EventTypeState, - EventSubType: EventSubTypeStopped, - EventMessage: "Application: a-stopped[id]: State changed to STOPPED: Stopped", - }, - { - Status: state.Starting, - StatusMessage: "Starting", - EventType: EventTypeState, - EventSubType: EventSubTypeStarting, - EventMessage: "Application: a-starting[id]: State changed to STARTING: Starting", - }, - { - Status: state.Configuring, - StatusMessage: "Configuring", - EventType: EventTypeState, - EventSubType: EventSubTypeConfig, - EventMessage: "Application: a-configuring[id]: State changed to CONFIG: Configuring", - }, - { - Status: state.Healthy, - StatusMessage: "Healthy", - EventType: EventTypeState, - EventSubType: EventSubTypeRunning, - EventMessage: "Application: a-healthy[id]: State changed to RUNNING: Healthy", - }, - { - Status: state.Degraded, - StatusMessage: "Degraded", - EventType: EventTypeState, - EventSubType: EventSubTypeRunning, - EventMessage: "Application: a-degraded[id]: State changed to DEGRADED: Degraded", - }, - { - Status: state.Failed, - StatusMessage: "Failed", - EventType: EventTypeError, - EventSubType: EventSubTypeFailed, - EventMessage: "Application: a-failed[id]: State changed to FAILED: Failed", - }, - { - Status: state.Crashed, - StatusMessage: "Crashed", - EventType: EventTypeError, - EventSubType: EventSubTypeFailed, - EventMessage: "Application: a-crashed[id]: State changed to CRASHED: Crashed", - }, - { - Status: state.Stopping, - StatusMessage: "Stopping", - EventType: EventTypeState, - EventSubType: EventSubTypeStopping, - EventMessage: "Application: a-stopping[id]: State changed to STOPPING: Stopping", - }, - { - Status: state.Restarting, - StatusMessage: "Restarting", - EventType: EventTypeState, - EventSubType: EventSubTypeStarting, - EventMessage: "Application: a-restarting[id]: State changed to RESTARTING: Restarting", - }, - } - for _, scenario := range scenarios { - t.Run(scenario.StatusMessage, func(t *testing.T) { - appID := fmt.Sprintf("a-%s", strings.ToLower(scenario.StatusMessage)) - appName := fmt.Sprintf("app-%s", strings.ToLower(scenario.StatusMessage)) - rep.OnStateChange(appID, appName, state.State{ - Status: scenario.Status, - Message: scenario.StatusMessage, - }) - assert.Equal(t, scenario.EventType, result.Type()) - assert.Equal(t, scenario.EventSubType, result.SubType()) - assert.Equal(t, scenario.EventMessage, result.Message()) - }) - } -} From 16595551a008c5fc8aa7c99377f4753f9d987808 Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Thu, 21 Jul 2022 10:54:30 -0400 Subject: [PATCH 16/19] Clean up step_download from refactor. 
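The earlier file moves left numbered import aliases (download2, composed2, fs2,
http2, snapshot2) behind in step_download.go; this switches back to the packages'
own names and adjusts the call sites accordingly. A minimal sketch of the pattern,
using one of the affected imports (the full set is in the diff below):

    // before: numbered alias left over from the refactor
    import download2 "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download"

    // after: the package's own name, no alias needed
    import "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download"

The commit also removes internal/pkg/agent/application/managed_mode_test.go.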
---
 .../application/upgrade/step_download.go | 33 +-
 1 file changed, 16 insertions(+), 17 deletions(-)

diff --git a/internal/pkg/agent/application/upgrade/step_download.go b/internal/pkg/agent/application/upgrade/step_download.go
index 76cad7a416a..926e310fda3 100644
--- a/internal/pkg/agent/application/upgrade/step_download.go
+++ b/internal/pkg/agent/application/upgrade/step_download.go
@@ -8,16 +8,15 @@ import (
 	"context"
 	"strings"
 
-	"github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact"
-	download2 "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download"
-	composed2 "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/composed"
-	fs2 "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/fs"
-	http2 "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/http"
-	"github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/localremote"
-	snapshot2 "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/snapshot"
-
 	"go.elastic.co/apm"
 
+	"github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact"
+	"github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download"
+	"github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/composed"
+	"github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/fs"
+	"github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/http"
+	"github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/localremote"
+	"github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/snapshot"
 	"github.com/elastic/elastic-agent/internal/pkg/agent/errors"
 	"github.com/elastic/elastic-agent/internal/pkg/release"
 	"github.com/elastic/elastic-agent/pkg/core/logger"
@@ -63,45 +62,45 @@ func (u *Upgrader) downloadArtifact(ctx context.Context, version, sourceURI stri
 	return path, nil
 }
 
-func newDownloader(version 
string, log *logger.Logger, settings *artifact.Config) (download2.Downloader, error) { +func newDownloader(version string, log *logger.Logger, settings *artifact.Config) (download.Downloader, error) { if !strings.HasSuffix(version, "-SNAPSHOT") { return localremote.NewDownloader(log, settings) } // try snapshot repo before official - snapDownloader, err := snapshot2.NewDownloader(log, settings, version) + snapDownloader, err := snapshot.NewDownloader(log, settings, version) if err != nil { return nil, err } - httpDownloader, err := http2.NewDownloader(log, settings) + httpDownloader, err := http.NewDownloader(log, settings) if err != nil { return nil, err } - return composed2.NewDownloader(fs2.NewDownloader(settings), snapDownloader, httpDownloader), nil + return composed.NewDownloader(fs.NewDownloader(settings), snapDownloader, httpDownloader), nil } -func newVerifier(version string, log *logger.Logger, settings *artifact.Config) (download2.Verifier, error) { +func newVerifier(version string, log *logger.Logger, settings *artifact.Config) (download.Verifier, error) { allowEmptyPgp, pgp := release.PGP() if !strings.HasSuffix(version, "-SNAPSHOT") { return localremote.NewVerifier(log, settings, allowEmptyPgp, pgp) } - fsVerifier, err := fs2.NewVerifier(settings, allowEmptyPgp, pgp) + fsVerifier, err := fs.NewVerifier(settings, allowEmptyPgp, pgp) if err != nil { return nil, err } - snapshotVerifier, err := snapshot2.NewVerifier(settings, allowEmptyPgp, pgp, version) + snapshotVerifier, err := snapshot.NewVerifier(settings, allowEmptyPgp, pgp, version) if err != nil { return nil, err } - remoteVerifier, err := http2.NewVerifier(settings, allowEmptyPgp, pgp) + remoteVerifier, err := http.NewVerifier(settings, allowEmptyPgp, pgp) if err != nil { return nil, err } - return composed2.NewVerifier(fsVerifier, snapshotVerifier, remoteVerifier), nil + return composed.NewVerifier(fsVerifier, snapshotVerifier, remoteVerifier), nil } From 1191d107b514e2de164a32eb336095d27ef3c827 Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Thu, 21 Jul 2022 13:35:43 -0400 Subject: [PATCH 17/19] Remove outdated managed_mode_test.go. --- .../agent/application/managed_mode_test.go | 304 ------------------ 1 file changed, 304 deletions(-) delete mode 100644 internal/pkg/agent/application/managed_mode_test.go diff --git a/internal/pkg/agent/application/managed_mode_test.go b/internal/pkg/agent/application/managed_mode_test.go deleted file mode 100644 index 206dd90079e..00000000000 --- a/internal/pkg/agent/application/managed_mode_test.go +++ /dev/null @@ -1,304 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package application - -import ( - "context" - "encoding/json" - "testing" - - handlers2 "github.com/elastic/elastic-agent/internal/pkg/agent/application/actions/handlers" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/dispatcher" - - "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - noopacker "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/noop" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/emitter" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/emitter/modifiers" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/router" - "github.com/elastic/elastic-agent/internal/pkg/agent/configrequest" - "github.com/elastic/elastic-agent/internal/pkg/agent/storage" - "github.com/elastic/elastic-agent/internal/pkg/composable" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -func TestManagedModeRouting(t *testing.T) { - - streams := make(map[pipeline.RoutingKey]pipeline.Stream) - streamFn := func(l *logger.Logger, r pipeline.RoutingKey) (pipeline.Stream, error) { - m := newMockStreamStore() - streams[r] = m - - return m, nil - } - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - log, _ := logger.New("", false) - router, _ := router.New(log, streamFn) - agentInfo, _ := info.NewAgentInfo(false) - nullStore := &storage.NullStore{} - composableCtrl, _ := composable.New(log, nil) - emit, err := emitter.New(ctx, log, agentInfo, composableCtrl, router, &pipeline.ConfigModifiers{Decorators: []pipeline.DecoratorFunc{modifiers.InjectMonitoring}}, nil) - require.NoError(t, err) - - actionDispatcher, err := dispatcher.New(ctx, log, handlers2.NewDefault(log)) - require.NoError(t, err) - - cfg := configuration.DefaultConfiguration() - actionDispatcher.MustRegister( - &fleetapi.ActionPolicyChange{}, - handlers2.NewPolicyChange( - log, - emit, - agentInfo, - cfg, - nullStore, - ), - ) - - actions, err := testActions() - require.NoError(t, err) - - err = actionDispatcher.Dispatch(context.Background(), noopacker.NewAcker(), actions...) - require.NoError(t, err) - - // has 1 config request for fb, mb and monitoring? 
- assert.Equal(t, 1, len(streams)) - - defaultStreamStore, found := streams["default"] - assert.True(t, found, "default group not found") - assert.Equal(t, 1, len(defaultStreamStore.(*mockStreamStore).store)) - - confReq := defaultStreamStore.(*mockStreamStore).store[0] - assert.Equal(t, 3, len(confReq.ProgramNames())) - assert.Equal(t, modifiers.MonitoringName, confReq.ProgramNames()[2]) -} - -func testActions() ([]fleetapi.Action, error) { - checkinResponse := &fleetapi.CheckinResponse{} - if err := json.Unmarshal([]byte(fleetResponse), &checkinResponse); err != nil { - return nil, err - } - - return checkinResponse.Actions, nil -} - -type mockStreamStore struct { - store []configrequest.Request -} - -func newMockStreamStore() *mockStreamStore { - return &mockStreamStore{ - store: make([]configrequest.Request, 0), - } -} - -func (m *mockStreamStore) Execute(_ context.Context, cr configrequest.Request) error { - m.store = append(m.store, cr) - return nil -} - -func (m *mockStreamStore) Close() error { - return nil -} - -func (m *mockStreamStore) Shutdown() {} - -const fleetResponse = ` -{ - "action": "checkin", - "actions": [{ - "agent_id": "17e93530-7f42-11ea-9330-71e968b29fa4", - "type": "POLICY_CHANGE", - "data": { - "policy": { - "id": "86561d50-7f3b-11ea-9fab-3db3bdb4efa4", - "outputs": { - "default": { - "type": "elasticsearch", - "hosts": [ - "http://localhost:9200" - ], - "api_key": "pNr6fnEBupQ3-5oEEkWJ:FzhrQOzZSG-Vpsq9CGk4oA" - } - }, - - "inputs": [{ - "type": "system/metrics", - "enabled": true, - "streams": [{ - "id": "system/metrics-system.core", - "enabled": true, - "data_stream.dataset": "system.core", - "period": "10s", - "metrics": [ - "percentages" - ] - }, - { - "id": "system/metrics-system.cpu", - "enabled": true, - "data_stream.dataset": "system.cpu", - "period": "10s", - "metrics": [ - "percentages", - "normalized_percentages" - ] - }, - { - "id": "system/metrics-system.diskio", - "enabled": true, - "data_stream.dataset": "system.diskio", - "period": "10s", - "include_devices": [] - }, - { - "id": "system/metrics-system.entropy", - "enabled": true, - "data_stream.dataset": "system.entropy", - "period": "10s", - "include_devices": [] - }, - { - "id": "system/metrics-system.filesystem", - "enabled": true, - "data_stream.dataset": "system.filesystem", - "period": "1m", - "ignore_types": [] - }, - { - "id": "system/metrics-system.fsstat", - "enabled": true, - "data_stream.dataset": "system.fsstat", - "period": "1m", - "ignore_types": [] - }, - { - "id": "system/metrics-system.load", - "enabled": true, - "data_stream.dataset": "system.load", - "period": "10s" - }, - { - "id": "system/metrics-system.memory", - "enabled": true, - "data_stream.dataset": "system.memory", - "period": "10s" - }, - { - "id": "system/metrics-system.network", - "enabled": true, - "data_stream.dataset": "system.network", - "period": "10s" - }, - { - "id": "system/metrics-system.network_summary", - "enabled": true, - "data_stream.dataset": "system.network_summary", - "period": "10s" - }, - { - "id": "system/metrics-system.process", - "enabled": true, - "data_stream.dataset": "system.process", - "period": "10s", - "processes": [ - ".*" - ], - "include_top_n.enabled": true, - "include_top_n.by_cpu": 5, - "include_top_n.by_memory": 5, - "cmdline.cache.enabled": true, - "cgroups.enabled": true, - "env.whitelist": [], - "include_cpu_ticks": false - }, - { - "id": "system/metrics-system.process_summary", - "enabled": true, - "data_stream.dataset": "system.process_summary", - "period": "10s" - }, - { - 
"id": "system/metrics-system.raid", - "enabled": true, - "data_stream.dataset": "system.raid", - "period": "10s", - "mount_point": "/" - }, - { - "id": "system/metrics-system.service", - "enabled": true, - "data_stream.dataset": "system.service", - "period": "10s", - "state_filter": [] - }, - { - "id": "system/metrics-system.socket_summary", - "enabled": true, - "data_stream.dataset": "system.socket_summary", - "period": "10s" - }, - { - "id": "system/metrics-system.uptime", - "enabled": true, - "data_stream.dataset": "system.uptime", - "period": "15m" - }, - { - "id": "system/metrics-system.users", - "enabled": true, - "data_stream.dataset": "system.users", - "period": "10s" - } - ] - }, - { - "type": "logfile", - "enabled": true, - "streams": [{ - "id": "logs-system.auth", - "enabled": true, - "data_stream.dataset": "system.auth", - "paths": [ - "/var/log/auth.log*", - "/var/log/secure*" - ] - }, - { - "id": "logs-system.syslog", - "enabled": true, - "data_stream.dataset": "system.syslog", - "paths": [ - "/var/log/messages*", - "/var/log/syslog*" - ] - } - ] - } - ], - - "revision": 3, - "agent.monitoring": { - "use_output": "default", - "enabled": true, - "logs": true, - "metrics": true - } - } - }, - "id": "1c7e26a0-7f42-11ea-9330-71e968b29fa4", - "created_at": "2020-04-15T17:54:11.081Z" - }] -} - ` From c8f9c456b073a5d58ad75d884e5051c4963d5804 Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Fri, 22 Jul 2022 09:55:45 -0400 Subject: [PATCH 18/19] Fixes from code review and lint. --- internal/pkg/composable/context.go | 8 ++++++++ internal/pkg/composable/controller.go | 8 ++++---- internal/pkg/composable/controller_test.go | 9 ++++++--- internal/pkg/composable/dynamic.go | 8 ++++++++ internal/pkg/composable/providers/agent/agent.go | 2 +- internal/pkg/composable/providers/docker/docker.go | 2 +- internal/pkg/composable/providers/env/env.go | 2 +- internal/pkg/composable/providers/host/host.go | 2 +- .../pkg/composable/providers/kubernetes/kubernetes.go | 2 +- .../kubernetes_leaderelection.go | 2 +- .../providers/kubernetessecrets/kubernetes_secrets.go | 2 +- internal/pkg/composable/providers/local/local.go | 2 +- .../composable/providers/localdynamic/localdynamic.go | 2 +- internal/pkg/composable/providers/path/path.go | 2 +- version/version.go | 2 +- 15 files changed, 37 insertions(+), 18 deletions(-) diff --git a/internal/pkg/composable/context.go b/internal/pkg/composable/context.go index 1dcb50cf956..97767f4a5d5 100644 --- a/internal/pkg/composable/context.go +++ b/internal/pkg/composable/context.go @@ -16,6 +16,14 @@ import ( // ContextProviderBuilder creates a new context provider based on the given config and returns it. type ContextProviderBuilder func(log *logger.Logger, config *config.Config) (corecomp.ContextProvider, error) +// MustAddContextProvider adds a new ContextProviderBuilder and panics if it AddContextProvider returns an error. 
+func (r *providerRegistry) MustAddContextProvider(name string, builder ContextProviderBuilder) { + err := r.AddContextProvider(name, builder) + if err != nil { + panic(err) + } +} + // AddContextProvider adds a new ContextProviderBuilder func (r *providerRegistry) AddContextProvider(name string, builder ContextProviderBuilder) error { r.lock.Lock() diff --git a/internal/pkg/composable/controller.go b/internal/pkg/composable/controller.go index 41b994b7406..116424ae8e4 100644 --- a/internal/pkg/composable/controller.go +++ b/internal/pkg/composable/controller.go @@ -119,14 +119,14 @@ func (c *controller) Run(ctx context.Context) error { for name, state := range c.contextProviders { state.Context = localCtx state.signal = notify - go func() { + go func(name string, state *contextProviderState) { defer wg.Done() err := state.provider.Run(state) if err != nil && !errors.Is(err, context.Canceled) { err = errors.New(err, fmt.Sprintf("failed to run provider '%s'", name), errors.TypeConfig, errors.M("provider", name)) c.logger.Errorf("%s", err) } - }() + }(name, state) if p, ok := state.provider.(corecomp.FetchContextProvider); ok { _, _ = fetchContextProviders.Put(name, p) } @@ -136,14 +136,14 @@ func (c *controller) Run(ctx context.Context) error { for name, state := range c.dynamicProviders { state.Context = localCtx state.signal = notify - go func() { + go func(name string, state *dynamicProviderState) { defer wg.Done() err := state.provider.Run(state) if err != nil && !errors.Is(err, context.Canceled) { err = errors.New(err, fmt.Sprintf("failed to run provider '%s'", name), errors.TypeConfig, errors.M("provider", name)) c.logger.Errorf("%s", err) } - }() + }(name, state) } c.logger.Debugf("Started controller for composable inputs") diff --git a/internal/pkg/composable/controller_test.go b/internal/pkg/composable/controller_test.go index 9469639dd79..a8c3ec7df93 100644 --- a/internal/pkg/composable/controller_test.go +++ b/internal/pkg/composable/controller_test.go @@ -117,14 +117,17 @@ func TestController(t *testing.T) { _, envExists := setVars[0].Lookup("env") assert.False(t, envExists) local, _ := setVars[0].Lookup("local") - localMap := local.(map[string]interface{}) + localMap, ok := local.(map[string]interface{}) + require.True(t, ok) assert.Equal(t, "value1", localMap["key1"]) local, _ = setVars[1].Lookup("local_dynamic") - localMap = local.(map[string]interface{}) + localMap, ok = local.(map[string]interface{}) + require.True(t, ok) assert.Equal(t, "value1", localMap["key1"]) local, _ = setVars[2].Lookup("local_dynamic") - localMap = local.(map[string]interface{}) + localMap, ok = local.(map[string]interface{}) + require.True(t, ok) assert.Equal(t, "value2", localMap["key1"]) } diff --git a/internal/pkg/composable/dynamic.go b/internal/pkg/composable/dynamic.go index a0de3543a1c..c83c2ccc2e2 100644 --- a/internal/pkg/composable/dynamic.go +++ b/internal/pkg/composable/dynamic.go @@ -36,6 +36,14 @@ type DynamicProvider interface { // DynamicProviderBuilder creates a new dynamic provider based on the given config and returns it. type DynamicProviderBuilder func(log *logger.Logger, config *config.Config) (DynamicProvider, error) +// MustAddDynamicProvider adds a new DynamicProviderBuilder and panics if it AddDynamicProvider returns an error. 
+func (r *providerRegistry) MustAddDynamicProvider(name string, builder DynamicProviderBuilder) { + err := r.AddDynamicProvider(name, builder) + if err != nil { + panic(err) + } +} + // AddDynamicProvider adds a new DynamicProviderBuilder func (r *providerRegistry) AddDynamicProvider(name string, builder DynamicProviderBuilder) error { r.lock.Lock() diff --git a/internal/pkg/composable/providers/agent/agent.go b/internal/pkg/composable/providers/agent/agent.go index 2b9d0ff3deb..ed8eb956afe 100644 --- a/internal/pkg/composable/providers/agent/agent.go +++ b/internal/pkg/composable/providers/agent/agent.go @@ -15,7 +15,7 @@ import ( ) func init() { - composable.Providers.AddContextProvider("agent", ContextProviderBuilder) + composable.Providers.MustAddContextProvider("agent", ContextProviderBuilder) } type contextProvider struct{} diff --git a/internal/pkg/composable/providers/docker/docker.go b/internal/pkg/composable/providers/docker/docker.go index 0652201a17f..8647677e6e8 100644 --- a/internal/pkg/composable/providers/docker/docker.go +++ b/internal/pkg/composable/providers/docker/docker.go @@ -23,7 +23,7 @@ import ( const ContainerPriority = 0 func init() { - _ = composable.Providers.AddDynamicProvider("docker", DynamicProviderBuilder) + composable.Providers.MustAddDynamicProvider("docker", DynamicProviderBuilder) } type dockerContainerData struct { diff --git a/internal/pkg/composable/providers/env/env.go b/internal/pkg/composable/providers/env/env.go index 4c6b5911f47..6f65120de48 100644 --- a/internal/pkg/composable/providers/env/env.go +++ b/internal/pkg/composable/providers/env/env.go @@ -16,7 +16,7 @@ import ( ) func init() { - composable.Providers.AddContextProvider("env", ContextProviderBuilder) + composable.Providers.MustAddContextProvider("env", ContextProviderBuilder) } type contextProvider struct{} diff --git a/internal/pkg/composable/providers/host/host.go b/internal/pkg/composable/providers/host/host.go index a23b92f1d2b..cc98021e77b 100644 --- a/internal/pkg/composable/providers/host/host.go +++ b/internal/pkg/composable/providers/host/host.go @@ -24,7 +24,7 @@ import ( const DefaultCheckInterval = 5 * time.Minute func init() { - composable.Providers.AddContextProvider("host", ContextProviderBuilder) + composable.Providers.MustAddContextProvider("host", ContextProviderBuilder) } type infoFetcher func() (map[string]interface{}, error) diff --git a/internal/pkg/composable/providers/kubernetes/kubernetes.go b/internal/pkg/composable/providers/kubernetes/kubernetes.go index e7d005aa8f1..9f43522f2da 100644 --- a/internal/pkg/composable/providers/kubernetes/kubernetes.go +++ b/internal/pkg/composable/providers/kubernetes/kubernetes.go @@ -30,7 +30,7 @@ const ( const nodeScope = "node" func init() { - _ = composable.Providers.AddDynamicProvider("kubernetes", DynamicProviderBuilder) + composable.Providers.MustAddDynamicProvider("kubernetes", DynamicProviderBuilder) } type dynamicProvider struct { diff --git a/internal/pkg/composable/providers/kubernetesleaderelection/kubernetes_leaderelection.go b/internal/pkg/composable/providers/kubernetesleaderelection/kubernetes_leaderelection.go index 4d27192bab5..d0d773d1663 100644 --- a/internal/pkg/composable/providers/kubernetesleaderelection/kubernetes_leaderelection.go +++ b/internal/pkg/composable/providers/kubernetesleaderelection/kubernetes_leaderelection.go @@ -23,7 +23,7 @@ import ( ) func init() { - _ = composable.Providers.AddContextProvider("kubernetes_leaderelection", ContextProviderBuilder) + 
composable.Providers.MustAddContextProvider("kubernetes_leaderelection", ContextProviderBuilder) } type contextProvider struct { diff --git a/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets.go b/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets.go index debc501d3cc..d6e8190c13a 100644 --- a/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets.go +++ b/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets.go @@ -24,7 +24,7 @@ var _ corecomp.FetchContextProvider = (*contextProviderK8sSecrets)(nil) var getK8sClientFunc = getK8sClient func init() { - _ = composable.Providers.AddContextProvider("kubernetes_secrets", ContextProviderBuilder) + composable.Providers.MustAddContextProvider("kubernetes_secrets", ContextProviderBuilder) } type contextProviderK8sSecrets struct { diff --git a/internal/pkg/composable/providers/local/local.go b/internal/pkg/composable/providers/local/local.go index 9c611ecbd13..b44affc78df 100644 --- a/internal/pkg/composable/providers/local/local.go +++ b/internal/pkg/composable/providers/local/local.go @@ -15,7 +15,7 @@ import ( ) func init() { - composable.Providers.AddContextProvider("local", ContextProviderBuilder) + composable.Providers.MustAddContextProvider("local", ContextProviderBuilder) } type contextProvider struct { diff --git a/internal/pkg/composable/providers/localdynamic/localdynamic.go b/internal/pkg/composable/providers/localdynamic/localdynamic.go index f4f99ca4030..0fd81738976 100644 --- a/internal/pkg/composable/providers/localdynamic/localdynamic.go +++ b/internal/pkg/composable/providers/localdynamic/localdynamic.go @@ -18,7 +18,7 @@ import ( const ItemPriority = 0 func init() { - composable.Providers.AddDynamicProvider("local_dynamic", DynamicProviderBuilder) + composable.Providers.MustAddDynamicProvider("local_dynamic", DynamicProviderBuilder) } type dynamicItem struct { diff --git a/internal/pkg/composable/providers/path/path.go b/internal/pkg/composable/providers/path/path.go index 455f46d2b28..05af5bcd0b0 100644 --- a/internal/pkg/composable/providers/path/path.go +++ b/internal/pkg/composable/providers/path/path.go @@ -14,7 +14,7 @@ import ( ) func init() { - composable.Providers.AddContextProvider("path", ContextProviderBuilder) + composable.Providers.MustAddContextProvider("path", ContextProviderBuilder) } type contextProvider struct{} diff --git a/version/version.go b/version/version.go index 6636201e4dc..07f3cb8046a 100644 --- a/version/version.go +++ b/version/version.go @@ -4,4 +4,4 @@ package version -const defaultBeatVersion = "8.3.0" +const defaultBeatVersion = "8.4.0" From 24732cf32c8fe8e9c159991754ff017b1c70b597 Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Tue, 26 Jul 2022 14:57:23 -0400 Subject: [PATCH 19/19] Fix lint and missing errcheck. --- dev-tools/mage/pkgtypes.go | 31 +++++++++++-------- internal/pkg/agent/application/application.go | 2 +- .../download/composed/downloader_test.go | 8 +++-- internal/pkg/agent/control/server/server.go | 4 --- internal/pkg/agent/install/uninstall.go | 3 ++ 5 files changed, 28 insertions(+), 20 deletions(-) diff --git a/dev-tools/mage/pkgtypes.go b/dev-tools/mage/pkgtypes.go index a4213fee784..04272b0e8e9 100644 --- a/dev-tools/mage/pkgtypes.go +++ b/dev-tools/mage/pkgtypes.go @@ -2,8 +2,6 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. 
-//nolint:goconst // avoiding const check for Deb/Zip - package mage import ( @@ -41,6 +39,13 @@ const ( defaultBinaryName = "{{.Name}}-{{.Version}}{{if .Snapshot}}-SNAPSHOT{{end}}{{if .OS}}-{{.OS}}{{end}}{{if .Arch}}-{{.Arch}}{{end}}" componentConfigMode os.FileMode = 0600 + + rpm = "rpm" + deb = "deb" + zipExt = "zip" + targz = "tar.gz" + docker = "docker" + invalid = "invalid" ) var ( @@ -205,17 +210,17 @@ func getOSArchName(platform BuildPlatform, t PackageType) (string, error) { func (typ PackageType) String() string { switch typ { case RPM: - return "rpm" + return rpm case Deb: - return "deb" + return deb case Zip: - return "zip" + return zipExt case TarGz: - return "tar.gz" + return targz case Docker: - return "docker" + return docker default: - return "invalid" + return invalid } } @@ -227,15 +232,15 @@ func (typ PackageType) MarshalText() ([]byte, error) { // UnmarshalText returns a PackageType based on the given text. func (typ *PackageType) UnmarshalText(text []byte) error { switch strings.ToLower(string(text)) { - case "rpm": + case rpm: *typ = RPM - case "deb": + case deb: *typ = Deb - case "tar.gz", "tgz", "targz": + case targz, "tgz", "targz": *typ = TarGz - case "zip": + case zipExt: *typ = Zip - case "docker": + case docker: *typ = Docker default: return errors.Errorf("unknown package type: %v", string(text)) diff --git a/internal/pkg/agent/application/application.go b/internal/pkg/agent/application/application.go index 7aabbba127c..d5bc6a182d8 100644 --- a/internal/pkg/agent/application/application.go +++ b/internal/pkg/agent/application/application.go @@ -179,7 +179,7 @@ func externalConfigsGlob() string { } func discoverer(patterns ...string) discoverFunc { - var p []string + p := make([]string, 0, len(patterns)) for _, newP := range patterns { if len(newP) == 0 { continue diff --git a/internal/pkg/agent/application/upgrade/artifact/download/composed/downloader_test.go b/internal/pkg/agent/application/upgrade/artifact/download/composed/downloader_test.go index 5d97ed167a9..c9820822d6f 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/composed/downloader_test.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/composed/downloader_test.go @@ -15,6 +15,10 @@ import ( "github.com/stretchr/testify/assert" ) +const ( + succ = "succ" +) + type FailingDownloader struct { called bool } @@ -32,7 +36,7 @@ type SuccDownloader struct { func (d *SuccDownloader) Download(ctx context.Context, _ artifact.Artifact, _ string) (string, error) { d.called = true - return "succ", nil + return succ, nil } func (d *SuccDownloader) Called() bool { return d.called } @@ -61,7 +65,7 @@ func TestComposed(t *testing.T) { d := NewDownloader(tc.downloaders[0], tc.downloaders[1]) r, _ := d.Download(context.TODO(), artifact.Artifact{Name: "a"}, "b") - assert.Equal(t, tc.expectedResult, r == "succ") + assert.Equal(t, tc.expectedResult, r == succ) assert.True(t, tc.checkFunc(tc.downloaders)) } diff --git a/internal/pkg/agent/control/server/server.go b/internal/pkg/agent/control/server/server.go index 6aeb37e3541..15cefc09f7f 100644 --- a/internal/pkg/agent/control/server/server.go +++ b/internal/pkg/agent/control/server/server.go @@ -9,7 +9,6 @@ import ( "encoding/json" "fmt" "net" - "sync" "go.elastic.co/apm" "go.elastic.co/apm/module/apmgrpc" @@ -24,8 +23,6 @@ import ( "github.com/elastic/elastic-agent/pkg/core/logger" ) -const agentName = "elastic-agent" - // Server is the daemon side of the control protocol. 
type Server struct { cproto.UnimplementedElasticAgentControlServer @@ -36,7 +33,6 @@ type Server struct { listener net.Listener server *grpc.Server tracer *apm.Tracer - lock sync.RWMutex } // New creates a new control protocol server. diff --git a/internal/pkg/agent/install/uninstall.go b/internal/pkg/agent/install/uninstall.go index 090c5e06d59..d1ee5e371ff 100644 --- a/internal/pkg/agent/install/uninstall.go +++ b/internal/pkg/agent/install/uninstall.go @@ -168,6 +168,9 @@ func serviceComponentsFromConfig(specs component.RuntimeSpecs, cfg *config.Confi return nil, errors.New("failed to create a map from config", err) } allComps, err := specs.ToComponents(mm) + if err != nil { + return nil, fmt.Errorf("failed to render components: %w", err) + } var serviceComps []component.Component for _, comp := range allComps { if comp.Err == nil && comp.Spec.Spec.Service != nil {
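
The controller.go hunks in the code-review patch (18/19) above fix a classic Go concurrency pitfall: goroutines started inside a for ... range loop were closing over the shared loop variables name and state, so a goroutine could observe the values from a later iteration by the time it ran. Passing the variables as arguments gives each goroutine its own copy. A minimal, self-contained sketch of that pattern; the map contents and names here are illustrative, not the agent's real provider API:

package main

import (
	"fmt"
	"sync"
)

func main() {
	providers := map[string]string{"env": "ctx-a", "host": "ctx-b", "local": "ctx-c"}

	var wg sync.WaitGroup
	wg.Add(len(providers))
	for name, state := range providers {
		// Pass the loop variables as arguments so each goroutine receives its
		// own copy; before Go 1.22 the variables are shared across iterations.
		go func(name, state string) {
			defer wg.Done()
			fmt.Printf("running provider %s with %s\n", name, state)
		}(name, state)
	}
	wg.Wait()
}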
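
The same patch also replaces the discarded errors from AddContextProvider / AddDynamicProvider in the provider init() functions with Must-style wrappers that panic on failure. Panicking in a Must* helper is the usual Go idiom when the only possible error is a programming mistake, such as registering the same provider name twice. A rough sketch of the idiom, using a hypothetical registry rather than the agent's providerRegistry:

package registry

import "fmt"

// Builder constructs a provider; the concrete signature in the agent differs.
type Builder func() (interface{}, error)

// Registry holds named builders and rejects duplicate registrations.
type Registry struct {
	builders map[string]Builder
}

// New returns an empty registry.
func New() *Registry {
	return &Registry{builders: make(map[string]Builder)}
}

// Add registers a builder and reports a duplicate name as an error.
func (r *Registry) Add(name string, b Builder) error {
	if _, exists := r.builders[name]; exists {
		return fmt.Errorf("provider %q already registered", name)
	}
	r.builders[name] = b
	return nil
}

// MustAdd panics if Add fails; intended for package init() registration,
// where a failure can only mean a programming error.
func (r *Registry) MustAdd(name string, b Builder) {
	if err := r.Add(name, b); err != nil {
		panic(err)
	}
}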