From e98703caf3d36252b328e57ccdce6df3d7ef5167 Mon Sep 17 00:00:00 2001
From: Mat Schaffer
Date: Mon, 21 Feb 2022 10:24:47 +0900
Subject: [PATCH 01/13] Spelling fix (#30439)

---
 docs/devguide/modules-dev-guide.asciidoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/devguide/modules-dev-guide.asciidoc b/docs/devguide/modules-dev-guide.asciidoc
index ee264b15741..cab2b117133 100644
--- a/docs/devguide/modules-dev-guide.asciidoc
+++ b/docs/devguide/modules-dev-guide.asciidoc
@@ -494,7 +494,7 @@ docker run \
   -p 9200:9200 -p 9300:9300 \
   -e "xpack.security.http.ssl.enabled=false" -e "ELASTIC_PASSWORD=changeme" \
   -e "discovery.type=single-node" \
-  --pull allways --rm --detach \
+  --pull always --rm --detach \
   docker.elastic.co/elasticsearch/elasticsearch:master-SNAPSHOT
 ----
 . Create an "admin" user on that Elasticsearch instance:

From 4ab90f9b3339c6f8a5d198ad469305dcc7bbad53 Mon Sep 17 00:00:00 2001
From: Dan Kortschak <90160302+efd6@users.noreply.github.com>
Date: Mon, 21 Feb 2022 13:22:57 +1030
Subject: [PATCH 02/13] {,x-pack/}auditbeat: replace uses of
 github.com/pkg/errors with stdlib equivalents (#30321)

* mechanical replacement of github.com/pkg/errors with standard library error handling

This is not completely automatable here due to some semantic differences
between errors.Wrap and fmt.Errorf with the %w verb; the latter will
convert a nil error into a non-nil error, so this must be guarded with a
nil error check. The mechanical transformation was done by running the
following commands:

gofmt -w -r 'errors.Errorf -> fmt.Errorf' .
gofmt -w -r 'errors.Wrap(e, m) -> fmt.Errorf(m+": %w", e)' .
gofmt -w -r 'errors.Wrapf(e, m) -> fmt.Errorf(m+": %w", e)' .
gofmt -w -r 'errors.Wrapf(e, m, a) -> fmt.Errorf(m+": %w", a, e)' .
gofmt -w -r 'errors.Wrapf(e, m, a, b) -> fmt.Errorf(m+": %w", a, b, e)' .
gofmt -w -r 'errors.Wrapf(e, m, a, b, c) -> fmt.Errorf(m+": %w", a, b, c, e)' .
find . -name '*.go' -exec gsed -i -e 's/"+": %w",/: %w",/g' '{}' \;
find . -name '*.go' -exec gsed -i -e 's!"github.com/pkg/errors"$!"errors"!g' '{}' \;
goimports -w .
go mod tidy
gofumpt -w .

Replaying that will give the changes here modulo some manual changes that
are made in the next commit. There should be no uses of %w in this change
that are not guarded by a nil-check.

* manual replacement of github.com/pkg/errors with standard library error handling

This is the manual part of the change. It includes the complete removal of
some errors, and fixes for some bugs that had been silently passed over as
a result of using Wrap. In one place the exact semantics of errors.Cause
is implemented to fit in with a testing strategy.
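To make the nil-guard point above concrete, here is a minimal, self-contained Go sketch (an editorial illustration, not part of the patch; the "reading config" message and helper names are invented):

package main

import (
	"fmt"

	pkgerrors "github.com/pkg/errors"
)

// wrapped shows the old behavior: errors.Wrap passes a nil error through unchanged.
func wrapped(err error) error {
	return pkgerrors.Wrap(err, "reading config")
}

// rewritten shows why the rewrite is not purely mechanical: fmt.Errorf with %w
// turns a nil error into a non-nil one, breaking callers that test err != nil.
func rewritten(err error) error {
	return fmt.Errorf("reading config: %w", err)
}

// guarded is the form the commit message describes: the nil check restores
// errors.Wrap semantics.
func guarded(err error) error {
	if err != nil {
		return fmt.Errorf("reading config: %w", err)
	}
	return nil
}

func main() {
	fmt.Println(wrapped(nil) == nil)   // true
	fmt.Println(rewritten(nil) == nil) // false
	fmt.Println(guarded(nil) == nil)   // true
}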
* replace unwrapping with errors.As --- auditbeat/cmd/root.go | 2 +- auditbeat/datastore/datastore.go | 2 +- auditbeat/helper/hasher/hasher.go | 19 +++---- auditbeat/helper/hasher/hasher_test.go | 8 +-- auditbeat/module/auditd/audit_linux.go | 55 ++++++++++--------- auditbeat/module/auditd/audit_linux_test.go | 2 +- auditbeat/module/auditd/audit_unsupported.go | 4 +- auditbeat/module/auditd/config_linux.go | 13 ++--- auditbeat/module/auditd/config_linux_test.go | 2 +- auditbeat/module/auditd/golden_files_test.go | 20 ++++--- auditbeat/module/auditd/show_linux.go | 9 ++- auditbeat/module/file_integrity/config.go | 14 +++-- auditbeat/module/file_integrity/event.go | 18 +++--- auditbeat/module/file_integrity/event_test.go | 4 +- .../file_integrity/eventreader_fsevents.go | 4 +- .../file_integrity/eventreader_fsnotify.go | 4 +- .../module/file_integrity/eventreader_test.go | 16 +++--- .../file_integrity/eventreader_unsupported.go | 2 +- .../module/file_integrity/fileinfo_posix.go | 4 +- .../module/file_integrity/fileinfo_test.go | 8 +-- .../module/file_integrity/fileinfo_windows.go | 12 ++-- .../file_integrity/fileorigin_darwin.go | 23 +++++--- .../module/file_integrity/flatbuffers.go | 4 +- auditbeat/module/file_integrity/metricset.go | 16 +++--- .../module/file_integrity/metricset_test.go | 16 +++--- auditbeat/module/file_integrity/mime_test.go | 2 +- .../module/file_integrity/monitor/filetree.go | 6 +- .../file_integrity/monitor/filetree_test.go | 45 ++++++++++----- .../file_integrity/monitor/monitor_test.go | 20 +++---- .../file_integrity/monitor/recursive.go | 18 +++--- .../module/file_integrity/scanner_test.go | 8 +-- auditbeat/scripts/mage/config.go | 9 ++- auditbeat/scripts/mage/docs.go | 6 +- auditbeat/scripts/mage/package.go | 15 ++--- x-pack/auditbeat/module/system/host/host.go | 15 +++-- x-pack/auditbeat/module/system/login/login.go | 6 +- .../module/system/login/login_test.go | 3 +- x-pack/auditbeat/module/system/login/utmp.go | 41 +++++++------- .../module/system/package/package.go | 37 ++++++------- .../module/system/package/package_homebrew.go | 13 ++--- .../module/system/package/package_test.go | 2 +- .../module/system/package/rpm_linux.go | 6 +- .../module/system/package/rpm_others.go | 2 +- .../module/system/process/namepace_linux.go | 3 +- .../module/system/process/process.go | 25 ++++----- .../system/socket/dns/afpacket/afpacket.go | 13 +++-- .../auditbeat/module/system/socket/dns/dns.go | 5 +- .../module/system/socket/guess/cskxmit6.go | 17 +++--- .../module/system/socket/guess/guess.go | 25 ++++----- .../module/system/socket/guess/helpers.go | 14 ++--- .../module/system/socket/guess/inetsock.go | 11 ++-- .../module/system/socket/guess/inetsock6.go | 17 +++--- .../module/system/socket/guess/inetsockaf.go | 3 +- .../module/system/socket/guess/skbuff.go | 28 +++++----- .../module/system/socket/guess/sockaddrin.go | 8 +-- .../module/system/socket/guess/sockaddrin6.go | 18 +++--- .../module/system/socket/helper/loopback.go | 18 +++--- .../auditbeat/module/system/socket/kprobes.go | 25 ++++----- .../module/system/socket/socket_linux.go | 24 ++++---- .../auditbeat/module/system/socket/state.go | 9 +-- .../module/system/socket/state_test.go | 4 +- x-pack/auditbeat/module/system/user/user.go | 23 ++++---- .../auditbeat/module/system/user/user_test.go | 4 +- .../module/system/user/users_linux.go | 14 ++--- x-pack/auditbeat/tracing/cpu.go | 7 +-- x-pack/auditbeat/tracing/events_test.go | 12 ++-- x-pack/auditbeat/tracing/perfevent.go | 16 +++--- 67 files changed, 444 insertions(+), 
434 deletions(-) diff --git a/auditbeat/cmd/root.go b/auditbeat/cmd/root.go index aa5b523e976..79c74cf0279 100644 --- a/auditbeat/cmd/root.go +++ b/auditbeat/cmd/root.go @@ -54,7 +54,7 @@ var withECSVersion = processing.WithFields(common.MapStr{ // AuditbeatSettings contains the default settings for auditbeat func AuditbeatSettings() instance.Settings { - var runFlags = pflag.NewFlagSet(Name, pflag.ExitOnError) + runFlags := pflag.NewFlagSet(Name, pflag.ExitOnError) return instance.Settings{ RunFlags: runFlags, Name: Name, diff --git a/auditbeat/datastore/datastore.go b/auditbeat/datastore/datastore.go index d3cc9966f3f..57b2f379f0b 100644 --- a/auditbeat/datastore/datastore.go +++ b/auditbeat/datastore/datastore.go @@ -39,7 +39,7 @@ func OpenBucket(name string) (Bucket, error) { initDatastoreOnce.Do(func() { ds = &boltDatastore{ path: paths.Resolve(paths.Data, "beat.db"), - mode: 0600, + mode: 0o600, } }) diff --git a/auditbeat/helper/hasher/hasher.go b/auditbeat/helper/hasher/hasher.go index cc4c928867e..8bec9e1d253 100644 --- a/auditbeat/helper/hasher/hasher.go +++ b/auditbeat/helper/hasher/hasher.go @@ -33,7 +33,6 @@ import ( "github.com/cespare/xxhash/v2" "github.com/dustin/go-humanize" "github.com/joeshaw/multierror" - "github.com/pkg/errors" "golang.org/x/crypto/blake2b" "golang.org/x/crypto/sha3" "golang.org/x/time/rate" @@ -143,7 +142,7 @@ func (c *Config) Validate() error { for _, ht := range c.HashTypes { if !ht.IsValid() { - errs = append(errs, errors.Errorf("invalid hash_types value '%v'", ht)) + errs = append(errs, fmt.Errorf("invalid hash_types value '%v'", ht)) } } @@ -151,14 +150,14 @@ func (c *Config) Validate() error { c.MaxFileSizeBytes, err = humanize.ParseBytes(c.MaxFileSize) if err != nil { - errs = append(errs, errors.Wrap(err, "invalid max_file_size value")) + errs = append(errs, fmt.Errorf("invalid max_file_size value: %w", err)) } else if c.MaxFileSizeBytes <= 0 { - errs = append(errs, errors.Errorf("max_file_size value (%v) must be positive", c.MaxFileSize)) + errs = append(errs, fmt.Errorf("max_file_size value (%v) must be positive", c.MaxFileSize)) } c.ScanRateBytesPerSec, err = humanize.ParseBytes(c.ScanRatePerSec) if err != nil { - errs = append(errs, errors.Wrap(err, "invalid scan_rate_per_sec value")) + errs = append(errs, fmt.Errorf("invalid scan_rate_per_sec value: %w", err)) } return errs.Err() @@ -189,14 +188,14 @@ func NewFileHasher(c Config, done <-chan struct{}) (*FileHasher, error) { func (hasher *FileHasher) HashFile(path string) (map[HashType]Digest, error) { info, err := os.Stat(path) if err != nil { - return nil, errors.Wrapf(err, "failed to stat file %v", path) + return nil, fmt.Errorf("failed to stat file %v: %w", path, err) } // Throttle reading and hashing rate. 
if len(hasher.config.HashTypes) > 0 { err = hasher.throttle(info.Size()) if err != nil { - return nil, errors.Wrapf(err, "failed to hash file %v", path) + return nil, fmt.Errorf("failed to hash file %v: %w", path, err) } } @@ -204,7 +203,7 @@ func (hasher *FileHasher) HashFile(path string) (map[HashType]Digest, error) { for _, hashType := range hasher.config.HashTypes { h, valid := validHashes[hashType] if !valid { - return nil, errors.Errorf("unknown hash type '%v'", hashType) + return nil, fmt.Errorf("unknown hash type '%v'", hashType) } hashes = append(hashes, h()) @@ -213,13 +212,13 @@ func (hasher *FileHasher) HashFile(path string) (map[HashType]Digest, error) { if len(hashes) > 0 { f, err := file.ReadOpen(path) if err != nil { - return nil, errors.Wrap(err, "failed to open file for hashing") + return nil, fmt.Errorf("failed to open file for hashing: %w", err) } defer f.Close() hashWriter := multiWriter(hashes) if _, err := io.Copy(hashWriter, f); err != nil { - return nil, errors.Wrap(err, "failed to calculate file hashes") + return nil, fmt.Errorf("failed to calculate file hashes: %w", err) } nameToHash := make(map[HashType]Digest, len(hashes)) diff --git a/auditbeat/helper/hasher/hasher_test.go b/auditbeat/helper/hasher/hasher_test.go index c9d781b35a4..2eb62a935c4 100644 --- a/auditbeat/helper/hasher/hasher_test.go +++ b/auditbeat/helper/hasher/hasher_test.go @@ -18,12 +18,12 @@ package hasher import ( + "errors" "io/ioutil" "os" "path/filepath" "testing" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" ) @@ -35,7 +35,7 @@ func TestHasher(t *testing.T) { defer os.RemoveAll(dir) file := filepath.Join(dir, "exe") - if err = ioutil.WriteFile(file, []byte("test exe\n"), 0600); err != nil { + if err = ioutil.WriteFile(file, []byte("test exe\n"), 0o600); err != nil { t.Fatal(err) } @@ -69,7 +69,7 @@ func TestHasherLimits(t *testing.T) { defer os.RemoveAll(dir) file := filepath.Join(dir, "exe") - if err = ioutil.WriteFile(file, []byte("test exe\n"), 0600); err != nil { + if err = ioutil.WriteFile(file, []byte("test exe\n"), 0o600); err != nil { t.Fatal(err) } @@ -88,5 +88,5 @@ func TestHasherLimits(t *testing.T) { hashes, err := hasher.HashFile(file) assert.Empty(t, hashes) assert.Error(t, err) - assert.IsType(t, FileTooLargeError{}, errors.Cause(err)) + assert.True(t, errors.As(err, &FileTooLargeError{})) } diff --git a/auditbeat/module/auditd/audit_linux.go b/auditbeat/module/auditd/audit_linux.go index 82ae7d61128..63febb6fcec 100644 --- a/auditbeat/module/auditd/audit_linux.go +++ b/auditbeat/module/auditd/audit_linux.go @@ -18,6 +18,7 @@ package auditd import ( + "errors" "fmt" "os" "runtime" @@ -27,8 +28,6 @@ import ( "syscall" "time" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/logp" "github.com/elastic/beats/v7/libbeat/monitoring" @@ -99,7 +98,7 @@ type MetricSet struct { func New(base mb.BaseMetricSet) (mb.MetricSet, error) { config := defaultConfig if err := base.Module().UnpackConfig(&config); err != nil { - return nil, errors.Wrap(err, "failed to unpack the auditd config") + return nil, fmt.Errorf("failed to unpack the auditd config: %w", err) } log := logp.NewLogger(moduleName) @@ -108,7 +107,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { client, err := newAuditClient(&config, log) if err != nil { - return nil, errors.Wrap(err, "failed to create audit client") + return nil, fmt.Errorf("failed to create audit client: %w", err) } 
reassemblerGapsMetric.Set(0) @@ -255,7 +254,7 @@ func (ms *MetricSet) addRules(reporter mb.PushReporterV2) error { client, err := libaudit.NewAuditClient(nil) if err != nil { - return errors.Wrap(err, "failed to create audit client for adding rules") + return fmt.Errorf("failed to create audit client for adding rules: %w", err) } defer closeAuditClient(client) @@ -263,7 +262,7 @@ func (ms *MetricSet) addRules(reporter mb.PushReporterV2) error { // Will result in EPERM. status, err := client.GetStatus() if err != nil { - err = errors.Wrap(err, "failed to get audit status before adding rules") + err = fmt.Errorf("failed to get audit status before adding rules: %w", err) reporter.Error(err) return err } @@ -274,7 +273,7 @@ func (ms *MetricSet) addRules(reporter mb.PushReporterV2) error { // Delete existing rules. n, err := client.DeleteRules() if err != nil { - return errors.Wrap(err, "failed to delete existing rules") + return fmt.Errorf("failed to delete existing rules: %w", err) } ms.log.Infof("Deleted %v pre-existing audit rules.", n) @@ -289,7 +288,7 @@ func (ms *MetricSet) addRules(reporter mb.PushReporterV2) error { for _, rule := range rules { if err = client.AddRule(rule.data); err != nil { // Treat rule add errors as warnings and continue. - err = errors.Wrapf(err, "failed to add audit rule '%v'", rule.flags) + err = fmt.Errorf("failed to add audit rule '%v': %w", rule.flags, err) reporter.Error(err) ms.log.Warnw("Failure adding audit rule", "error", err) failCount++ @@ -307,14 +306,17 @@ func (ms *MetricSet) initClient() error { // required to ensure that auditing is enabled if the process is only // given CAP_AUDIT_READ. err := ms.client.SetEnabled(true, libaudit.NoWait) - return errors.Wrap(err, "failed to enable auditing in the kernel") + if err != nil { + return fmt.Errorf("failed to enable auditing in the kernel: %w", err) + } + return nil } // Unicast client initialization (requires CAP_AUDIT_CONTROL and that the // process be in initial PID namespace). 
status, err := ms.client.GetStatus() if err != nil { - return errors.Wrap(err, "failed to get audit status") + return fmt.Errorf("failed to get audit status: %w", err) } ms.kernelLost.enabled = true ms.kernelLost.counter = status.Lost @@ -327,13 +329,13 @@ func (ms *MetricSet) initClient() error { if fm, _ := ms.config.failureMode(); status.Failure != fm { if err = ms.client.SetFailure(libaudit.FailureMode(fm), libaudit.NoWait); err != nil { - return errors.Wrap(err, "failed to set audit failure mode in kernel") + return fmt.Errorf("failed to set audit failure mode in kernel: %w", err) } } if status.BacklogLimit != ms.config.BacklogLimit { if err = ms.client.SetBacklogLimit(ms.config.BacklogLimit, libaudit.NoWait); err != nil { - return errors.Wrap(err, "failed to set audit backlog limit in kernel") + return fmt.Errorf("failed to set audit backlog limit in kernel: %w", err) } } @@ -345,7 +347,7 @@ func (ms *MetricSet) initClient() error { if status.FeatureBitmap&libaudit.AuditFeatureBitmapBacklogWaitTime != 0 { ms.log.Info("Setting kernel backlog wait time to prevent backpressure propagating to the kernel.") if err = ms.client.SetBacklogWaitTime(0, libaudit.NoWait); err != nil { - return errors.Wrap(err, "failed to set audit backlog wait time in kernel") + return fmt.Errorf("failed to set audit backlog wait time in kernel: %w", err) } } else { if ms.backpressureStrategy == bsAuto { @@ -365,38 +367,38 @@ func (ms *MetricSet) initClient() error { if status.RateLimit != ms.config.RateLimit { if err = ms.client.SetRateLimit(ms.config.RateLimit, libaudit.NoWait); err != nil { - return errors.Wrap(err, "failed to set audit rate limit in kernel") + return fmt.Errorf("failed to set audit rate limit in kernel: %w", err) } } if status.Enabled == 0 { if err = ms.client.SetEnabled(true, libaudit.NoWait); err != nil { - return errors.Wrap(err, "failed to enable auditing in the kernel") + return fmt.Errorf("failed to enable auditing in the kernel: %w", err) } } if err := ms.client.WaitForPendingACKs(); err != nil { - return errors.Wrap(err, "failed to wait for ACKs") + return fmt.Errorf("failed to wait for ACKs: %w", err) } if err := ms.setPID(setPIDMaxRetries); err != nil { if errno, ok := err.(syscall.Errno); ok && errno == syscall.EEXIST && status.PID != 0 { return fmt.Errorf("failed to set audit PID. An audit process is already running (PID %d)", status.PID) } - return errors.Wrapf(err, "failed to set audit PID (current audit PID %d)", status.PID) + return fmt.Errorf("failed to set audit PID (current audit PID %d): %w", status.PID, err) } return nil } func (ms *MetricSet) setPID(retries int) (err error) { - if err = ms.client.SetPID(libaudit.WaitForReply); err == nil || errors.Cause(err) != syscall.ENOBUFS || retries == 0 { + if err = ms.client.SetPID(libaudit.WaitForReply); err == nil || !errors.Is(err, syscall.ENOBUFS) || retries == 0 { return err } // At this point the netlink channel is congested (ENOBUFS). // Drain and close the client, then retry with a new client. 
closeAuditClient(ms.client) if ms.client, err = newAuditClient(&ms.config, ms.log); err != nil { - return errors.Wrapf(err, "failed to recover from ENOBUFS") + return fmt.Errorf("failed to recover from ENOBUFS: %w", err) } ms.log.Info("Recovering from ENOBUFS ...") return ms.setPID(retries - 1) @@ -438,7 +440,7 @@ func (ms *MetricSet) receiveEvents(done <-chan struct{}) (<-chan []*auparse.Audi } reassembler, err := libaudit.NewReassembler(int(ms.config.ReassemblerMaxInFlight), ms.config.ReassemblerTimeout, st) if err != nil { - return nil, errors.Wrap(err, "failed to create Reassembler") + return nil, fmt.Errorf("failed to create Reassembler: %w", err) } go maintain(done, reassembler) @@ -450,7 +452,7 @@ func (ms *MetricSet) receiveEvents(done <-chan struct{}) (<-chan []*auparse.Audi for { raw, err := ms.client.Receive(false) if err != nil { - if errors.Cause(err) == syscall.EBADF { + if errors.Is(err, syscall.EBADF) { // Client has been closed. break } @@ -941,17 +943,17 @@ func kernelVersion() (major, minor int, full string, err error) { release := string(data[:length]) parts := strings.SplitN(release, ".", 3) if len(parts) < 2 { - return 0, 0, release, errors.Errorf("failed to parse uname release '%v'", release) + return 0, 0, release, fmt.Errorf("failed to parse uname release '%v'", release) } major, err = strconv.Atoi(parts[0]) if err != nil { - return 0, 0, release, errors.Wrapf(err, "failed to parse major version from '%v'", release) + return 0, 0, release, fmt.Errorf("failed to parse major version from '%v': %w", release, err) } minor, err = strconv.Atoi(parts[1]) if err != nil { - return 0, 0, release, errors.Wrapf(err, "failed to parse minor version from '%v'", release) + return 0, 0, release, fmt.Errorf("failed to parse minor version from '%v': %w", release, err) } return major, minor, release, nil @@ -961,7 +963,7 @@ func determineSocketType(c *Config, log *logp.Logger) (string, error) { client, err := libaudit.NewAuditClient(nil) if err != nil { if c.SocketType == "" { - return "", errors.Wrap(err, "failed to create audit client") + return "", fmt.Errorf("failed to create audit client: %w", err) } // Ignore errors if a socket type has been specified. 
It will fail during // further setup and its necessary for unit tests to pass @@ -971,7 +973,7 @@ func determineSocketType(c *Config, log *logp.Logger) (string, error) { status, err := client.GetStatus() if err != nil { if c.SocketType == "" { - return "", errors.Wrap(err, "failed to get audit status") + return "", fmt.Errorf("failed to get audit status: %w", err) } return c.SocketType, nil } @@ -1031,7 +1033,6 @@ func determineSocketType(c *Config, log *logp.Logger) (string, error) { } return unicast, nil } - } func getBackpressureStrategy(value string, logger *logp.Logger) backpressureStrategy { diff --git a/auditbeat/module/auditd/audit_linux_test.go b/auditbeat/module/auditd/audit_linux_test.go index 38a3491fbf9..437acd6df55 100644 --- a/auditbeat/module/auditd/audit_linux_test.go +++ b/auditbeat/module/auditd/audit_linux_test.go @@ -329,7 +329,7 @@ func buildSampleEvent(t testing.TB, lines []string, filename string) { t.Fatal(err) } - if err := ioutil.WriteFile(filename, output, 0644); err != nil { + if err := ioutil.WriteFile(filename, output, 0o644); err != nil { t.Fatal(err) } } diff --git a/auditbeat/module/auditd/audit_unsupported.go b/auditbeat/module/auditd/audit_unsupported.go index 95aeccbec14..40c9db3bd78 100644 --- a/auditbeat/module/auditd/audit_unsupported.go +++ b/auditbeat/module/auditd/audit_unsupported.go @@ -21,7 +21,7 @@ package auditd import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/mb/parse" @@ -36,5 +36,5 @@ func init() { // New constructs a new MetricSet. func New(base mb.BaseMetricSet) (mb.MetricSet, error) { - return nil, errors.Errorf("the %v module is only supported on Linux", metricsetName) + return nil, fmt.Errorf("the %v module is only supported on Linux", metricsetName) } diff --git a/auditbeat/module/auditd/config_linux.go b/auditbeat/module/auditd/config_linux.go index d0d90e86c44..ffcf9a463f6 100644 --- a/auditbeat/module/auditd/config_linux.go +++ b/auditbeat/module/auditd/config_linux.go @@ -29,7 +29,6 @@ import ( "time" "github.com/joeshaw/multierror" - "github.com/pkg/errors" "github.com/elastic/go-libaudit/v2/rule" "github.com/elastic/go-libaudit/v2/rule/flags" @@ -102,7 +101,7 @@ func (c *Config) Validate() error { switch c.SocketType { case "", "unicast", "multicast": default: - errs = append(errs, errors.Errorf("invalid socket_type "+ + errs = append(errs, fmt.Errorf("invalid socket_type "+ "'%v' (use unicast, multicast, or don't set a value)", c.SocketType)) } @@ -161,7 +160,7 @@ func (c Config) failureMode() (uint32, error) { case "panic": return 2, nil default: - return 0, errors.Errorf("invalid failure_mode '%v' (use silent, log, or panic)", c.FailureMode) + return 0, fmt.Errorf("invalid failure_mode '%v' (use silent, log, or panic)", c.FailureMode) } } @@ -179,21 +178,21 @@ func readRules(reader io.Reader, source string, knownRules ruleSet) (rules []aud // Parse the CLI flags into an intermediate rule specification. r, err := flags.Parse(line) if err != nil { - errs = append(errs, errors.Wrapf(err, "at %s: failed to parse rule '%v'", location, line)) + errs = append(errs, fmt.Errorf("at %s: failed to parse rule '%v': %w", location, line, err)) continue } // Convert rule specification to a binary rule representation. 
data, err := rule.Build(r) if err != nil { - errs = append(errs, errors.Wrapf(err, "at %s: failed to interpret rule '%v'", location, line)) + errs = append(errs, fmt.Errorf("at %s: failed to interpret rule '%v': %w", location, line, err)) continue } // Detect duplicates based on the normalized binary rule representation. existing, found := knownRules[string(data)] if found { - errs = append(errs, errors.Errorf("at %s: rule '%v' is a duplicate of '%v' at %s", location, line, existing.rule.flags, existing.source)) + errs = append(errs, fmt.Errorf("at %s: rule '%v' is a duplicate of '%v' at %s", location, line, existing.rule.flags, existing.source)) continue } rule := auditRule{flags: line, data: []byte(data)} @@ -203,7 +202,7 @@ func readRules(reader io.Reader, source string, knownRules ruleSet) (rules []aud } if len(errs) > 0 { - return nil, errors.Wrap(errs.Err(), "failed loading rules") + return nil, fmt.Errorf("failed loading rules: %w", errs.Err()) } return rules, nil } diff --git a/auditbeat/module/auditd/config_linux_test.go b/auditbeat/module/auditd/config_linux_test.go index 8d677fdfcb7..6d8e913cf4e 100644 --- a/auditbeat/module/auditd/config_linux_test.go +++ b/auditbeat/module/auditd/config_linux_test.go @@ -94,7 +94,7 @@ func TestConfigValidateConnectionType(t *testing.T) { } func TestConfigRuleOrdering(t *testing.T) { - const fileMode = 0644 + const fileMode = 0o644 config := defaultConfig config.RulesBlob = strings.Join([]string{ makeRuleFlags(0, 0), diff --git a/auditbeat/module/auditd/golden_files_test.go b/auditbeat/module/auditd/golden_files_test.go index 79bf4a85546..47e99cf6b5e 100644 --- a/auditbeat/module/auditd/golden_files_test.go +++ b/auditbeat/module/auditd/golden_files_test.go @@ -124,14 +124,16 @@ func configForGolden() map[string]interface{} { } } -type TerminateFn func(mb.Event) bool -type terminableReporter struct { - events []mb.Event - ctx context.Context - cancel context.CancelFunc - err error - isLast TerminateFn -} +type ( + TerminateFn func(mb.Event) bool + terminableReporter struct { + events []mb.Event + ctx context.Context + cancel context.CancelFunc + err error + isLast TerminateFn + } +) func (r *terminableReporter) Event(event mb.Event) bool { if r.ctx.Err() != nil { @@ -215,7 +217,7 @@ func TestGoldenFiles(t *testing.T) { if err != nil { t.Fatal(err) } - if err = ioutil.WriteFile(goldenPath, data, 0644); err != nil { + if err = ioutil.WriteFile(goldenPath, data, 0o644); err != nil { t.Fatalf("failed writing golden file '%s': %v", goldenPath, err) } } diff --git a/auditbeat/module/auditd/show_linux.go b/auditbeat/module/auditd/show_linux.go index 856697086ab..efc8f8b623a 100644 --- a/auditbeat/module/auditd/show_linux.go +++ b/auditbeat/module/auditd/show_linux.go @@ -21,7 +21,6 @@ import ( "fmt" "os" - "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/elastic/go-libaudit/v2" @@ -69,13 +68,13 @@ func init() { func showAuditdRules() error { client, err := libaudit.NewAuditClient(nil) if err != nil { - return errors.Wrap(err, "failed to create audit client") + return fmt.Errorf("failed to create audit client: %w", err) } defer client.Close() rules, err := client.GetRules() if err != nil { - return errors.Wrap(err, "failed to list existing rules") + return fmt.Errorf("failed to list existing rules: %w", err) } for idx, raw := range rules { @@ -96,13 +95,13 @@ func showAuditdRules() error { func showAuditdStatus() error { client, err := libaudit.NewAuditClient(nil) if err != nil { - return errors.Wrap(err, "failed to 
create audit client") + return fmt.Errorf("failed to create audit client: %w", err) } defer client.Close() status, err := client.GetStatus() if err != nil { - return errors.Wrap(err, "failed to get audit status") + return fmt.Errorf("failed to get audit status: %w", err) } if status.FeatureBitmap == libaudit.AuditFeatureBitmapBacklogWaitTime { diff --git a/auditbeat/module/file_integrity/config.go b/auditbeat/module/file_integrity/config.go index 39882fe3e0f..f1fa35ebdca 100644 --- a/auditbeat/module/file_integrity/config.go +++ b/auditbeat/module/file_integrity/config.go @@ -18,6 +18,7 @@ package file_integrity import ( + "fmt" "math" "path/filepath" "sort" @@ -25,7 +26,6 @@ import ( "github.com/dustin/go-humanize" "github.com/joeshaw/multierror" - "github.com/pkg/errors" "github.com/elastic/beats/v7/libbeat/common/match" ) @@ -110,19 +110,21 @@ nextHash: continue nextHash } } - errs = append(errs, errors.Errorf("invalid hash_types value '%v'", ht)) + errs = append(errs, fmt.Errorf("invalid hash_types value '%v'", ht)) } c.MaxFileSizeBytes, err = humanize.ParseBytes(c.MaxFileSize) - if err != nil || c.MaxFileSizeBytes > MaxValidFileSizeLimit { - errs = append(errs, errors.Wrap(err, "invalid max_file_size value")) + if err != nil { + errs = append(errs, fmt.Errorf("invalid max_file_size value: %w", err)) + } else if c.MaxFileSizeBytes > MaxValidFileSizeLimit { + errs = append(errs, fmt.Errorf("invalid max_file_size value: %s is too large (max=%s)", c.MaxFileSize, humanize.Bytes(MaxValidFileSizeLimit))) } else if c.MaxFileSizeBytes <= 0 { - errs = append(errs, errors.Errorf("max_file_size value (%v) must be positive", c.MaxFileSize)) + errs = append(errs, fmt.Errorf("max_file_size value (%v) must be positive", c.MaxFileSize)) } c.ScanRateBytesPerSec, err = humanize.ParseBytes(c.ScanRatePerSec) if err != nil { - errs = append(errs, errors.Wrap(err, "invalid scan_rate_per_sec value")) + errs = append(errs, fmt.Errorf("invalid scan_rate_per_sec value: %w", err)) } return errs.Err() } diff --git a/auditbeat/module/file_integrity/event.go b/auditbeat/module/file_integrity/event.go index 1bfc807dfa6..5e423d04fec 100644 --- a/auditbeat/module/file_integrity/event.go +++ b/auditbeat/module/file_integrity/event.go @@ -36,7 +36,6 @@ import ( "time" "github.com/cespare/xxhash/v2" - "github.com/pkg/errors" "golang.org/x/crypto/blake2b" "golang.org/x/crypto/sha3" @@ -214,11 +213,14 @@ func NewEvent( hashTypes []HashType, ) Event { info, err := os.Lstat(path) - if err != nil && os.IsNotExist(err) { - // deleted file is signaled by info == nil - err = nil + if err != nil { + if os.IsNotExist(err) { + // deleted file is signaled by info == nil + err = nil + } else { + err = fmt.Errorf("failed to lstat: %w", err) + } } - err = errors.Wrap(err, "failed to lstat") return NewEventFromFileInfo(path, info, err, action, source, maxFileSize, hashTypes) } @@ -449,13 +451,13 @@ func hashFile(name string, maxSize uint64, hashType ...HashType) (nameToHash map case XXH64: hashes = append(hashes, xxhash.New()) default: - return nil, 0, errors.Errorf("unknown hash type '%v'", name) + return nil, 0, fmt.Errorf("unknown hash type '%v'", name) } } f, err := file.ReadOpen(name) if err != nil { - return nil, 0, errors.Wrap(err, "failed to open file for hashing") + return nil, 0, fmt.Errorf("failed to open file for hashing: %w", err) } defer f.Close() @@ -469,7 +471,7 @@ func hashFile(name string, maxSize uint64, hashType ...HashType) (nameToHash map } written, err := 
io.Copy(hashWriter, r) if err != nil { - return nil, 0, errors.Wrap(err, "failed to calculate file hashes") + return nil, 0, fmt.Errorf("failed to calculate file hashes: %w", err) } // The file grew larger than configured limit. diff --git a/auditbeat/module/file_integrity/event_test.go b/auditbeat/module/file_integrity/event_test.go index de14b3e2d13..b45bd4895fc 100644 --- a/auditbeat/module/file_integrity/event_test.go +++ b/auditbeat/module/file_integrity/event_test.go @@ -47,7 +47,7 @@ func testEvent() *Event { Inode: 123, UID: 500, GID: 500, - Mode: 0600, + Mode: 0o600, CTime: testEventTime, MTime: testEventTime, SetGID: true, @@ -94,7 +94,7 @@ func TestDiffEvents(t *testing.T) { t.Run("updated metadata", func(t *testing.T) { e := testEvent() - e.Info.Mode = 0644 + e.Info.Mode = 0o644 action, changed := diffEvents(testEvent(), e) assert.True(t, changed) diff --git a/auditbeat/module/file_integrity/eventreader_fsevents.go b/auditbeat/module/file_integrity/eventreader_fsevents.go index 7dc7b0975af..ac4309da53e 100644 --- a/auditbeat/module/file_integrity/eventreader_fsevents.go +++ b/auditbeat/module/file_integrity/eventreader_fsevents.go @@ -21,13 +21,13 @@ package file_integrity import ( + "fmt" "os" "path/filepath" "strings" "time" "github.com/fsnotify/fsevents" - "github.com/pkg/errors" "github.com/elastic/beats/v7/libbeat/logp" ) @@ -204,7 +204,7 @@ func getFileInfo(path string) (os.FileInfo, error) { path = resolved } info, err := os.Lstat(path) - return info, errors.Wrap(err, "failed to stat") + return info, fmt.Errorf("failed to stat: %w", err) } func (r *fsreader) isWatched(path string) bool { diff --git a/auditbeat/module/file_integrity/eventreader_fsnotify.go b/auditbeat/module/file_integrity/eventreader_fsnotify.go index 7f3de5beb99..59aa48a029b 100644 --- a/auditbeat/module/file_integrity/eventreader_fsnotify.go +++ b/auditbeat/module/file_integrity/eventreader_fsnotify.go @@ -21,12 +21,12 @@ package file_integrity import ( + "fmt" "path/filepath" "syscall" "time" "github.com/fsnotify/fsnotify" - "github.com/pkg/errors" "github.com/elastic/beats/v7/auditbeat/module/file_integrity/monitor" "github.com/elastic/beats/v7/libbeat/logp" @@ -57,7 +57,7 @@ func (r *reader) Start(done <-chan struct{}) (<-chan Event, error) { if err := r.watcher.Start(); err != nil { // Ensure that watcher is closed so that we don't leak watchers r.watcher.Close() - return nil, errors.Wrap(err, "unable to start watcher") + return nil, fmt.Errorf("unable to start watcher: %w", err) } queueDone := make(chan struct{}) diff --git a/auditbeat/module/file_integrity/eventreader_test.go b/auditbeat/module/file_integrity/eventreader_test.go index 53db8b28c3e..f18d0817df3 100644 --- a/auditbeat/module/file_integrity/eventreader_test.go +++ b/auditbeat/module/file_integrity/eventreader_test.go @@ -23,12 +23,12 @@ import ( "os" "path/filepath" "runtime" + "runtime/debug" "strings" "syscall" "testing" "time" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" ) @@ -73,7 +73,7 @@ func TestEventReader(t *testing.T) { // Create a new file. 
txt1 := filepath.Join(dir, "test1.txt") - var fileMode os.FileMode = 0640 + var fileMode os.FileMode = 0o640 mustRun(t, "created", func(t *testing.T) { if err = ioutil.WriteFile(txt1, []byte("hello"), fileMode); err != nil { t.Fatal(err) @@ -129,14 +129,14 @@ func TestEventReader(t *testing.T) { t.Skip() } - if err = os.Chmod(txt2, 0644); err != nil { + if err = os.Chmod(txt2, 0o644); err != nil { t.Fatal(err) } event := readTimeout(t, events) assertSameFile(t, txt2, event.Path) assert.EqualValues(t, AttributesModified, AttributesModified&event.Action) - assert.EqualValues(t, 0644, event.Info.Mode) + assert.EqualValues(t, 0o644, event.Info.Mode) }) // Append data to the file. @@ -153,7 +153,7 @@ func TestEventReader(t *testing.T) { assertSameFile(t, txt2, event.Path) assert.EqualValues(t, Updated, Updated&event.Action) if runtime.GOOS != "windows" { - assert.EqualValues(t, 0644, event.Info.Mode) + assert.EqualValues(t, 0o644, event.Info.Mode) } }) @@ -182,7 +182,7 @@ func TestEventReader(t *testing.T) { // Create a sub-directory. subDir := filepath.Join(dir, "subdir") mustRun(t, "dir created", func(t *testing.T) { - if err = os.Mkdir(subDir, 0755); err != nil { + if err = os.Mkdir(subDir, 0o755); err != nil { t.Fatal(err) } @@ -243,7 +243,7 @@ func TestEventReader(t *testing.T) { func TestRaces(t *testing.T) { t.Skip("Flaky test: about 1/20 of builds fails https://github.com/elastic/beats/issues/21303") const ( - fileMode os.FileMode = 0640 + fileMode os.FileMode = 0o640 N = 100 ) @@ -314,7 +314,7 @@ func TestRaces(t *testing.T) { func readTimeout(t testing.TB, events <-chan Event) Event { select { case <-time.After(time.Second): - t.Fatalf("%+v", errors.Errorf("timed-out waiting for event")) + t.Fatalf("timed-out waiting for event:\n%s", debug.Stack()) case e, ok := <-events: if !ok { t.Fatal("failed reading from event channel") diff --git a/auditbeat/module/file_integrity/eventreader_unsupported.go b/auditbeat/module/file_integrity/eventreader_unsupported.go index 5fa39b2fa2c..0fe95102c2c 100644 --- a/auditbeat/module/file_integrity/eventreader_unsupported.go +++ b/auditbeat/module/file_integrity/eventreader_unsupported.go @@ -20,7 +20,7 @@ package file_integrity -import "github.com/pkg/errors" +import "errors" func NewEventReader(c Config) (EventProducer, error) { return errors.New("file auditing metricset is not implemented on this system") diff --git a/auditbeat/module/file_integrity/fileinfo_posix.go b/auditbeat/module/file_integrity/fileinfo_posix.go index a49cf1e5bd6..536a9d2a4dc 100644 --- a/auditbeat/module/file_integrity/fileinfo_posix.go +++ b/auditbeat/module/file_integrity/fileinfo_posix.go @@ -21,13 +21,13 @@ package file_integrity import ( + "fmt" "os" "os/user" "strconv" "syscall" "github.com/joeshaw/multierror" - "github.com/pkg/errors" ) // NewMetadata returns a new Metadata object. 
If an error is returned it is @@ -36,7 +36,7 @@ import ( func NewMetadata(path string, info os.FileInfo) (*Metadata, error) { stat, ok := info.Sys().(*syscall.Stat_t) if !ok { - return nil, errors.Errorf("unexpected fileinfo sys type %T for %v", info.Sys(), path) + return nil, fmt.Errorf("unexpected fileinfo sys type %T for %v", info.Sys(), path) } fileInfo := &Metadata{ diff --git a/auditbeat/module/file_integrity/fileinfo_test.go b/auditbeat/module/file_integrity/fileinfo_test.go index ea3a16b65ed..0375e06777b 100644 --- a/auditbeat/module/file_integrity/fileinfo_test.go +++ b/auditbeat/module/file_integrity/fileinfo_test.go @@ -86,7 +86,7 @@ func TestNewMetadata(t *testing.T) { assert.Equal(t, group.Name, meta.Group) assert.Empty(t, meta.SID) - assert.EqualValues(t, 0600, meta.Mode) + assert.EqualValues(t, 0o600, meta.Mode) } assert.EqualValues(t, len("metadata test"), meta.Size, "size") @@ -127,9 +127,9 @@ func TestSetUIDSetGIDBits(t *testing.T) { } for _, flags := range []os.FileMode{ - 0600 | os.ModeSetuid, - 0600 | os.ModeSetgid, - 0600 | os.ModeSetuid | os.ModeSetuid, + 0o600 | os.ModeSetuid, + 0o600 | os.ModeSetgid, + 0o600 | os.ModeSetuid | os.ModeSetuid, } { msg := fmt.Sprintf("checking flags %04o", flags) if err = os.Chmod(f.Name(), flags); err != nil { diff --git a/auditbeat/module/file_integrity/fileinfo_windows.go b/auditbeat/module/file_integrity/fileinfo_windows.go index 0766e61ae3f..cb98b598bc7 100644 --- a/auditbeat/module/file_integrity/fileinfo_windows.go +++ b/auditbeat/module/file_integrity/fileinfo_windows.go @@ -28,7 +28,6 @@ import ( "unsafe" "github.com/joeshaw/multierror" - "github.com/pkg/errors" "github.com/elastic/beats/v7/libbeat/common/file" ) @@ -39,7 +38,7 @@ import ( func NewMetadata(path string, info os.FileInfo) (*Metadata, error) { attrs, ok := info.Sys().(*syscall.Win32FileAttributeData) if !ok { - return nil, errors.Errorf("unexpected fileinfo sys type %T for %v", info.Sys(), path) + return nil, fmt.Errorf("unexpected fileinfo sys type %T for %v", info.Sys(), path) } var errs multierror.Errors @@ -69,12 +68,11 @@ func NewMetadata(path string, info os.FileInfo) (*Metadata, error) { var err error if !info.IsDir() { if fileInfo.SID, fileInfo.Owner, err = fileOwner(path); err != nil { - errs = append(errs, errors.Wrap(err, "fileOwner failed")) + errs = append(errs, fmt.Errorf("fileOwner failed: %w", err)) } - } if fileInfo.Origin, err = GetFileOrigin(path); err != nil { - errs = append(errs, errors.Wrap(err, "GetFileOrigin failed")) + errs = append(errs, fmt.Errorf("GetFileOrigin failed: %w", err)) } return fileInfo, errs.Err() } @@ -86,11 +84,11 @@ func fileOwner(path string) (sid, owner string, err error) { pathW, err := syscall.UTF16PtrFromString(path) if err != nil { - return sid, owner, errors.Wrapf(err, "failed to convert path:'%s' to UTF16", path) + return sid, owner, fmt.Errorf("failed to convert path:'%s' to UTF16: %w", path, err) } if err = GetNamedSecurityInfo(pathW, FileObject, OwnerSecurityInformation, &securityID, nil, nil, nil, &securityDescriptor); err != nil { - return "", "", errors.Wrapf(err, "failed on GetSecurityInfo for %v", path) + return "", "", fmt.Errorf("failed on GetSecurityInfo for %v: %w", path, err) } defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(securityDescriptor))) diff --git a/auditbeat/module/file_integrity/fileorigin_darwin.go b/auditbeat/module/file_integrity/fileorigin_darwin.go index 6efae8bd856..227bef1e4da 100644 --- a/auditbeat/module/file_integrity/fileorigin_darwin.go +++ 
b/auditbeat/module/file_integrity/fileorigin_darwin.go @@ -27,10 +27,11 @@ package file_integrity import "C" import ( + "errors" + "fmt" "syscall" "unsafe" - "github.com/pkg/errors" "howett.net/plist" ) @@ -73,9 +74,13 @@ func GetFileOrigin(path string) ([]string, error) { defer C.free(unsafe.Pointer(cPath)) // Query length kMDItemWhereFroms extended-attribute - attrSize, err := C.getxattr(cPath, kMDItemWhereFroms, nil, 0, 0, 0) + attrSize, errno := C.getxattr(cPath, kMDItemWhereFroms, nil, 0, 0, 0) if attrSize == -1 { - return nil, errors.Wrap(filterErrno(err), "getxattr: query attribute length failed") + err := filterErrno(errno) + if err != nil { + return nil, fmt.Errorf("getxattr: query attribute length failed: %w", err) + } + return nil, nil } if attrSize == 0 { return nil, nil @@ -83,9 +88,13 @@ func GetFileOrigin(path string) ([]string, error) { // Read the kMDItemWhereFroms attribute data := make([]byte, attrSize) - newSize, err := C.getxattr(cPath, kMDItemWhereFroms, unsafe.Pointer(&data[0]), C.size_t(attrSize), 0, 0) + newSize, errno := C.getxattr(cPath, kMDItemWhereFroms, unsafe.Pointer(&data[0]), C.size_t(attrSize), 0, 0) if newSize == -1 { - return nil, errors.Wrap(filterErrno(err), "getxattr failed") + err := filterErrno(errno) + if err != nil { + return nil, fmt.Errorf("getxattr failed: %w", filterErrno(err)) + } + return nil, nil } if newSize != attrSize { return nil, errors.New("getxattr: attribute changed while reading") @@ -93,8 +102,8 @@ func GetFileOrigin(path string) ([]string, error) { // Decode plist format. A list of strings is expected var urls []string - if _, err = plist.Unmarshal(data, &urls); err != nil { - return nil, errors.Wrap(err, "plist unmarshal failed") + if _, err := plist.Unmarshal(data, &urls); err != nil { + return nil, fmt.Errorf("plist unmarshal failed: %w", err) } // The returned list seems to be padded with empty strings when some of diff --git a/auditbeat/module/file_integrity/flatbuffers.go b/auditbeat/module/file_integrity/flatbuffers.go index 09de9f0b5e4..97b039fea90 100644 --- a/auditbeat/module/file_integrity/flatbuffers.go +++ b/auditbeat/module/file_integrity/flatbuffers.go @@ -18,12 +18,12 @@ package file_integrity import ( + "fmt" "os" "sync" "time" flatbuffers "github.com/google/flatbuffers/go" - "github.com/pkg/errors" "github.com/elastic/beats/v7/auditbeat/module/file_integrity/schema" ) @@ -332,7 +332,7 @@ func fbDecodeHash(e *schema.Event) map[HashType]Digest { length = hash.Xxh64Length() producer = hash.Xxh64 default: - panic(errors.Errorf("unhandled hash type: %v", hashType)) + panic(fmt.Errorf("unhandled hash type: %v", hashType)) } if length > 0 { diff --git a/auditbeat/module/file_integrity/metricset.go b/auditbeat/module/file_integrity/metricset.go index cd836f472d5..258ed3a47dc 100644 --- a/auditbeat/module/file_integrity/metricset.go +++ b/auditbeat/module/file_integrity/metricset.go @@ -19,11 +19,11 @@ package file_integrity import ( "bytes" + "fmt" "os" "path/filepath" "time" - "github.com/pkg/errors" bolt "go.etcd.io/bbolt" "github.com/elastic/beats/v7/auditbeat/datastore" @@ -89,7 +89,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { r, err := NewEventReader(config) if err != nil { - return nil, errors.Wrap(err, "failed to initialize file event reader") + return nil, fmt.Errorf("failed to initialize file event reader: %w", err) } ms := &MetricSet{ @@ -154,7 +154,7 @@ func (ms *MetricSet) Close() error { func (ms *MetricSet) init(reporter mb.PushReporterV2) 
bool { bucket, err := datastore.OpenBucket(bucketName) if err != nil { - err = errors.Wrap(err, "failed to open persistent datastore") + err = fmt.Errorf("failed to open persistent datastore: %w", err) reporter.Error(err) ms.log.Errorw("Failed to initialize", "error", err) return false @@ -163,7 +163,7 @@ func (ms *MetricSet) init(reporter mb.PushReporterV2) bool { ms.fsnotifyChan, err = ms.reader.Start(reporter.Done()) if err != nil { - err = errors.Wrap(err, "failed to start fsnotify event producer") + err = fmt.Errorf("failed to start fsnotify event producer: %w", err) reporter.Error(err) ms.log.Errorw("Failed to initialize", "error", err) return false @@ -173,7 +173,7 @@ func (ms *MetricSet) init(reporter mb.PushReporterV2) bool { if ms.config.ScanAtStart { ms.scanner, err = NewFileSystemScanner(ms.config, ms.findNewPaths()) if err != nil { - err = errors.Wrap(err, "failed to initialize file scanner") + err = fmt.Errorf("failed to initialize file scanner: %w", err) reporter.Error(err) ms.log.Errorw("Failed to initialize", "error", err) return false @@ -181,7 +181,7 @@ func (ms *MetricSet) init(reporter mb.PushReporterV2) bool { ms.scanChan, err = ms.scanner.Start(reporter.Done()) if err != nil { - err = errors.Wrap(err, "failed to start file scanner") + err = fmt.Errorf("failed to start file scanner: %w", err) reporter.Error(err) ms.log.Errorw("Failed to initialize", "error", err) return false @@ -370,7 +370,7 @@ func store(b datastore.Bucket, e *Event) error { data := fbEncodeEvent(builder, e) if err := b.Store(e.Path, data); err != nil { - return errors.Wrapf(err, "failed to locally store event for %v", e.Path) + return fmt.Errorf("failed to locally store event for %v: %w", e.Path, err) } return nil } @@ -385,7 +385,7 @@ func load(b datastore.Bucket, path string) (*Event, error) { return nil }) if err != nil { - return nil, errors.Wrapf(err, "failed to load locally persisted event for %v", path) + return nil, fmt.Errorf("failed to load locally persisted event for %v: %w", path, err) } return e, nil } diff --git a/auditbeat/module/file_integrity/metricset_test.go b/auditbeat/module/file_integrity/metricset_test.go index 13e27cf82ea..a28a2adb90a 100644 --- a/auditbeat/module/file_integrity/metricset_test.go +++ b/auditbeat/module/file_integrity/metricset_test.go @@ -49,7 +49,7 @@ func TestData(t *testing.T) { go func() { time.Sleep(100 * time.Millisecond) file := filepath.Join(dir, "file.data") - ioutil.WriteFile(file, []byte("hello world"), 0600) + ioutil.WriteFile(file, []byte("hello world"), 0o600) }() ms := mbtest.NewPushMetricSetV2(t, getConfig(dir)) @@ -136,8 +136,8 @@ func TestActions(t *testing.T) { } // Create some files in first directory - ioutil.WriteFile(createdFilepath, []byte("hello world"), 0600) - ioutil.WriteFile(updatedFilepath, []byte("hello world"), 0600) + ioutil.WriteFile(createdFilepath, []byte("hello world"), 0o600) + ioutil.WriteFile(updatedFilepath, []byte("hello world"), 0o600) ms := mbtest.NewPushMetricSetV2(t, getConfig(dir, newDir)) events := mbtest.RunPushMetricSetV2(10*time.Second, 5, ms) @@ -201,7 +201,7 @@ func TestExcludedFiles(t *testing.T) { go func() { for _, f := range []string{"FILE.TXT", "FILE.TXT.SWP", "file.txt.swo", ".git/HEAD", ".gitignore"} { file := filepath.Join(dir, f) - ioutil.WriteFile(file, []byte("hello world"), 0600) + ioutil.WriteFile(file, []byte("hello world"), 0o600) } }() @@ -252,7 +252,7 @@ func TestIncludedExcludedFiles(t *testing.T) { t.Fatal(err) } - err = os.Mkdir(filepath.Join(dir, ".ssh"), 0700) + err = 
os.Mkdir(filepath.Join(dir, ".ssh"), 0o700) if err != nil { t.Fatal(err) } @@ -264,7 +264,7 @@ func TestIncludedExcludedFiles(t *testing.T) { for _, f := range []string{"FILE.TXT", ".ssh/known_hosts", ".ssh/known_hosts.swp"} { file := filepath.Join(dir, f) - err := ioutil.WriteFile(file, []byte("hello world"), 0600) + err := ioutil.WriteFile(file, []byte("hello world"), 0o600) if err != nil { t.Fatal(err) } @@ -493,7 +493,7 @@ func (e expectedEvents) validate(t *testing.T) { } defer store.Close() defer os.Remove(store.Name()) - ds := datastore.New(store.Name(), 0644) + ds := datastore.New(store.Name(), 0o644) bucket, err := ds.OpenBucket(bucketName) if err != nil { t.Fatal(err) @@ -765,7 +765,7 @@ func TestEventDelete(t *testing.T) { } defer store.Close() defer os.Remove(store.Name()) - ds := datastore.New(store.Name(), 0644) + ds := datastore.New(store.Name(), 0o644) bucket, err := ds.OpenBucket(bucketName) if err != nil { t.Fatal(err) diff --git a/auditbeat/module/file_integrity/mime_test.go b/auditbeat/module/file_integrity/mime_test.go index a4779c96fe8..cefc0cda8a7 100644 --- a/auditbeat/module/file_integrity/mime_test.go +++ b/auditbeat/module/file_integrity/mime_test.go @@ -54,7 +54,7 @@ func TestGetMimeType(t *testing.T) { for extension, sample := range mimeSamples { samplePath := filepath.Join(dir, "sample."+extension) - if err := ioutil.WriteFile(samplePath, sample, 0700); err != nil { + if err := ioutil.WriteFile(samplePath, sample, 0o700); err != nil { t.Fatal(err) } } diff --git a/auditbeat/module/file_integrity/monitor/filetree.go b/auditbeat/module/file_integrity/monitor/filetree.go index fcd9b94176d..2ff72c12cb0 100644 --- a/auditbeat/module/file_integrity/monitor/filetree.go +++ b/auditbeat/module/file_integrity/monitor/filetree.go @@ -34,10 +34,8 @@ const ( PostOrder ) -var ( - // PathSeparator can be used to override the operating system separator. - PathSeparator = string(os.PathSeparator) -) +// PathSeparator can be used to override the operating system separator. +var PathSeparator = string(os.PathSeparator) // FileTree represents a directory in a filesystem-tree structure. 
type FileTree map[string]FileTree diff --git a/auditbeat/module/file_integrity/monitor/filetree_test.go b/auditbeat/module/file_integrity/monitor/filetree_test.go index 9b7a6650035..0442aa9fdc5 100644 --- a/auditbeat/module/file_integrity/monitor/filetree_test.go +++ b/auditbeat/module/file_integrity/monitor/filetree_test.go @@ -50,24 +50,36 @@ func TestVisit(t *testing.T) { result []string isDir []bool }{ - {"/", + { + "/", []string{"/", "/tmp", "/usr", "/usr/bin", "/usr/bin/python", "/usr/bin/tar", "/usr/lib", "/usr/lib/libz.a"}, - []bool{true, true, true, true, false, false, true, false}}, - {"/usr", + []bool{true, true, true, true, false, false, true, false}, + }, + { + "/usr", []string{"/usr", "/usr/bin", "/usr/bin/python", "/usr/bin/tar", "/usr/lib", "/usr/lib/libz.a"}, - []bool{true, true, false, false, true, false}}, - {"/usr/bin", + []bool{true, true, false, false, true, false}, + }, + { + "/usr/bin", []string{"/usr/bin", "/usr/bin/python", "/usr/bin/tar"}, - []bool{true, false, false}}, - {"/usr/lib", + []bool{true, false, false}, + }, + { + "/usr/lib", []string{"/usr/lib", "/usr/lib/libz.a"}, - []bool{true, false}}, - {"/tmp/", + []bool{true, false}, + }, + { + "/tmp/", []string{"/tmp"}, - []bool{true}}, - {"/usr/bin/python", + []bool{true}, + }, + { + "/usr/bin/python", []string{"/usr/bin/python"}, - []bool{false}}, + []bool{false}, + }, } { for _, order := range []VisitOrder{PreOrder, PostOrder} { failMsg := fmt.Sprintf("test entry %d for path '%s' order:%v", testIdx, testData.dir, order) @@ -148,14 +160,17 @@ func TestVisitCancel(t *testing.T) { expected []visitParams }{ {PreOrder, "/a", []visitParams{ - {"/", true}}}, + {"/", true}, + }}, {PostOrder, "/a", []visitParams{ {"/a/b/file", false}, - {"/a/b", true}}}, + {"/a/b", true}, + }}, {PreOrder, "/a/b/file", []visitParams{ {"/", true}, {"/a", true}, - {"/a/b", true}}}, + {"/a/b", true}, + }}, } { failMsg := fmt.Sprintf("test at index %d", idx) var result []visitParams diff --git a/auditbeat/module/file_integrity/monitor/monitor_test.go b/auditbeat/module/file_integrity/monitor/monitor_test.go index 3842948ce0b..37a2fe7fdb0 100644 --- a/auditbeat/module/file_integrity/monitor/monitor_test.go +++ b/auditbeat/module/file_integrity/monitor/monitor_test.go @@ -59,7 +59,7 @@ func TestNonRecursive(t *testing.T) { testDirOps(t, dir, watcher) subdir := filepath.Join(dir, "subdir") - os.Mkdir(subdir, 0750) + os.Mkdir(subdir, 0o750) ev, err := readTimeout(t, watcher) assertNoError(t, err) @@ -68,7 +68,7 @@ func TestNonRecursive(t *testing.T) { // subdirs are not watched subfile := filepath.Join(subdir, "file.dat") - assertNoError(t, ioutil.WriteFile(subfile, []byte("foo"), 0640)) + assertNoError(t, ioutil.WriteFile(subfile, []byte("foo"), 0o640)) _, err = readTimeout(t, watcher) assert.Error(t, err) @@ -107,7 +107,7 @@ func TestRecursive(t *testing.T) { testDirOps(t, dir, watcher) subdir := filepath.Join(dir, "subdir") - os.Mkdir(subdir, 0750) + os.Mkdir(subdir, 0o750) ev, err := readTimeout(t, watcher) assertNoError(t, err) @@ -161,7 +161,7 @@ func TestRecursiveNoFollowSymlink(t *testing.T) { // Create a file in the other dir file := filepath.Join(linkedDir, "not.seen") - assertNoError(t, ioutil.WriteFile(file, []byte("hello"), 0640)) + assertNoError(t, ioutil.WriteFile(file, []byte("hello"), 0o640)) // No event is received ev, err := readTimeout(t, watcher) @@ -203,8 +203,8 @@ func TestRecursiveSubdirPermissions(t *testing.T) { for _, name := range []string{"a", "b", "c"} { path := filepath.Join(outDir, name) - assertNoError(t, 
os.Mkdir(path, 0755)) - assertNoError(t, ioutil.WriteFile(filepath.Join(path, name), []byte("Hello"), 0644)) + assertNoError(t, os.Mkdir(path, 0o755)) + assertNoError(t, ioutil.WriteFile(filepath.Join(path, name), []byte("Hello"), 0o644)) } // Make a subdir not accessible @@ -299,8 +299,8 @@ func TestRecursiveExcludedPaths(t *testing.T) { for _, name := range []string{"a", "b", "c"} { path := filepath.Join(outDir, name) - assertNoError(t, os.Mkdir(path, 0755)) - assertNoError(t, ioutil.WriteFile(filepath.Join(path, name), []byte("Hello"), 0644)) + assertNoError(t, os.Mkdir(path, 0o755)) + assertNoError(t, ioutil.WriteFile(filepath.Join(path, name), []byte("Hello"), 0o644)) } // excludes file/dir named "b" @@ -371,7 +371,7 @@ func testDirOps(t *testing.T, dir string, watcher Watcher) { fpath2 := filepath.Join(dir, "file2.txt") // Create - assertNoError(t, ioutil.WriteFile(fpath, []byte("hello"), 0640)) + assertNoError(t, ioutil.WriteFile(fpath, []byte("hello"), 0o640)) ev, err := readTimeout(t, watcher) assertNoError(t, err) @@ -382,7 +382,7 @@ func testDirOps(t *testing.T, dir string, watcher Watcher) { // Repeat the write if no event is received. Under macOS often // the write fails to generate a write event for non-recursive watcher for i := 0; i < 3; i++ { - f, err := os.OpenFile(fpath, os.O_RDWR|os.O_APPEND, 0640) + f, err := os.OpenFile(fpath, os.O_RDWR|os.O_APPEND, 0o640) assertNoError(t, err) f.WriteString(" world\n") f.Sync() diff --git a/auditbeat/module/file_integrity/monitor/recursive.go b/auditbeat/module/file_integrity/monitor/recursive.go index 14cc99379d5..aa41cdd75a2 100644 --- a/auditbeat/module/file_integrity/monitor/recursive.go +++ b/auditbeat/module/file_integrity/monitor/recursive.go @@ -18,12 +18,12 @@ package monitor import ( + "fmt" "os" "path/filepath" "github.com/fsnotify/fsnotify" "github.com/joeshaw/multierror" - "github.com/pkg/errors" "github.com/elastic/beats/v7/libbeat/logp" ) @@ -96,7 +96,7 @@ func (watcher *recursiveWatcher) addRecursive(path string) error { } if fnErr != nil { - errs = append(errs, errors.Wrapf(fnErr, "error walking path '%s'", path)) + errs = append(errs, fmt.Errorf("error walking path '%s': %w", path, fnErr)) // If FileInfo is not nil, the directory entry can be processed // even if there was some error if info == nil { @@ -107,7 +107,7 @@ func (watcher *recursiveWatcher) addRecursive(path string) error { if info.IsDir() { if err = watcher.tree.AddDir(path); err == nil { if err = watcher.inner.Add(path); err != nil { - errs = append(errs, errors.Wrapf(err, "failed adding watcher to '%s'", path)) + errs = append(errs, fmt.Errorf("failed adding watcher to '%s': %w", path, err)) return nil } } @@ -119,7 +119,7 @@ func (watcher *recursiveWatcher) addRecursive(path string) error { watcher.log.Debugw("Added recursive watch", "path", path) if err != nil { - errs = append(errs, errors.Wrapf(err, "failed to walk path '%s'", path)) + errs = append(errs, fmt.Errorf("failed to walk path '%s': %w", path, err)) } return errs.Err() } @@ -166,7 +166,7 @@ func (watcher *recursiveWatcher) forwardEvents() error { case fsnotify.Create: err := watcher.addRecursive(event.Name) if err != nil { - watcher.inner.Errors <- errors.Wrapf(err, "failed to add created path '%s'", event.Name) + watcher.inner.Errors <- fmt.Errorf("failed to add created path '%s': %w", event.Name, err) } err = watcher.tree.Visit(event.Name, PreOrder, func(path string, _ bool) error { watcher.deliver(fsnotify.Event{ @@ -176,7 +176,7 @@ func (watcher 
*recursiveWatcher) forwardEvents() error { return nil }) if err != nil { - watcher.inner.Errors <- errors.Wrapf(err, "failed to visit created path '%s'", event.Name) + watcher.inner.Errors <- fmt.Errorf("failed to visit created path '%s': %w", event.Name, err) } case fsnotify.Remove: @@ -188,12 +188,12 @@ func (watcher *recursiveWatcher) forwardEvents() error { return nil }) if err != nil { - watcher.inner.Errors <- errors.Wrapf(err, "failed to visit removed path '%s'", event.Name) + watcher.inner.Errors <- fmt.Errorf("failed to visit removed path '%s': %w", event.Name, err) } err = watcher.tree.Remove(event.Name) if err != nil { - watcher.inner.Errors <- errors.Wrapf(err, "failed to visit removed path '%s'", event.Name) + watcher.inner.Errors <- fmt.Errorf("failed to visit removed path '%s': %w", event.Name, err) } // Handling rename (move) as a special case to give this recursion @@ -203,7 +203,7 @@ func (watcher *recursiveWatcher) forwardEvents() error { case fsnotify.Rename: err := watcher.tree.Remove(event.Name) if err != nil { - watcher.inner.Errors <- errors.Wrapf(err, "failed to remove path '%s'", event.Name) + watcher.inner.Errors <- fmt.Errorf("failed to remove path '%s': %w", event.Name, err) } fallthrough diff --git a/auditbeat/module/file_integrity/scanner_test.go b/auditbeat/module/file_integrity/scanner_test.go index 7c53eda8e2d..74bfb46aea8 100644 --- a/auditbeat/module/file_integrity/scanner_test.go +++ b/auditbeat/module/file_integrity/scanner_test.go @@ -126,11 +126,11 @@ func setupTestDir(t *testing.T) string { t.Fatal(err) } - if err = ioutil.WriteFile(filepath.Join(dir, "a"), []byte("file a"), 0600); err != nil { + if err = ioutil.WriteFile(filepath.Join(dir, "a"), []byte("file a"), 0o600); err != nil { t.Fatal(err) } - if err = ioutil.WriteFile(filepath.Join(dir, "b"), []byte("file b"), 0600); err != nil { + if err = ioutil.WriteFile(filepath.Join(dir, "b"), []byte("file b"), 0o600); err != nil { t.Fatal(err) } @@ -138,11 +138,11 @@ func setupTestDir(t *testing.T) string { t.Fatal(err) } - if err = os.Mkdir(filepath.Join(dir, "subdir"), 0700); err != nil { + if err = os.Mkdir(filepath.Join(dir, "subdir"), 0o700); err != nil { t.Fatal(err) } - if err = ioutil.WriteFile(filepath.Join(dir, "subdir", "c"), []byte("file c"), 0600); err != nil { + if err = ioutil.WriteFile(filepath.Join(dir, "subdir", "c"), []byte("file c"), 0o600); err != nil { t.Fatal(err) } diff --git a/auditbeat/scripts/mage/config.go b/auditbeat/scripts/mage/config.go index d6a1f6b1424..f9c4cb434b8 100644 --- a/auditbeat/scripts/mage/config.go +++ b/auditbeat/scripts/mage/config.go @@ -18,10 +18,9 @@ package mage import ( + "fmt" "path/filepath" - "github.com/pkg/errors" - devtools "github.com/elastic/beats/v7/dev-tools/mage" ) @@ -56,12 +55,12 @@ func configFileParams(dirs ...string) (devtools.ConfigFileParams, error) { configFiles, err := devtools.FindFiles(globs...) if err != nil { - return devtools.ConfigFileParams{}, errors.Wrap(err, "failed to find config templates") + return devtools.ConfigFileParams{}, fmt.Errorf("failed to find config templates: %w", err) } if len(configFiles) == 0 { - return devtools.ConfigFileParams{}, errors.Errorf("no config files found in %v", globs) + return devtools.ConfigFileParams{}, fmt.Errorf("no config files found in %v", globs) } - devtools.MustFileConcat("build/config.modules.yml.tmpl", 0644, configFiles...) + devtools.MustFileConcat("build/config.modules.yml.tmpl", 0o644, configFiles...) 
p := devtools.DefaultConfigFileParams() p.Templates = append(p.Templates, devtools.OSSBeatDir("_meta/config/*.tmpl")) diff --git a/auditbeat/scripts/mage/docs.go b/auditbeat/scripts/mage/docs.go index d28d1df6258..d26c8759b88 100644 --- a/auditbeat/scripts/mage/docs.go +++ b/auditbeat/scripts/mage/docs.go @@ -18,12 +18,12 @@ package mage import ( + "fmt" "os" "path/filepath" "strings" "github.com/magefile/mage/sh" - "github.com/pkg/errors" devtools "github.com/elastic/beats/v7/dev-tools/mage" ) @@ -40,7 +40,7 @@ func ModuleDocs() error { for _, path := range dirsWithModules { files, err := devtools.FindFiles(filepath.Join(path, configTemplateGlob)) if err != nil { - return errors.Wrap(err, "failed to find config templates") + return fmt.Errorf("failed to find config templates: %w", err) } configFiles = append(configFiles, files...) @@ -65,7 +65,7 @@ func ModuleDocs() error { if err := os.RemoveAll(filepath.Join(path, "docs/modules")); err != nil { return err } - if err := os.MkdirAll(filepath.Join(path, "docs/modules"), 0755); err != nil { + if err := os.MkdirAll(filepath.Join(path, "docs/modules"), 0o755); err != nil { return err } } diff --git a/auditbeat/scripts/mage/package.go b/auditbeat/scripts/mage/package.go index 94b2a7cde6d..fbe43fa9d3b 100644 --- a/auditbeat/scripts/mage/package.go +++ b/auditbeat/scripts/mage/package.go @@ -18,7 +18,8 @@ package mage import ( - "github.com/pkg/errors" + "errors" + "fmt" devtools "github.com/elastic/beats/v7/dev-tools/mage" ) @@ -40,7 +41,7 @@ const ( func CustomizePackaging(pkgFlavor PackagingFlavor) { var ( shortConfig = devtools.PackageFile{ - Mode: 0600, + Mode: 0o600, Source: "{{.PackageDir}}/auditbeat.yml", Dep: func(spec devtools.PackageSpec) error { return generateConfig(pkgFlavor, devtools.ShortConfigType, spec) @@ -48,7 +49,7 @@ func CustomizePackaging(pkgFlavor PackagingFlavor) { Config: true, } referenceConfig = devtools.PackageFile{ - Mode: 0644, + Mode: 0o644, Source: "{{.PackageDir}}/auditbeat.reference.yml", Dep: func(spec devtools.PackageSpec) error { return generateConfig(pkgFlavor, devtools.ReferenceConfigType, spec) @@ -61,7 +62,7 @@ func CustomizePackaging(pkgFlavor PackagingFlavor) { defaultSampleRulesTarget = "audit.rules.d/sample-rules.conf.disabled" ) sampleRules := devtools.PackageFile{ - Mode: 0644, + Mode: 0o644, Source: sampleRulesSource, Dep: func(spec devtools.PackageSpec) error { if spec.OS != "linux" { @@ -76,7 +77,7 @@ func CustomizePackaging(pkgFlavor PackagingFlavor) { ) if err := devtools.Copy(origin, spec.MustExpand(sampleRulesSource)); err != nil { - return errors.Wrap(err, "failed to copy sample rules") + return fmt.Errorf("failed to copy sample rules: %w", err) } return nil }, @@ -96,7 +97,7 @@ func CustomizePackaging(pkgFlavor PackagingFlavor) { sampleRulesTarget = "/etc/{{.BeatName}}/" + defaultSampleRulesTarget case devtools.Docker: default: - panic(errors.Errorf("unhandled package type: %v", pkgType)) + panic(fmt.Errorf("unhandled package type: %v", pkgType)) } if args.OS == "linux" { @@ -115,7 +116,7 @@ func generateConfig(pkgFlavor PackagingFlavor, ct devtools.ConfigFileType, spec case XPackPackaging: args = XPackConfigFileParams() default: - panic(errors.Errorf("Invalid packaging flavor (either oss or xpack): %v", pkgFlavor)) + panic(fmt.Errorf("Invalid packaging flavor (either oss or xpack): %v", pkgFlavor)) } // PackageDir isn't exported but we can grab it's value this way. 
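A standalone sketch of the substitution these hunks perform over and over: fmt.Errorf with the %w verb takes the place of errors.Wrapf while keeping the original error reachable through the standard errors.Is/errors.As helpers. The path and message below are illustrative, not taken from the module:

package main

import (
    "errors"
    "fmt"
    "io/fs"
    "os"
)

func open(path string) error {
    if _, err := os.Open(path); err != nil {
        // Stdlib equivalent of errors.Wrapf(err, "error opening %v", path).
        return fmt.Errorf("error opening %v: %w", path, err)
    }
    return nil
}

func main() {
    err := open("/no/such/path")
    fmt.Println(err)                            // error opening /no/such/path: ...
    fmt.Println(errors.Is(err, fs.ErrNotExist)) // true: the wrap chain is preserved
}
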
diff --git a/x-pack/auditbeat/module/system/host/host.go b/x-pack/auditbeat/module/system/host/host.go index 3a4bb38dee9..30df5acc64e 100644 --- a/x-pack/auditbeat/module/system/host/host.go +++ b/x-pack/auditbeat/module/system/host/host.go @@ -17,7 +17,6 @@ import ( "github.com/cespare/xxhash/v2" "github.com/joeshaw/multierror" - "github.com/pkg/errors" "github.com/elastic/beats/v7/auditbeat/datastore" "github.com/elastic/beats/v7/libbeat/common" @@ -189,12 +188,12 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { config := defaultConfig() if err := base.Module().UnpackConfig(&config); err != nil { - return nil, errors.Wrapf(err, "failed to unpack the %v/%v config", moduleName, metricsetName) + return nil, fmt.Errorf("failed to unpack the %v/%v config: %w", moduleName, metricsetName, err) } bucket, err := datastore.OpenBucket(bucketName) if err != nil { - return nil, errors.Wrap(err, "failed to open persistent datastore") + return nil, fmt.Errorf("failed to open persistent datastore: %w", err) } ms := &MetricSet{ @@ -207,7 +206,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { // Load state (lastHost) from disk err = ms.restoreStateFromDisk() if err != nil { - return nil, errors.Wrap(err, "failed to restore state from disk") + return nil, fmt.Errorf("failed to restore state from disk: %w", err) } return ms, nil @@ -319,7 +318,7 @@ func (ms *MetricSet) reportChanges(report mb.ReporterV2) error { func getHost() (*Host, error) { sysinfoHost, err := sysinfo.Host() if err != nil { - return nil, errors.Wrap(err, "failed to load host information") + return nil, fmt.Errorf("failed to load host information: %w", err) } ips, macs, err := getNetInfo() @@ -432,12 +431,12 @@ func (ms *MetricSet) saveStateToDisk() error { if ms.lastHost != nil { err := encoder.Encode(*ms.lastHost) if err != nil { - return errors.Wrap(err, "error encoding host information") + return fmt.Errorf("error encoding host information: %w", err) } err = ms.bucket.Store(bucketKeyLastHost, buf.Bytes()) if err != nil { - return errors.Wrap(err, "error writing host information to disk") + return fmt.Errorf("error writing host information to disk: %w", err) } ms.log.Debug("Wrote host information to disk.") @@ -464,7 +463,7 @@ func (ms *MetricSet) restoreStateFromDisk() error { if err == nil { ms.lastHost = &lastHost } else if err != io.EOF { - return errors.Wrap(err, "error decoding host information") + return fmt.Errorf("error decoding host information: %w", err) } } diff --git a/x-pack/auditbeat/module/system/login/login.go b/x-pack/auditbeat/module/system/login/login.go index 82c1e384ab7..19d1753e23b 100644 --- a/x-pack/auditbeat/module/system/login/login.go +++ b/x-pack/auditbeat/module/system/login/login.go @@ -12,8 +12,6 @@ import ( "net" "time" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/auditbeat/datastore" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/common/cfgwarn" @@ -97,12 +95,12 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { config := defaultConfig() if err := base.Module().UnpackConfig(&config); err != nil { - return nil, errors.Wrapf(err, "failed to unpack the %v/%v config", moduleName, metricsetName) + return nil, fmt.Errorf("failed to unpack the %v/%v config: %w", moduleName, metricsetName, err) } bucket, err := datastore.OpenBucket(bucketName) if err != nil { - return nil, errors.Wrap(err, "failed to open persistent datastore") + return nil, fmt.Errorf("failed to 
open persistent datastore: %w", err) } ms := &MetricSet{ diff --git a/x-pack/auditbeat/module/system/login/login_test.go b/x-pack/auditbeat/module/system/login/login_test.go index e0a6fe35767..fcdb5f6983a 100644 --- a/x-pack/auditbeat/module/system/login/login_test.go +++ b/x-pack/auditbeat/module/system/login/login_test.go @@ -97,7 +97,7 @@ func TestWtmp(t *testing.T) { "Timestamp is not equal: %+v", events[0].Timestamp) // Append logout event to wtmp file and check that it's read - wtmpFile, err := os.OpenFile(wtmpFilepath, os.O_APPEND|os.O_WRONLY, 0644) + wtmpFile, err := os.OpenFile(wtmpFilepath, os.O_APPEND|os.O_WRONLY, 0o644) if err != nil { t.Fatalf("error opening %v: %v", wtmpFilepath, err) } @@ -264,7 +264,6 @@ func checkFieldValue(t *testing.T, mapstr common.MapStr, fieldName string, field default: assert.Equal(t, fieldValue, v) } - } } diff --git a/x-pack/auditbeat/module/system/login/utmp.go b/x-pack/auditbeat/module/system/login/utmp.go index ee0b8b2f5fc..f547562db57 100644 --- a/x-pack/auditbeat/module/system/login/utmp.go +++ b/x-pack/auditbeat/module/system/login/utmp.go @@ -10,6 +10,7 @@ package login import ( "bytes" "encoding/gob" + "fmt" "io" "net" "os" @@ -19,8 +20,6 @@ import ( "strconv" "syscall" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/auditbeat/datastore" "github.com/elastic/beats/v7/libbeat/logp" ) @@ -77,7 +76,7 @@ func NewUtmpFileReader(log *logp.Logger, bucket datastore.Bucket, config config) // Load state (file records, tty mapping) from disk err := r.restoreStateFromDisk() if err != nil { - return nil, errors.Wrap(err, "failed to restore state from disk") + return nil, fmt.Errorf("failed to restore state from disk: %w", err) } return r, nil @@ -103,13 +102,13 @@ func (r *UtmpFileReader) ReadNew() (<-chan LoginRecord, <-chan error) { wtmpFiles, err := r.findFiles(r.config.WtmpFilePattern, Wtmp) if err != nil { - errorC <- errors.Wrap(err, "failed to expand file pattern") + errorC <- fmt.Errorf("failed to expand file pattern: %w", err) return } btmpFiles, err := r.findFiles(r.config.BtmpFilePattern, Btmp) if err != nil { - errorC <- errors.Wrap(err, "failed to expand file pattern") + errorC <- fmt.Errorf("failed to expand file pattern: %w", err) return } @@ -127,7 +126,7 @@ func (r *UtmpFileReader) ReadNew() (<-chan LoginRecord, <-chan error) { func (r *UtmpFileReader) findFiles(filePattern string, utmpType UtmpType) ([]UtmpFile, error) { paths, err := filepath.Glob(filePattern) if err != nil { - return nil, errors.Wrapf(err, "failed to expand file pattern %v", filePattern) + return nil, fmt.Errorf("failed to expand file pattern %v: %w", filePattern, err) } // Sort paths in reverse order (oldest/most-rotated file first) @@ -142,7 +141,7 @@ func (r *UtmpFileReader) findFiles(filePattern string, utmpType UtmpType) ([]Utm r.log.Debugf("File %v does not exist anymore.", path) continue } else { - return nil, errors.Wrapf(err, "unexpected error when looking up file %v", path) + return nil, fmt.Errorf("unexpected error when looking up file %v: %w", path, err) } } @@ -196,7 +195,7 @@ func (r *UtmpFileReader) readNewInFile(loginRecordC chan<- LoginRecord, errorC c // Empty new file - save but don't read. 
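// ReadNew above reports failures on a dedicated error channel rather than
// through a return value, which is why each wrapped error in these hunks
// is sent on errorC. A compact, self-contained sketch of that producer
// shape (the record type and inputs are hypothetical, not the module's):

package main

import (
    "errors"
    "fmt"
)

var errEmpty = errors.New("empty entry")

type record struct{ line string }

// readAll streams records on one channel and failures on another, closing
// both when the producer goroutine finishes.
func readAll(lines []string) (<-chan record, <-chan error) {
    recC := make(chan record)
    errC := make(chan error, 1)
    go func() {
        defer close(recC)
        defer close(errC)
        for _, l := range lines {
            if l == "" {
                errC <- fmt.Errorf("error reading entry: %w", errEmpty)
                return
            }
            recC <- record{line: l}
        }
    }()
    return recC, errC
}

func main() {
    recC, errC := readAll([]string{"a", "", "b"})
    for r := range recC {
        fmt.Println("record:", r.line)
    }
    if err := <-errC; err != nil {
        fmt.Println("error:", err)
    }
}
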
err := r.updateSavedUtmpFile(utmpFile, nil) if err != nil { - errorC <- errors.Wrapf(err, "error updating file record for file %v", utmpFile.Path) + errorC <- fmt.Errorf("error updating file record for file %v: %w", utmpFile.Path, err) } return } @@ -206,7 +205,7 @@ func (r *UtmpFileReader) readNewInFile(loginRecordC chan<- LoginRecord, errorC c f, err := os.Open(utmpFile.Path) if err != nil { - errorC <- errors.Wrapf(err, "error opening file %v", utmpFile.Path) + errorC <- fmt.Errorf("error opening file %v: %w", utmpFile.Path, err) return } defer func() { @@ -214,7 +213,7 @@ func (r *UtmpFileReader) readNewInFile(loginRecordC chan<- LoginRecord, errorC c // otherwise we will just keep trying to re-read very frequently forever. err := r.updateSavedUtmpFile(utmpFile, f) if err != nil { - errorC <- errors.Wrapf(err, "error updating file record for file %v", utmpFile.Path) + errorC <- fmt.Errorf("error updating file record for file %v: %w", utmpFile.Path, err) } f.Close() @@ -225,7 +224,7 @@ func (r *UtmpFileReader) readNewInFile(loginRecordC chan<- LoginRecord, errorC c if size >= oldSize && utmpFile.Offset <= size { _, err = f.Seek(utmpFile.Offset, 0) if err != nil { - errorC <- errors.Wrapf(err, "error setting offset %d for file %v", utmpFile.Offset, utmpFile.Path) + errorC <- fmt.Errorf("error setting offset %d for file %v: %w", utmpFile.Offset, utmpFile.Path, err) } } @@ -234,7 +233,7 @@ func (r *UtmpFileReader) readNewInFile(loginRecordC chan<- LoginRecord, errorC c if size < oldSize || utmpFile.Offset > size || err != nil { _, err = f.Seek(0, 0) if err != nil { - errorC <- errors.Wrapf(err, "error setting offset 0 for file %v", utmpFile.Path) + errorC <- fmt.Errorf("error setting offset 0 for file %v: %w", utmpFile.Path, err) // Even that did not work, so return. return @@ -244,7 +243,7 @@ func (r *UtmpFileReader) readNewInFile(loginRecordC chan<- LoginRecord, errorC c for { utmp, err := ReadNextUtmp(f) if err != nil && err != io.EOF { - errorC <- errors.Wrapf(err, "error reading entry in UTMP file %v", utmpFile.Path) + errorC <- fmt.Errorf("error reading entry in UTMP file %v: %w", utmpFile.Path, err) return } @@ -279,7 +278,7 @@ func (r *UtmpFileReader) updateSavedUtmpFile(utmpFile UtmpFile, f *os.File) erro if f != nil { offset, err := f.Seek(0, 1) if err != nil { - return errors.Wrap(err, "error calling Seek") + return fmt.Errorf("error calling Seek: %w", err) } utmpFile.Offset = offset } @@ -314,7 +313,7 @@ func (r *UtmpFileReader) processBadLoginRecord(utmp *Utmp) (*LoginRecord, error) record.Hostname = utmp.UtHost default: // This should not happen. 
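// The save/restore helpers a few hunks below persist file records and
// login sessions with encoding/gob, decoding in a loop until io.EOF. A
// standalone sketch of that encode / decode-until-EOF round trip (string
// records stand in for the module's real struct types):

package main

import (
    "bytes"
    "encoding/gob"
    "errors"
    "fmt"
    "io"
)

func main() {
    var buf bytes.Buffer
    enc := gob.NewEncoder(&buf)
    for _, rec := range []string{"wtmp-record", "btmp-record"} {
        if err := enc.Encode(rec); err != nil {
            panic(fmt.Errorf("error encoding record: %w", err))
        }
    }

    dec := gob.NewDecoder(&buf)
    for {
        var rec string
        err := dec.Decode(&rec)
        if errors.Is(err, io.EOF) {
            break // read all
        }
        if err != nil {
            panic(fmt.Errorf("error decoding record: %w", err))
        }
        fmt.Println("restored:", rec)
    }
}
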
- return nil, errors.Errorf("UTMP record with unexpected type %v in bad login file", utmp.UtType) + return nil, fmt.Errorf("UTMP record with unexpected type %v in bad login file", utmp.UtType) } return &record, nil @@ -470,13 +469,13 @@ func (r *UtmpFileReader) saveFileRecordsToDisk() error { for _, utmpFile := range r.savedUtmpFiles { err := encoder.Encode(utmpFile) if err != nil { - return errors.Wrap(err, "error encoding UTMP file record") + return fmt.Errorf("error encoding UTMP file record: %w", err) } } err := r.bucket.Store(bucketKeyFileRecords, buf.Bytes()) if err != nil { - return errors.Wrap(err, "error writing UTMP file records to disk") + return fmt.Errorf("error writing UTMP file records to disk: %w", err) } r.log.Debugf("Wrote %d UTMP file records to disk", len(r.savedUtmpFiles)) @@ -490,13 +489,13 @@ func (r *UtmpFileReader) saveLoginSessionsToDisk() error { for _, loginRecord := range r.loginSessions { err := encoder.Encode(loginRecord) if err != nil { - return errors.Wrap(err, "error encoding login record") + return fmt.Errorf("error encoding login record: %w", err) } } err := r.bucket.Store(bucketKeyLoginSessions, buf.Bytes()) if err != nil { - return errors.Wrap(err, "error writing login records to disk") + return fmt.Errorf("error writing login records to disk: %w", err) } r.log.Debugf("Wrote %d open login sessions to disk", len(r.loginSessions)) @@ -540,7 +539,7 @@ func (r *UtmpFileReader) restoreFileRecordsFromDisk() error { // Read all break } else { - return errors.Wrap(err, "error decoding file record") + return fmt.Errorf("error decoding file record: %w", err) } } } @@ -572,7 +571,7 @@ func (r *UtmpFileReader) restoreLoginSessionsFromDisk() error { // Read all break } else { - return errors.Wrap(err, "error decoding login record") + return fmt.Errorf("error decoding login record: %w", err) } } } diff --git a/x-pack/auditbeat/module/system/package/package.go b/x-pack/auditbeat/module/system/package/package.go index b4eddbb726f..8909fbdafbf 100644 --- a/x-pack/auditbeat/module/system/package/package.go +++ b/x-pack/auditbeat/module/system/package/package.go @@ -23,7 +23,6 @@ import ( "github.com/cespare/xxhash/v2" "github.com/gofrs/uuid" "github.com/joeshaw/multierror" - "github.com/pkg/errors" "github.com/elastic/beats/v7/auditbeat/datastore" "github.com/elastic/beats/v7/libbeat/common" @@ -203,12 +202,12 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { config := defaultConfig() if err := base.Module().UnpackConfig(&config); err != nil { - return nil, errors.Wrapf(err, "failed to unpack the %v/%v config", moduleName, metricsetName) + return nil, fmt.Errorf("failed to unpack the %v/%v config: %w", moduleName, metricsetName, err) } bucket, err := datastore.OpenBucket(bucketName) if err != nil { - return nil, errors.Wrap(err, "failed to open persistent datastore") + return nil, fmt.Errorf("failed to open persistent datastore: %w", err) } ms := &MetricSet{ @@ -238,7 +237,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { // Load from disk: Packages packages, err := ms.restorePackagesFromDisk() if err != nil { - return nil, errors.Wrap(err, "failed to restore packages from disk") + return nil, fmt.Errorf("failed to restore packages from disk: %w", err) } ms.log.Debugf("Restored %d packages from disk", len(packages)) @@ -286,12 +285,12 @@ func (ms *MetricSet) reportState(report mb.ReporterV2) error { packages, err := ms.getPackages() if err != nil { - return errors.Wrap(err, "failed to get packages") + 
return fmt.Errorf("failed to get packages: %w", err) } stateID, err := uuid.NewV4() if err != nil { - return errors.Wrap(err, "error generating state ID") + return fmt.Errorf("error generating state ID: %w", err) } for _, pkg := range packages { event := ms.packageEvent(pkg, eventTypeState, eventActionExistingPackage) @@ -309,7 +308,7 @@ func (ms *MetricSet) reportState(report mb.ReporterV2) error { } err = ms.bucket.Store(bucketKeyStateTimestamp, timeBytes) if err != nil { - return errors.Wrap(err, "error writing state timestamp to disk") + return fmt.Errorf("error writing state timestamp to disk: %w", err) } return ms.savePackagesToDisk(packages) @@ -319,7 +318,7 @@ func (ms *MetricSet) reportState(report mb.ReporterV2) error { func (ms *MetricSet) reportChanges(report mb.ReporterV2) error { packages, err := ms.getPackages() if err != nil { - return errors.Wrap(err, "failed to get packages") + return fmt.Errorf("failed to get packages: %w", err) } newInCache, missingFromCache := ms.cache.DiffAndUpdateCache(convertToCacheable(packages)) @@ -449,7 +448,7 @@ func (ms *MetricSet) restorePackagesFromDisk() (packages []*Package, err error) // Read all packages break } else { - return nil, errors.Wrap(err, "error decoding packages") + return nil, fmt.Errorf("error decoding packages: %w", err) } } } @@ -465,13 +464,13 @@ func (ms *MetricSet) savePackagesToDisk(packages []*Package) error { for _, pkg := range packages { err := encoder.Encode(*pkg) if err != nil { - return errors.Wrap(err, "error encoding packages") + return fmt.Errorf("error encoding packages: %w", err) } } err := ms.bucket.Store(bucketKeyPackages, buf.Bytes()) if err != nil { - return errors.Wrap(err, "error writing packages to disk") + return fmt.Errorf("error writing packages to disk: %w", err) } return nil } @@ -485,13 +484,13 @@ func (ms *MetricSet) getPackages() (packages []*Package, err error) { rpmPackages, err := listRPMPackages() if err != nil { - return nil, errors.Wrap(err, "error getting RPM packages") + return nil, fmt.Errorf("error getting RPM packages: %w", err) } ms.log.Debugf("RPM packages: %v", len(rpmPackages)) packages = append(packages, rpmPackages...) } else if err != nil && !os.IsNotExist(err) { - return nil, errors.Wrapf(err, "error opening %v", rpmPath) + return nil, fmt.Errorf("error opening %v: %w", rpmPath, err) } _, err = os.Stat(dpkgPath) @@ -500,13 +499,13 @@ func (ms *MetricSet) getPackages() (packages []*Package, err error) { dpkgPackages, err := ms.listDebPackages() if err != nil { - return nil, errors.Wrap(err, "error getting DEB packages") + return nil, fmt.Errorf("error getting DEB packages: %w", err) } ms.log.Debugf("DEB packages: %v", len(dpkgPackages)) packages = append(packages, dpkgPackages...) } else if err != nil && !os.IsNotExist(err) { - return nil, errors.Wrapf(err, "error opening %v", dpkgPath) + return nil, fmt.Errorf("error opening %v: %w", dpkgPath, err) } _, err = os.Stat(homebrewCellarPath) @@ -515,13 +514,13 @@ func (ms *MetricSet) getPackages() (packages []*Package, err error) { homebrewPackages, err := listBrewPackages() if err != nil { - return nil, errors.Wrap(err, "error getting Homebrew packages") + return nil, fmt.Errorf("error getting Homebrew packages: %w", err) } ms.log.Debugf("Homebrew packages: %v", len(homebrewPackages)) packages = append(packages, homebrewPackages...) 
} else if err != nil && !os.IsNotExist(err) { - return nil, errors.Wrapf(err, "error opening %v", homebrewCellarPath) + return nil, fmt.Errorf("error opening %v: %w", homebrewCellarPath, err) } if !foundPackageManager && !ms.suppressNoPackageWarnings { @@ -540,7 +539,7 @@ func (ms *MetricSet) listDebPackages() ([]*Package, error) { file, err := os.Open(dpkgStatusFile) if err != nil { - return nil, errors.Wrapf(err, "error opening %s", dpkgStatusFile) + return nil, fmt.Errorf("error opening %s: %w", dpkgStatusFile, err) } defer file.Close() @@ -611,7 +610,7 @@ func (ms *MetricSet) listDebPackages() ([]*Package, error) { } if err = scanner.Err(); err != nil { - return nil, errors.Wrapf(err, "error scanning file %v", dpkgStatusFile) + return nil, fmt.Errorf("error scanning file %v: %w", dpkgStatusFile, err) } // Append last package if file ends without newline diff --git a/x-pack/auditbeat/module/system/package/package_homebrew.go b/x-pack/auditbeat/module/system/package/package_homebrew.go index 8227e5e0241..a04bb9de17c 100644 --- a/x-pack/auditbeat/module/system/package/package_homebrew.go +++ b/x-pack/auditbeat/module/system/package/package_homebrew.go @@ -10,12 +10,11 @@ package pkg import ( "bufio" "encoding/json" + "fmt" "io/ioutil" "os" "path" "strings" - - "github.com/pkg/errors" ) // InstallReceiptSource represents the "source" object in Homebrew's INSTALL_RECEIPT.json. @@ -42,7 +41,7 @@ func listBrewPackages() ([]*Package, error) { pkgPath := path.Join(homebrewCellarPath, packageDir.Name()) versions, err := ioutil.ReadDir(pkgPath) if err != nil { - return nil, errors.Wrapf(err, "error reading directory: %s", pkgPath) + return nil, fmt.Errorf("error reading directory: %s: %w", pkgPath, err) } for _, version := range versions { @@ -61,12 +60,12 @@ func listBrewPackages() ([]*Package, error) { installReceiptPath := path.Join(homebrewCellarPath, pkg.Name, pkg.Version, "INSTALL_RECEIPT.json") contents, err := ioutil.ReadFile(installReceiptPath) if err != nil { - pkg.error = errors.Wrapf(err, "error reading %v", installReceiptPath) + pkg.error = fmt.Errorf("error reading %v: %w", installReceiptPath, err) } else { var installReceipt InstallReceipt err = json.Unmarshal(contents, &installReceipt) if err != nil { - pkg.error = errors.Wrapf(err, "error unmarshalling JSON in %v", installReceiptPath) + pkg.error = fmt.Errorf("error unmarshalling JSON in %v: %w", installReceiptPath, err) } else { formulaPath = installReceipt.Source.Path } @@ -79,7 +78,7 @@ func listBrewPackages() ([]*Package, error) { file, err := os.Open(formulaPath) if err != nil { - pkg.error = errors.Wrapf(err, "error reading %v", formulaPath) + pkg.error = fmt.Errorf("error reading %v: %w", formulaPath, err) } else { defer file.Close() @@ -98,7 +97,7 @@ func listBrewPackages() ([]*Package, error) { } } if err = scanner.Err(); err != nil { - pkg.error = errors.Wrapf(err, "error parsing %v", formulaPath) + pkg.error = fmt.Errorf("error parsing %v: %w", formulaPath, err) } } diff --git a/x-pack/auditbeat/module/system/package/package_test.go b/x-pack/auditbeat/module/system/package/package_test.go index 25b53a3feb8..e745d1e7632 100644 --- a/x-pack/auditbeat/module/system/package/package_test.go +++ b/x-pack/auditbeat/module/system/package/package_test.go @@ -193,7 +193,7 @@ func TestPackageGobEncodeDecode(t *testing.T) { if *flagUpdateGob { // NOTE: If you are updating this file then you may have introduced a // a breaking change. 
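// The flag-gated rewrite just below is the usual golden-file pattern: a
// test flag regenerates the fixture on demand, otherwise the test
// compares against the committed bytes. A standalone sketch of the
// pattern (flag name, path, and contents here are hypothetical):

package pkg_test

import (
    "bytes"
    "flag"
    "os"
    "testing"
)

var update = flag.Bool("update", false, "rewrite the golden file")

func TestGolden(t *testing.T) {
    got := []byte("encoded form\n")            // stand-in for the gob-encoded bytes
    const golden = "testdata/example.golden"   // hypothetical fixture path
    if *update {
        if err := os.WriteFile(golden, got, 0o644); err != nil {
            t.Fatal(err)
        }
    }
    want, err := os.ReadFile(golden)
    if err != nil {
        t.Fatal(err)
    }
    if !bytes.Equal(want, got) {
        t.Errorf("golden mismatch: got %q, want %q", got, want)
    }
}
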
- if err := ioutil.WriteFile(gobTestFile, buf.Bytes(), 0644); err != nil { + if err := ioutil.WriteFile(gobTestFile, buf.Bytes(), 0o644); err != nil { t.Fatal(err) } } diff --git a/x-pack/auditbeat/module/system/package/rpm_linux.go b/x-pack/auditbeat/module/system/package/rpm_linux.go index 3215bcf9c16..58e9a9a4a89 100644 --- a/x-pack/auditbeat/module/system/package/rpm_linux.go +++ b/x-pack/auditbeat/module/system/package/rpm_linux.go @@ -214,11 +214,11 @@ func (lib *librpm) close() error { // version number. getLibrpmNames looks at the elf header for the rpm // binary to determine what version of librpm.so it is linked against. func getLibrpmNames() []string { - var rpmPaths = []string{ + rpmPaths := []string{ "/usr/bin/rpm", "/bin/rpm", } - var libNames = []string{ + libNames := []string{ "librpm.so", } var rpmElf *elf.File @@ -249,7 +249,6 @@ func getLibrpmNames() []string { } func openLibrpm() (*librpm, error) { - var librpm librpm var err error @@ -383,7 +382,6 @@ func listRPMPackages() ([]*Package, error) { } func packageFromHeader(header C.Header, openedLibrpm *librpm) (*Package, error) { - header = C.my_headerLink(openedLibrpm.headerLink, header) if header == nil { return nil, fmt.Errorf("Error calling headerLink") diff --git a/x-pack/auditbeat/module/system/package/rpm_others.go b/x-pack/auditbeat/module/system/package/rpm_others.go index 8e441026ff3..77cd42f4214 100644 --- a/x-pack/auditbeat/module/system/package/rpm_others.go +++ b/x-pack/auditbeat/module/system/package/rpm_others.go @@ -8,7 +8,7 @@ package pkg -import "github.com/pkg/errors" +import "errors" func listRPMPackages() ([]*Package, error) { return nil, errors.New("listing RPM packages is only supported on Linux") diff --git a/x-pack/auditbeat/module/system/process/namepace_linux.go b/x-pack/auditbeat/module/system/process/namepace_linux.go index 148f22910b4..62146f7a203 100644 --- a/x-pack/auditbeat/module/system/process/namepace_linux.go +++ b/x-pack/auditbeat/module/system/process/namepace_linux.go @@ -8,11 +8,10 @@ package process import ( + "errors" "fmt" "os" "syscall" - - "github.com/pkg/errors" ) // isNsSharedWith returns whether the process with the given pid shares the diff --git a/x-pack/auditbeat/module/system/process/process.go b/x-pack/auditbeat/module/system/process/process.go index 74b48ad6118..8895222cec8 100644 --- a/x-pack/auditbeat/module/system/process/process.go +++ b/x-pack/auditbeat/module/system/process/process.go @@ -15,7 +15,6 @@ import ( "github.com/cespare/xxhash/v2" "github.com/gofrs/uuid" - "github.com/pkg/errors" "github.com/elastic/beats/v7/auditbeat/datastore" "github.com/elastic/beats/v7/auditbeat/helper/hasher" @@ -148,12 +147,12 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { config := defaultConfig if err := base.Module().UnpackConfig(&config); err != nil { - return nil, errors.Wrapf(err, "failed to unpack the %v/%v config", moduleName, metricsetName) + return nil, fmt.Errorf("failed to unpack the %v/%v config: %w", moduleName, metricsetName, err) } bucket, err := datastore.OpenBucket(bucketName) if err != nil { - return nil, errors.Wrap(err, "failed to open persistent datastore") + return nil, fmt.Errorf("failed to open persistent datastore: %w", err) } hasher, err := hasher.NewFileHasher(config.HasherConfig, nil) @@ -231,13 +230,13 @@ func (ms *MetricSet) reportState(report mb.ReporterV2) error { processes, err := ms.getProcesses() if err != nil { - return errors.Wrap(err, "failed to get process infos") + 
return fmt.Errorf("failed to get process infos: %w", err) } ms.log.Debugf("Found %v processes", len(processes)) stateID, err := uuid.NewV4() if err != nil { - return errors.Wrap(err, "error generating state ID") + return fmt.Errorf("error generating state ID: %w", err) } for _, p := range processes { ms.enrichProcess(p) @@ -264,7 +263,7 @@ func (ms *MetricSet) reportState(report mb.ReporterV2) error { } err = ms.bucket.Store(bucketKeyStateTimestamp, timeBytes) if err != nil { - return errors.Wrap(err, "error writing state timestamp to disk") + return fmt.Errorf("error writing state timestamp to disk: %w", err) } return nil @@ -274,7 +273,7 @@ func (ms *MetricSet) reportState(report mb.ReporterV2) error { func (ms *MetricSet) reportChanges(report mb.ReporterV2) error { processes, err := ms.getProcesses() if err != nil { - return errors.Wrap(err, "failed to get processes") + return fmt.Errorf("failed to get processes: %w", err) } ms.log.Debugf("Found %v processes", len(processes)) @@ -322,8 +321,7 @@ func (ms *MetricSet) enrichProcess(process *Process) { sharedMntNS, err := isNsSharedWith(process.Info.PID, "mnt") if err != nil { if process.Error == nil { - process.Error = errors.Wrapf(err, "failed to get namespaces for %v PID %v", process.Info.Exe, - process.Info.PID) + process.Error = fmt.Errorf("failed to get namespaces for %v PID %v: %w", process.Info.Exe, process.Info.PID, err) } return } @@ -333,8 +331,7 @@ func (ms *MetricSet) enrichProcess(process *Process) { hashes, err := ms.hasher.HashFile(process.Info.Exe) if err != nil { if process.Error == nil { - process.Error = errors.Wrapf(err, "failed to hash executable %v for PID %v", process.Info.Exe, - process.Info.PID) + process.Error = fmt.Errorf("failed to hash executable %v for PID %v: %w", process.Info.Exe, process.Info.PID, err) } return } @@ -442,7 +439,7 @@ func (ms *MetricSet) getProcesses() ([]*Process, error) { sysinfoProcs, err := sysinfo.Processes() if err != nil { - return nil, errors.Wrap(err, "failed to fetch processes") + return nil, fmt.Errorf("failed to fetch processes: %w", err) } for _, sysinfoProc := range sysinfoProcs { @@ -473,7 +470,7 @@ func (ms *MetricSet) getProcesses() ([]*Process, error) { // Record what we can and continue process = &Process{ Info: pInfo, - Error: errors.Wrapf(err, "failed to load process information for PID %d", sysinfoProc.PID()), + Error: fmt.Errorf("failed to load process information for PID %d: %w", sysinfoProc.PID(), err), } process.Info.PID = sysinfoProc.PID() // in case pInfo did not contain it } else { @@ -485,7 +482,7 @@ func (ms *MetricSet) getProcesses() ([]*Process, error) { userInfo, err := sysinfoProc.User() if err != nil { if process.Error == nil { - process.Error = errors.Wrapf(err, "failed to load user for PID %d", sysinfoProc.PID()) + process.Error = fmt.Errorf("failed to load user for PID %d: %w", sysinfoProc.PID(), err) } } else { process.UserInfo = &userInfo diff --git a/x-pack/auditbeat/module/system/socket/dns/afpacket/afpacket.go b/x-pack/auditbeat/module/system/socket/dns/afpacket/afpacket.go index 9c430bb2023..1eb4ede3735 100644 --- a/x-pack/auditbeat/module/system/socket/dns/afpacket/afpacket.go +++ b/x-pack/auditbeat/module/system/socket/dns/afpacket/afpacket.go @@ -9,13 +9,14 @@ package afpacket import ( "context" + "errors" + "fmt" "net" "os" "time" "github.com/dustin/go-humanize" "github.com/miekg/dns" - "github.com/pkg/errors" "golang.org/x/net/bpf" "github.com/elastic/beats/v7/metricbeat/mb" @@ -59,7 +60,7 @@ func init() { 
func newAFPacketSniffer(base mb.BaseMetricSet, log *logp.Logger) (parent.Sniffer, error) { config := defaultConfig() if err := base.Module().UnpackConfig(&config); err != nil { - return nil, errors.Wrap(err, "failed to unpack af_packet config") + return nil, fmt.Errorf("failed to unpack af_packet config: %w", err) } frameSize, blockSize, numBlocks, err := afpacketComputeSize(8*humanize.MiByte, config.Snaplen, os.Getpagesize()) @@ -83,12 +84,12 @@ func newAFPacketSniffer(base mb.BaseMetricSet, log *logp.Logger) (parent.Sniffer tPacket, err := afpacket.NewTPacket(opts...) if err != nil { - return nil, errors.Wrap(err, "failed creating af_packet sniffer") + return nil, fmt.Errorf("failed creating af_packet sniffer: %w", err) } if err = tPacket.SetBPF(udpSrcPort53Filter); err != nil { tPacket.Close() - return nil, errors.Wrapf(err, "failed setting BPF filter") + return nil, fmt.Errorf("failed setting BPF filter: %w", err) } c := &dnsCapture{ @@ -226,8 +227,8 @@ func (c *dnsCapture) run(ctx context.Context, consumer parent.Consumer) { // The restriction is that the block_size must be divisible by both the // frame size and page size. func afpacketComputeSize(targetSize int, snaplen int, pageSize int) ( - frameSize int, blockSize int, numBlocks int, err error) { - + frameSize int, blockSize int, numBlocks int, err error, +) { if snaplen < pageSize { frameSize = pageSize / (pageSize / snaplen) } else { diff --git a/x-pack/auditbeat/module/system/socket/dns/dns.go b/x-pack/auditbeat/module/system/socket/dns/dns.go index 87c7a01d449..b1efec3d8f8 100644 --- a/x-pack/auditbeat/module/system/socket/dns/dns.go +++ b/x-pack/auditbeat/module/system/socket/dns/dns.go @@ -6,10 +6,9 @@ package dns import ( "context" + "fmt" "net" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/libbeat/logp" "github.com/elastic/beats/v7/metricbeat/mb" ) @@ -52,7 +51,7 @@ func (noopSniffer) Monitor(context.Context, Consumer) error { func NewSniffer(base mb.BaseMetricSet, log *logp.Logger) (Sniffer, error) { config := defaultConfig() if err := base.Module().UnpackConfig(&config); err != nil { - return nil, errors.Wrap(err, "failed to unpack dns config") + return nil, fmt.Errorf("failed to unpack dns config: %w", err) } if !config.Enabled { return noopSniffer{}, nil diff --git a/x-pack/auditbeat/module/system/socket/guess/cskxmit6.go b/x-pack/auditbeat/module/system/socket/guess/cskxmit6.go index bc86aa817b5..9c17f5136e8 100644 --- a/x-pack/auditbeat/module/system/socket/guess/cskxmit6.go +++ b/x-pack/auditbeat/module/system/socket/guess/cskxmit6.go @@ -11,7 +11,6 @@ import ( "fmt" "unsafe" - "github.com/pkg/errors" "golang.org/x/sys/unix" "github.com/elastic/beats/v7/libbeat/common" @@ -107,7 +106,7 @@ func (g *guessInet6CskXmit) Prepare(ctx Context) (err error) { g.acceptedFd = -1 g.loopback, err = helper.NewIPv6Loopback() if err != nil { - return errors.Wrap(err, "detect IPv6 loopback failed") + return fmt.Errorf("detect IPv6 loopback failed: %w", err) } defer func() { if err != nil { @@ -116,26 +115,26 @@ func (g *guessInet6CskXmit) Prepare(ctx Context) (err error) { }() clientIP, err := g.loopback.AddRandomAddress() if err != nil { - return errors.Wrap(err, "failed adding first device address") + return fmt.Errorf("failed adding first device address: %w", err) } serverIP, err := g.loopback.AddRandomAddress() if err != nil { - return errors.Wrap(err, "failed adding second device address") + return fmt.Errorf("failed adding second device address: %w", err) } 
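// On the afpacketComputeSize helper earlier in this hunk: the stated
// restriction is that the block size must be divisible by both the frame
// size and the page size. A sketch of sizing arithmetic that satisfies
// it; the else branch and the 128-frame multiplier are assumptions for
// illustration, not values copied from the module:

package main

import "fmt"

func computeSize(targetSize, snaplen, pageSize int) (frameSize, blockSize, numBlocks int) {
    if snaplen < pageSize {
        frameSize = pageSize / (pageSize / snaplen)
    } else {
        frameSize = (snaplen/pageSize + 1) * pageSize
    }
    blockSize = frameSize * 128 // one assumed multiplier; keeps both constraints
    numBlocks = targetSize / blockSize
    return frameSize, blockSize, numBlocks
}

func main() {
    f, b, n := computeSize(8<<20, 1024, 4096)
    fmt.Println(f, b, n)               // 1024 131072 64
    fmt.Println(b%f == 0, b%4096 == 0) // true true: divisible by frame and page size
}
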
copy(g.clientAddr.Addr[:], clientIP) copy(g.serverAddr.Addr[:], serverIP) if g.client, g.clientAddr, err = createSocket6WithProto(unix.SOCK_STREAM, g.clientAddr); err != nil { - return errors.Wrap(err, "error creating server") + return fmt.Errorf("error creating server: %w", err) } if g.server, g.serverAddr, err = createSocket6WithProto(unix.SOCK_STREAM, g.serverAddr); err != nil { - return errors.Wrap(err, "error creating client") + return fmt.Errorf("error creating client: %w", err) } if err = unix.Listen(g.server, 1); err != nil { - return errors.Wrap(err, "error in listen") + return fmt.Errorf("error in listen: %w", err) } if err = unix.Connect(g.client, &g.serverAddr); err != nil { - return errors.Wrap(err, "connect failed") + return fmt.Errorf("connect failed: %w", err) } return nil } @@ -156,7 +155,7 @@ func (g *guessInet6CskXmit) Terminate() error { func (g *guessInet6CskXmit) Trigger() error { fd, _, err := unix.Accept(g.server) if err != nil { - return errors.Wrap(err, "accept failed") + return fmt.Errorf("accept failed: %w", err) } _, err = unix.Write(fd, []byte("hello world")) return err diff --git a/x-pack/auditbeat/module/system/socket/guess/guess.go b/x-pack/auditbeat/module/system/socket/guess/guess.go index e9040bb6a66..0c632616057 100644 --- a/x-pack/auditbeat/module/system/socket/guess/guess.go +++ b/x-pack/auditbeat/module/system/socket/guess/guess.go @@ -8,11 +8,10 @@ package guess import ( + "errors" "fmt" "time" - "github.com/pkg/errors" - "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" "github.com/elastic/beats/v7/x-pack/auditbeat/tracing" @@ -98,7 +97,7 @@ func Guess(guesser Guesser, installer helper.ProbeInstaller, ctx Context) (resul result, err = guessOnce(guesser, installer, ctx) } if err != nil { - return nil, errors.Wrapf(err, "%s failed", guesser.Name()) + return nil, fmt.Errorf("%s failed: %w", guesser.Name(), err) } return result, nil } @@ -132,7 +131,7 @@ func guessEventually(guess EventualGuesser, installer helper.ProbeInstaller, ctx func guessOnce(guesser Guesser, installer helper.ProbeInstaller, ctx Context) (result common.MapStr, err error) { if err := guesser.Prepare(ctx); err != nil { - return nil, errors.Wrap(err, "prepare failed") + return nil, fmt.Errorf("prepare failed: %w", err) } defer func() { if err := guesser.Terminate(); err != nil { @@ -141,7 +140,7 @@ func guessOnce(guesser Guesser, installer helper.ProbeInstaller, ctx Context) (r }() probes, err := guesser.Probes() if err != nil { - return nil, errors.Wrap(err, "failed generating probes") + return nil, fmt.Errorf("failed generating probes: %w", err) } decoders := make([]tracing.Decoder, 0, len(probes)) @@ -150,7 +149,7 @@ func guessOnce(guesser Guesser, installer helper.ProbeInstaller, ctx Context) (r for _, pdesc := range probes { format, decoder, err := installer.Install(pdesc) if err != nil { - return nil, errors.Wrapf(err, "failed to add kprobe '%s'", pdesc.Probe.String()) + return nil, fmt.Errorf("failed to add kprobe '%s': %w", pdesc.Probe.String(), err) } formats = append(formats, format) decoders = append(decoders, decoder) @@ -177,18 +176,18 @@ func guessOnce(guesser Guesser, installer helper.ProbeInstaller, ctx Context) (r tracing.WithTID(thread.TID), tracing.WithPollTimeout(time.Millisecond*10)) if err != nil { - return nil, errors.Wrap(err, "failed to create perfchannel") + return nil, fmt.Errorf("failed to create perfchannel: %w", err) } defer perfchan.Close() for i := 
range probes { if err := perfchan.MonitorProbe(formats[i], decoders[i]); err != nil { - return nil, errors.Wrap(err, "failed to monitor probe") + return nil, fmt.Errorf("failed to monitor probe: %w", err) } } if err := perfchan.Run(); err != nil { - return nil, errors.Wrap(err, "failed to run perf channel") + return nil, fmt.Errorf("failed to run perf channel: %w", err) } timer := time.NewTimer(ctx.Timeout) @@ -211,7 +210,7 @@ func guessOnce(guesser Guesser, installer helper.ProbeInstaller, ctx Context) (r select { case r := <-thread.C(): if r.Err != nil { - return nil, errors.Wrap(r.Err, "trigger execution failed") + return nil, fmt.Errorf("trigger execution failed: %w", r.Err) } case <-timer.C: return nil, errors.New("timeout while waiting for trigger to complete") @@ -233,11 +232,11 @@ func guessOnce(guesser Guesser, installer helper.ProbeInstaller, ctx Context) (r case err := <-perfchan.ErrC(): if err != nil { - return nil, errors.Wrap(err, "error received from perf channel") + return nil, fmt.Errorf("error received from perf channel: %w", err) } case <-perfchan.LostC(): - return nil, errors.Wrap(err, "event loss in perf channel") + return nil, errors.New("event loss in perf channel") } } } @@ -265,7 +264,7 @@ func GuessAll(installer helper.ProbeInstaller, ctx Context) (err error) { if cond, isCond := guesser.(ConditionalGuesser); isCond { mustRun, err := cond.Condition(ctx) if err != nil { - return errors.Wrapf(err, "condition failed for %s", cond.Name()) + return fmt.Errorf("condition failed for %s: %w", cond.Name(), err) } if !mustRun { ctx.Log.Debugf("Guess %s skipped.", cond.Name()) diff --git a/x-pack/auditbeat/module/system/socket/guess/helpers.go b/x-pack/auditbeat/module/system/socket/guess/helpers.go index d0b9a2875bf..aa802fa252a 100644 --- a/x-pack/auditbeat/module/system/socket/guess/helpers.go +++ b/x-pack/auditbeat/module/system/socket/guess/helpers.go @@ -9,10 +9,10 @@ package guess import ( "bytes" + "errors" "fmt" "math/rand" - "github.com/pkg/errors" "golang.org/x/sys/unix" "github.com/elastic/beats/v7/libbeat/common" @@ -29,17 +29,17 @@ func createSocketWithProto(proto int, bindAddr unix.SockaddrInet4) (fd int, addr } if err = unix.Bind(fd, &bindAddr); err != nil { unix.Close(fd) - return -1, addr, errors.Wrap(err, "bind failed") + return -1, addr, fmt.Errorf("bind failed: %w", err) } sa, err := unix.Getsockname(fd) if err != nil { unix.Close(fd) - return -1, addr, errors.Wrap(err, "getsockname failed") + return -1, addr, fmt.Errorf("getsockname failed: %w", err) } addrptr, ok := sa.(*unix.SockaddrInet4) if !ok { unix.Close(fd) - return -1, addr, errors.Wrap(err, "getsockname didn't return a struct sockaddr_in") + return -1, addr, errors.New("getsockname didn't return a struct sockaddr_in") } return fd, *addrptr, nil } @@ -56,15 +56,15 @@ func createSocket6WithProto(proto int, bindAddr unix.SockaddrInet6) (fd int, add } }() if err = unix.Bind(fd, &bindAddr); err != nil { - return -1, addr, errors.Wrap(err, "bind failed") + return -1, addr, fmt.Errorf("bind failed: %w", err) } sa, err := unix.Getsockname(fd) if err != nil { - return -1, addr, errors.Wrap(err, "getsockname failed") + return -1, addr, fmt.Errorf("getsockname failed: %w", err) } addrptr, ok := sa.(*unix.SockaddrInet6) if !ok { - return -1, addr, errors.Wrap(err, "getsockname didn't return a struct sockaddr_in") + return -1, addr, errors.New("getsockname didn't return a struct sockaddr_in") } return fd, *addrptr, nil } diff --git a/x-pack/auditbeat/module/system/socket/guess/inetsock.go 
b/x-pack/auditbeat/module/system/socket/guess/inetsock.go index 8e435d42b03..8139bcf3ba7 100644 --- a/x-pack/auditbeat/module/system/socket/guess/inetsock.go +++ b/x-pack/auditbeat/module/system/socket/guess/inetsock.go @@ -10,8 +10,8 @@ package guess import ( "bytes" "encoding/binary" + "fmt" - "github.com/pkg/errors" "golang.org/x/sys/unix" "github.com/elastic/beats/v7/libbeat/common" @@ -105,13 +105,13 @@ func (g *guessInetSockIPv4) Prepare(ctx Context) (err error) { g.remote.Addr = randomLocalIP() } if g.server, g.local, err = createSocket(g.local); err != nil { - return errors.Wrap(err, "error creating server") + return fmt.Errorf("error creating server: %w", err) } if g.client, g.remote, err = createSocket(g.remote); err != nil { - return errors.Wrap(err, "error creating client") + return fmt.Errorf("error creating client: %w", err) } if err = unix.Listen(g.server, 1); err != nil { - return errors.Wrap(err, "error in listen") + return fmt.Errorf("error in listen: %w", err) } return nil } @@ -204,7 +204,8 @@ func (g *guessInetSockIPv4) Reduce(results []common.MapStr) (result common.MapSt for _, key := range []string{ "INET_SOCK_LADDR", "INET_SOCK_LPORT", - "INET_SOCK_RADDR", "INET_SOCK_RPORT"} { + "INET_SOCK_RADDR", "INET_SOCK_RPORT", + } { list, err := getListField(result, key) if err != nil { return nil, err diff --git a/x-pack/auditbeat/module/system/socket/guess/inetsock6.go b/x-pack/auditbeat/module/system/socket/guess/inetsock6.go index b61486849bc..a66e5d14417 100644 --- a/x-pack/auditbeat/module/system/socket/guess/inetsock6.go +++ b/x-pack/auditbeat/module/system/socket/guess/inetsock6.go @@ -12,7 +12,6 @@ import ( "fmt" "strings" - "github.com/pkg/errors" "golang.org/x/sys/unix" "github.com/elastic/beats/v7/libbeat/common" @@ -234,7 +233,7 @@ func (g *guessInetSockIPv6) Prepare(ctx Context) (err error) { } g.loopback, err = helper.NewIPv6Loopback() if err != nil { - return errors.Wrap(err, "detect IPv6 loopback failed") + return fmt.Errorf("detect IPv6 loopback failed: %w", err) } defer func() { if err != nil { @@ -243,23 +242,23 @@ func (g *guessInetSockIPv6) Prepare(ctx Context) (err error) { }() clientIP, err := g.loopback.AddRandomAddress() if err != nil { - return errors.Wrap(err, "failed adding first device address") + return fmt.Errorf("failed adding first device address: %w", err) } serverIP, err := g.loopback.AddRandomAddress() if err != nil { - return errors.Wrap(err, "failed adding second device address") + return fmt.Errorf("failed adding second device address: %w", err) } copy(g.clientAddr.Addr[:], clientIP) copy(g.serverAddr.Addr[:], serverIP) if g.client, g.clientAddr, err = createSocket6WithProto(unix.SOCK_STREAM, g.clientAddr); err != nil { - return errors.Wrap(err, "error creating server") + return fmt.Errorf("error creating server: %w", err) } if g.server, g.serverAddr, err = createSocket6WithProto(unix.SOCK_STREAM, g.serverAddr); err != nil { - return errors.Wrap(err, "error creating client") + return fmt.Errorf("error creating client: %w", err) } if err = unix.Listen(g.server, 1); err != nil { - return errors.Wrap(err, "error in listen") + return fmt.Errorf("error in listen: %w", err) } return nil } @@ -267,11 +266,11 @@ func (g *guessInetSockIPv6) Prepare(ctx Context) (err error) { // Trigger connects the client to the server, causing an inet_csk_accept call. 
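// All of these Prepare/Trigger pairs boil down to a connect-then-accept
// round on a loopback socket, which is what drives inet_csk_accept in the
// kernel. A minimal stand-in using the net package on plain IPv4 loopback
// (the guesses themselves use raw unix sockets on freshly added random
// IPv6 addresses):

package main

import (
    "fmt"
    "net"
)

func main() {
    ln, err := net.Listen("tcp", "127.0.0.1:0")
    if err != nil {
        panic(fmt.Errorf("error in listen: %w", err))
    }
    defer ln.Close()

    go func() {
        if conn, err := net.Dial("tcp", ln.Addr().String()); err == nil {
            conn.Close()
        }
    }()

    srv, err := ln.Accept() // the server-side call the kprobes observe
    if err != nil {
        panic(fmt.Errorf("accept failed: %w", err))
    }
    fmt.Println("accepted from", srv.RemoteAddr())
    srv.Close()
}
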
func (g *guessInetSockIPv6) Trigger() error { if err := unix.Connect(g.client, &g.serverAddr); err != nil { - return errors.Wrap(err, "connect failed") + return fmt.Errorf("connect failed: %w", err) } fd, _, err := unix.Accept(g.server) if err != nil { - return errors.Wrap(err, "accept failed") + return fmt.Errorf("accept failed: %w", err) } unix.Close(fd) return nil diff --git a/x-pack/auditbeat/module/system/socket/guess/inetsockaf.go b/x-pack/auditbeat/module/system/socket/guess/inetsockaf.go index 293a79eff91..fd53364c892 100644 --- a/x-pack/auditbeat/module/system/socket/guess/inetsockaf.go +++ b/x-pack/auditbeat/module/system/socket/guess/inetsockaf.go @@ -8,7 +8,8 @@ package guess import ( - "github.com/pkg/errors" + "errors" + "golang.org/x/sys/unix" "github.com/elastic/beats/v7/libbeat/common" diff --git a/x-pack/auditbeat/module/system/socket/guess/skbuff.go b/x-pack/auditbeat/module/system/socket/guess/skbuff.go index a90a49644a4..be2a9bd678d 100644 --- a/x-pack/auditbeat/module/system/socket/guess/skbuff.go +++ b/x-pack/auditbeat/module/system/socket/guess/skbuff.go @@ -10,11 +10,11 @@ package guess import ( "encoding/binary" "encoding/hex" + "errors" "fmt" "math/rand" "unsafe" - "github.com/pkg/errors" "golang.org/x/sys/unix" "github.com/elastic/beats/v7/libbeat/common" @@ -135,7 +135,7 @@ func (g *guessSkBuffLen) Extract(ev interface{}) (common.MapStr, bool) { uIntSize = 4 n = skbuffDumpSize / uIntSize maxOverhead = 128 - minHeadersSize = 0 //20 /* min IP*/ + 20 /* min TCP */ + minHeadersSize = 0 // 20 /* min IP*/ + 20 /* min TCP */ ipHeaderSizeChunk = 4 ) target := uint32(g.written) @@ -281,14 +281,14 @@ func (g *guessSkBuffProto) Prepare(ctx Context) (err error) { g.ctx = ctx g.hasIPv6, err = isIPv6Enabled(ctx.Vars) if err != nil { - return errors.Wrap(err, "unable to determine if IPv6 is enabled") + return fmt.Errorf("unable to determine if IPv6 is enabled: %w", err) } g.doIPv6 = g.hasIPv6 && !g.doIPv6 g.msg = make([]byte, 0x123) if g.doIPv6 { g.loopback, err = helper.NewIPv6Loopback() if err != nil { - return errors.Wrap(err, "detect IPv6 loopback failed") + return fmt.Errorf("detect IPv6 loopback failed: %w", err) } defer func() { if err != nil { @@ -297,20 +297,20 @@ func (g *guessSkBuffProto) Prepare(ctx Context) (err error) { }() clientIP, err := g.loopback.AddRandomAddress() if err != nil { - return errors.Wrap(err, "failed adding first device address") + return fmt.Errorf("failed adding first device address: %w", err) } serverIP, err := g.loopback.AddRandomAddress() if err != nil { - return errors.Wrap(err, "failed adding second device address") + return fmt.Errorf("failed adding second device address: %w", err) } copy(g.clientAddr.Addr[:], clientIP) copy(g.serverAddr.Addr[:], serverIP) if g.client, g.clientAddr, err = createSocket6WithProto(unix.SOCK_DGRAM, g.clientAddr); err != nil { - return errors.Wrap(err, "error creating server") + return fmt.Errorf("error creating server: %w", err) } if g.server, g.serverAddr, err = createSocket6WithProto(unix.SOCK_DGRAM, g.serverAddr); err != nil { - return errors.Wrap(err, "error creating client") + return fmt.Errorf("error creating client: %w", err) } } else { g.cs.SetupUDP() @@ -334,17 +334,17 @@ func (g *guessSkBuffProto) Terminate() (err error) { func (g *guessSkBuffProto) Trigger() error { if g.doIPv6 { if err := unix.Sendto(g.client, g.msg, 0, &g.serverAddr); err != nil { - return errors.Wrap(err, "failed to send ipv4") + return fmt.Errorf("failed to send ipv4: %w", err) } if _, _, err 
:= unix.Recvfrom(g.server, g.msg, 0); err != nil { - return errors.Wrap(err, "failed to receive ipv4") + return fmt.Errorf("failed to receive ipv4: %w", err) } } else { if err := unix.Sendto(g.cs.client, g.msg, 0, &g.cs.srvAddr); err != nil { - return errors.Wrap(err, "failed to send ipv4") + return fmt.Errorf("failed to send ipv4: %w", err) } if _, _, err := unix.Recvfrom(g.cs.server, g.msg, 0); err != nil { - return errors.Wrap(err, "failed to receive ipv4") + return fmt.Errorf("failed to receive ipv4: %w", err) } } return nil @@ -591,10 +591,10 @@ func (g *guessSkBuffDataPtr) Terminate() error { // Trigger causes a packet to be received at server socket. func (g *guessSkBuffDataPtr) Trigger() error { if err := unix.Sendto(g.cs.client, g.payload, 0, &g.cs.srvAddr); err != nil { - return errors.Wrap(err, "failed to send ipv4") + return fmt.Errorf("failed to send ipv4: %w", err) } if _, _, err := unix.Recvfrom(g.cs.server, g.payload, 0); err != nil { - return errors.Wrap(err, "failed to receive ipv4") + return fmt.Errorf("failed to receive ipv4: %w", err) } return nil } diff --git a/x-pack/auditbeat/module/system/socket/guess/sockaddrin.go b/x-pack/auditbeat/module/system/socket/guess/sockaddrin.go index 2817a41e5d9..78ab0197983 100644 --- a/x-pack/auditbeat/module/system/socket/guess/sockaddrin.go +++ b/x-pack/auditbeat/module/system/socket/guess/sockaddrin.go @@ -10,8 +10,8 @@ package guess import ( "bytes" "encoding/binary" + "fmt" - "github.com/pkg/errors" "golang.org/x/sys/unix" "github.com/elastic/beats/v7/libbeat/common" @@ -92,13 +92,13 @@ func (g *guessSockaddrIn) Prepare(ctx Context) (err error) { g.remote.Addr = randomLocalIP() } if g.server, g.local, err = createSocket(g.local); err != nil { - return errors.Wrap(err, "error creating server") + return fmt.Errorf("error creating server: %w", err) } if g.client, g.remote, err = createSocket(g.remote); err != nil { - return errors.Wrap(err, "error creating client") + return fmt.Errorf("error creating client: %w", err) } if err = unix.Listen(g.server, 1); err != nil { - return errors.Wrap(err, "error in listen") + return fmt.Errorf("error in listen: %w", err) } return nil } diff --git a/x-pack/auditbeat/module/system/socket/guess/sockaddrin6.go b/x-pack/auditbeat/module/system/socket/guess/sockaddrin6.go index 0ce53e84883..5a2af0f7f5d 100644 --- a/x-pack/auditbeat/module/system/socket/guess/sockaddrin6.go +++ b/x-pack/auditbeat/module/system/socket/guess/sockaddrin6.go @@ -9,8 +9,8 @@ package guess import ( "encoding/binary" + "fmt" - "github.com/pkg/errors" "golang.org/x/sys/unix" "github.com/elastic/beats/v7/libbeat/common" @@ -89,7 +89,7 @@ func (g *guessSockaddrIn6) Prepare(ctx Context) (err error) { g.ctx = ctx g.loopback, err = helper.NewIPv6Loopback() if err != nil { - return errors.Wrap(err, "detect IPv6 loopback failed") + return fmt.Errorf("detect IPv6 loopback failed: %w", err) } defer func() { if err != nil { @@ -98,23 +98,23 @@ func (g *guessSockaddrIn6) Prepare(ctx Context) (err error) { }() clientIP, err := g.loopback.AddRandomAddress() if err != nil { - return errors.Wrap(err, "failed adding first device address") + return fmt.Errorf("failed adding first device address: %w", err) } serverIP, err := g.loopback.AddRandomAddress() if err != nil { - return errors.Wrap(err, "failed adding second device address") + return fmt.Errorf("failed adding second device address: %w", err) } copy(g.clientAddr.Addr[:], clientIP) copy(g.serverAddr.Addr[:], serverIP) if g.client, g.clientAddr, err = 
createSocket6WithProto(unix.SOCK_STREAM, g.clientAddr); err != nil { - return errors.Wrap(err, "error creating server") + return fmt.Errorf("error creating server: %w", err) } if g.server, g.serverAddr, err = createSocket6WithProto(unix.SOCK_STREAM, g.serverAddr); err != nil { - return errors.Wrap(err, "error creating client") + return fmt.Errorf("error creating client: %w", err) } if err = unix.Listen(g.server, 1); err != nil { - return errors.Wrap(err, "error in listen") + return fmt.Errorf("error in listen: %w", err) } return nil } @@ -132,11 +132,11 @@ func (g *guessSockaddrIn6) Terminate() error { // Trigger performs a connection attempt on the random address. func (g *guessSockaddrIn6) Trigger() error { if err := unix.Connect(g.client, &g.serverAddr); err != nil { - return errors.Wrap(err, "connect failed") + return fmt.Errorf("connect failed: %w", err) } fd, _, err := unix.Accept(g.server) if err != nil { - return errors.Wrap(err, "accept failed") + return fmt.Errorf("accept failed: %w", err) } unix.Close(fd) return nil diff --git a/x-pack/auditbeat/module/system/socket/helper/loopback.go b/x-pack/auditbeat/module/system/socket/helper/loopback.go index 4cc0ca3eb56..eea98399049 100644 --- a/x-pack/auditbeat/module/system/socket/helper/loopback.go +++ b/x-pack/auditbeat/module/system/socket/helper/loopback.go @@ -8,13 +8,14 @@ package helper import ( + "errors" + "fmt" "math/rand" "net" "time" "unsafe" "github.com/joeshaw/multierror" - "github.com/pkg/errors" "golang.org/x/sys/unix" ) @@ -47,7 +48,7 @@ func NewIPv6Loopback() (lo IPv6Loopback, err error) { lo.fd = -1 devs, err := net.Interfaces() if err != nil { - return lo, errors.Wrap(err, "cannot list interfaces") + return lo, fmt.Errorf("cannot list interfaces: %w", err) } for _, dev := range devs { addrs, err := dev.Addrs() @@ -60,14 +61,14 @@ func NewIPv6Loopback() (lo IPv6Loopback, err error) { lo.fd, err = unix.Socket(unix.AF_INET6, unix.SOCK_DGRAM, unix.IPPROTO_IP) if err != nil { lo.fd = -1 - return lo, errors.Wrap(err, "ipv6 socket failed") + return lo, fmt.Errorf("ipv6 socket failed: %w", err) } copy(lo.ifreq.name[:], dev.Name) lo.ifreq.name[len(dev.Name)] = 0 _, _, errno := unix.Syscall(unix.SYS_IOCTL, uintptr(lo.fd), unix.SIOCGIFINDEX, uintptr(unsafe.Pointer(&lo.ifreq))) if errno != 0 { unix.Close(lo.fd) - return lo, errors.Wrap(errno, "ioctl(SIOCGIFINDEX) failed") + return lo, fmt.Errorf("ioctl(SIOCGIFINDEX) failed: %w", errno) } return lo, nil } @@ -88,7 +89,7 @@ func (lo *IPv6Loopback) AddRandomAddress() (addr net.IP, err error) { req.prefix = 128 _, _, e := unix.Syscall(unix.SYS_IOCTL, uintptr(lo.fd), unix.SIOCSIFADDR, uintptr(unsafe.Pointer(&req))) if e != 0 { - return nil, errors.Wrap(e, "ioctl SIOCSIFADDR failed") + return nil, fmt.Errorf("ioctl SIOCSIFADDR failed: %w", e) } lo.addresses = append(lo.addresses, addr) @@ -97,7 +98,7 @@ func (lo *IPv6Loopback) AddRandomAddress() (addr net.IP, err error) { // available to bind. 
fd, err := unix.Socket(unix.AF_INET6, unix.SOCK_DGRAM, 0) if err != nil { - return addr, errors.Wrap(err, "socket ipv6 dgram failed") + return addr, fmt.Errorf("socket ipv6 dgram failed: %w", err) } defer unix.Close(fd) var bindAddr unix.SockaddrInet6 @@ -111,7 +112,10 @@ func (lo *IPv6Loopback) AddRandomAddress() (addr net.IP, err error) { } time.Sleep(time.Millisecond * time.Duration(i)) } - return addr, errors.Wrap(err, "bind failed") + if err != nil { + err = fmt.Errorf("bind failed: %w", err) + } + return addr, err } // Cleanup removes the addresses registered to this loopback. diff --git a/x-pack/auditbeat/module/system/socket/kprobes.go b/x-pack/auditbeat/module/system/socket/kprobes.go index 67d86d6237d..f56d542020a 100644 --- a/x-pack/auditbeat/module/system/socket/kprobes.go +++ b/x-pack/auditbeat/module/system/socket/kprobes.go @@ -8,12 +8,12 @@ package socket import ( + "errors" "fmt" "strings" "unsafe" "github.com/joeshaw/multierror" - "github.com/pkg/errors" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/x-pack/auditbeat/module/system/socket/helper" @@ -51,14 +51,14 @@ func (p *probeInstaller) Install(pdef helper.ProbeDef) (format tracing.ProbeForm return format, decoder, errors.New("nil decoder in probe definition") } if err = p.traceFS.AddKProbe(pdef.Probe); err != nil { - return format, decoder, errors.Wrapf(err, "failed installing probe '%s'", pdef.Probe.String()) + return format, decoder, fmt.Errorf("failed installing probe '%s': %w", pdef.Probe.String(), err) } p.installed = append(p.installed, pdef.Probe) if format, err = p.traceFS.LoadProbeFormat(pdef.Probe); err != nil { - return format, decoder, errors.Wrap(err, "failed to load probe format") + return format, decoder, fmt.Errorf("failed to load probe format: %w", err) } if decoder, err = pdef.Decoder(format); err != nil { - return format, decoder, errors.Wrap(err, "failed to create decoder") + return format, decoder, fmt.Errorf("failed to create decoder: %w", err) } return } @@ -79,13 +79,13 @@ func (p *probeInstaller) UninstallInstalled() error { func (p *probeInstaller) UninstallIf(condition helper.ProbeCondition) error { kprobes, err := p.traceFS.ListKProbes() if err != nil { - return errors.Wrap(err, "failed to list installed kprobes") + return fmt.Errorf("failed to list installed kprobes: %w", err) } var errs multierror.Errors for _, probe := range kprobes { if condition(probe) { if err := p.traceFS.RemoveKProbe(probe); err != nil { - errs = append(errs, errors.Wrapf(err, "unable to remove kprobe '%s'", probe.String())) + errs = append(errs, fmt.Errorf("unable to remove kprobe '%s': %w", probe.String(), err)) } } } @@ -137,10 +137,9 @@ func WithFilterPort(portnum uint16) ProbeTransform { // KProbes shared with IPv4 and IPv6. var sharedKProbes = []helper.ProbeDef{ - - /*************************************************************************** - * RUNNING PROCESSES - **************************************************************************/ + //*************************************************************************** + //* RUNNING PROCESSES + //*************************************************************************** { Probe: tracing.Probe{ @@ -370,9 +369,9 @@ var ipv4OnlyKProbes = []helper.ProbeDef{ // KProbes used when IPv6 is enabled. 
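// The bind-retry hunk just above is one spot where the conversion could
// not stay mechanical: the added `if err != nil` guard matters because
// fmt.Errorf with %w returns a non-nil error even when err is nil, so
// wrapping unconditionally would report a successful bind as a failure.
// A standalone sketch of the difference:

package main

import (
    "errors"
    "fmt"
)

// wrapAlways is what a blind substitution would produce: %w yields a
// non-nil error even for a nil input.
func wrapAlways(err error) error {
    return fmt.Errorf("bind failed: %w", err)
}

// wrapGuarded is the shape used above: wrap only on failure, so a nil
// error (success) stays nil.
func wrapGuarded(err error) error {
    if err != nil {
        err = fmt.Errorf("bind failed: %w", err)
    }
    return err
}

func main() {
    fmt.Println(wrapAlways(nil) == nil)  // false: success turned into failure
    fmt.Println(wrapGuarded(nil) == nil) // true: success preserved
    fmt.Println(wrapGuarded(errors.New("address not available")))
}
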
var ipv6KProbes = []helper.ProbeDef{ - /*************************************************************************** - * IPv6 - **************************************************************************/ + //*************************************************************************** + //* IPv6 + //*************************************************************************** // IPv6 socket created. Good for associating sockets with pids. // ** This is a struct socket* not a struct sock* ** diff --git a/x-pack/auditbeat/module/system/socket/socket_linux.go b/x-pack/auditbeat/module/system/socket/socket_linux.go index 7ba99f7a455..f6d38003b88 100644 --- a/x-pack/auditbeat/module/system/socket/socket_linux.go +++ b/x-pack/auditbeat/module/system/socket/socket_linux.go @@ -10,6 +10,7 @@ package socket import ( "context" "encoding/binary" + "errors" "fmt" "os" "path/filepath" @@ -21,7 +22,6 @@ import ( "syscall" "time" - "github.com/pkg/errors" "golang.org/x/sys/unix" "github.com/elastic/beats/v7/libbeat/common" @@ -103,7 +103,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { config := defaultConfig if err := base.Module().UnpackConfig(&config); err != nil { - return nil, errors.Wrapf(err, "failed to unpack the %s config", fullName) + return nil, fmt.Errorf("failed to unpack the %s config: %w", fullName, err) } if instance != nil { // Do not instantiate a new dataset if the config hasn't changed. @@ -127,7 +127,7 @@ func newSocketMetricset(config Config, base mb.BaseMetricSet) (*MetricSet, error logger := logp.NewLogger(metricsetName) sniffer, err := dns.NewSniffer(base, logger) if err != nil { - return nil, errors.Wrap(err, "unable to create DNS sniffer") + return nil, fmt.Errorf("unable to create DNS sniffer: %w", err) } ms := &MetricSet{ SystemMetricSet: system.NewSystemMetricSet(base), @@ -142,7 +142,7 @@ func newSocketMetricset(config Config, base mb.BaseMetricSet) (*MetricSet, error // Setup the metricset before Run() so that startup can be halted in case of // error. if err = ms.Setup(); err != nil { - return nil, errors.Wrapf(err, "%s dataset setup failed", fullName) + return nil, fmt.Errorf("%s dataset setup failed: %w", fullName, err) } return ms, nil } @@ -168,14 +168,14 @@ func (m *MetricSet) Run(r mb.PushReporterV2) { m.log.Errorf("Unable to store DNS transaction %+v: %v", tr, err) } }); err != nil { - err = errors.Wrap(err, "unable to start DNS sniffer") + err = fmt.Errorf("unable to start DNS sniffer: %w", err) r.Error(err) m.log.Error(err) return } if err := m.perfChannel.Run(); err != nil { - err = errors.Wrap(err, "unable to start perf channel") + err = fmt.Errorf("unable to start perf channel: %w", err) r.Error(err) m.log.Error(err) return @@ -309,7 +309,7 @@ func (m *MetricSet) Setup() (err error) { traceFS, err = tracing.NewTraceFSWithPath(*m.config.TraceFSPath) } if err != nil { - return errors.Wrap(err, "tracefs/debugfs is not mounted or not writeable") + return fmt.Errorf("tracefs/debugfs is not mounted or not writeable: %w", err) } // @@ -366,7 +366,7 @@ func (m *MetricSet) Setup() (err error) { // remove existing Auditbeat KProbes that match the current PID. 
// if err = m.installer.UninstallIf(isThisAuditbeat); err != nil { - return errors.Wrapf(err, "unable to delete existing KProbes for group %s", groupName) + return fmt.Errorf("unable to delete existing KProbes for group %s: %w", groupName, err) } // @@ -420,7 +420,7 @@ func (m *MetricSet) Setup() (err error) { Vars: m.templateVars, Timeout: m.config.GuessTimeout, }); err != nil { - return errors.Wrap(err, "unable to guess one or more required parameters") + return fmt.Errorf("unable to guess one or more required parameters: %w", err) } if m.isDebug { @@ -446,7 +446,7 @@ func (m *MetricSet) Setup() (err error) { tracing.WithTID(perf.AllThreads), tracing.WithTimestamp()) if err != nil { - return errors.Wrapf(err, "unable to create perf channel") + return fmt.Errorf("unable to create perf channel: %w", err) } // @@ -455,10 +455,10 @@ func (m *MetricSet) Setup() (err error) { for _, probeDef := range getKProbes(hasIPv6) { format, decoder, err := m.installer.Install(probeDef) if err != nil { - return errors.Wrapf(err, "unable to register probe %s", probeDef.Probe.String()) + return fmt.Errorf("unable to register probe %s: %w", probeDef.Probe.String(), err) } if err = m.perfChannel.MonitorProbe(format, decoder); err != nil { - return errors.Wrapf(err, "unable to monitor probe %s", probeDef.Probe.String()) + return fmt.Errorf("unable to monitor probe %s: %w", probeDef.Probe.String(), err) } } return nil diff --git a/x-pack/auditbeat/module/system/socket/state.go b/x-pack/auditbeat/module/system/socket/state.go index 74c09ec7d1c..c1e6865c2c2 100644 --- a/x-pack/auditbeat/module/system/socket/state.go +++ b/x-pack/auditbeat/module/system/socket/state.go @@ -9,6 +9,7 @@ package socket import ( "encoding/binary" + "errors" "fmt" "net" "os" @@ -17,7 +18,6 @@ import ( "sync/atomic" "time" - "github.com/pkg/errors" "golang.org/x/sys/unix" "github.com/elastic/beats/v7/libbeat/common" @@ -438,8 +438,10 @@ func (s *state) DoneFlows() linkedList { return r } -var lastEvents uint64 -var lastTime time.Time +var ( + lastEvents uint64 + lastTime time.Time +) func (s *state) logState() { s.Lock() @@ -470,7 +472,6 @@ func (s *state) logState() { } else { s.log.Warnf("%s. 
Warnings: %v", msg, errs) } - } func (s *state) reapLoop() { diff --git a/x-pack/auditbeat/module/system/socket/state_test.go b/x-pack/auditbeat/module/system/socket/state_test.go index 79f7838840e..7327d31e8a2 100644 --- a/x-pack/auditbeat/module/system/socket/state_test.go +++ b/x-pack/auditbeat/module/system/socket/state_test.go @@ -9,6 +9,7 @@ package socket import ( "encoding/binary" + "errors" "fmt" "net" "os" @@ -17,7 +18,6 @@ import ( "time" "github.com/joeshaw/multierror" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" "golang.org/x/sys/unix" @@ -511,7 +511,7 @@ func feedEvents(evs []event, st *state, t *testing.T) error { t.Logf("Delivering event %d: %s", idx, ev.String()) // TODO: err if err := ev.Update(st); err != nil { - return errors.Wrapf(err, "error feeding event '%s'", ev.String()) + return fmt.Errorf("error feeding event '%s': %w", ev.String(), err) } } return nil diff --git a/x-pack/auditbeat/module/system/user/user.go b/x-pack/auditbeat/module/system/user/user.go index 69fb25e7187..e51c970b290 100644 --- a/x-pack/auditbeat/module/system/user/user.go +++ b/x-pack/auditbeat/module/system/user/user.go @@ -22,7 +22,6 @@ import ( "github.com/cespare/xxhash/v2" "github.com/gofrs/uuid" "github.com/joeshaw/multierror" - "github.com/pkg/errors" "github.com/elastic/beats/v7/auditbeat/datastore" "github.com/elastic/beats/v7/libbeat/common" @@ -237,12 +236,12 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { config := defaultConfig() if err := base.Module().UnpackConfig(&config); err != nil { - return nil, errors.Wrapf(err, "failed to unpack the %v/%v config", moduleName, metricsetName) + return nil, fmt.Errorf("failed to unpack the %v/%v config: %w", moduleName, metricsetName, err) } bucket, err := datastore.OpenBucket(bucketName) if err != nil { - return nil, errors.Wrap(err, "failed to open persistent datastore") + return nil, fmt.Errorf("failed to open persistent datastore: %w", err) } ms := &MetricSet{ @@ -278,7 +277,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { // Load from disk: Users users, err := ms.restoreUsersFromDisk() if err != nil { - return nil, errors.Wrap(err, "failed to restore users from disk") + return nil, fmt.Errorf("failed to restore users from disk: %w", err) } ms.log.Debugf("Restored %d users from disk", len(users)) @@ -322,14 +321,14 @@ func (ms *MetricSet) reportState(report mb.ReporterV2) error { users, err := GetUsers(ms.config.DetectPasswordChanges) if err != nil { - errs = append(errs, errors.Wrap(err, "error while getting users")) + errs = append(errs, fmt.Errorf("error while getting users: %w", err)) } ms.log.Debugf("Found %v users", len(users)) if len(users) > 0 { stateID, err := uuid.NewV4() if err != nil { - errs = append(errs, errors.Wrap(err, "error generating state ID")) + errs = append(errs, fmt.Errorf("error generating state ID: %w", err)) } for _, user := range users { @@ -350,7 +349,7 @@ func (ms *MetricSet) reportState(report mb.ReporterV2) error { } else { err = ms.bucket.Store(bucketKeyStateTimestamp, timeBytes) if err != nil { - errs = append(errs, errors.Wrap(err, "error writing state timestamp to disk")) + errs = append(errs, fmt.Errorf("error writing state timestamp to disk: %w", err)) } } @@ -383,7 +382,7 @@ func (ms *MetricSet) reportChanges(report mb.ReporterV2) error { users, err := GetUsers(ms.config.DetectPasswordChanges) if err != nil { - errs = append(errs, errors.Wrap(err, "error while getting users")) + errs = 
append(errs, fmt.Errorf("error while getting users: %w", err)) } ms.log.Debugf("Found %v users", len(users)) @@ -561,7 +560,7 @@ func (ms *MetricSet) restoreUsersFromDisk() (users []*User, err error) { // Read all users break } else { - return nil, errors.Wrap(err, "error decoding users") + return nil, fmt.Errorf("error decoding users: %w", err) } } } @@ -577,13 +576,13 @@ func (ms *MetricSet) saveUsersToDisk(users []*User) error { for _, user := range users { err := encoder.Encode(*user) if err != nil { - return errors.Wrap(err, "error encoding users") + return fmt.Errorf("error encoding users: %w", err) } } err := ms.bucket.Store(bucketKeyUsers, buf.Bytes()) if err != nil { - return errors.Wrap(err, "error writing users to disk") + return fmt.Errorf("error writing users to disk: %w", err) } return nil } @@ -593,7 +592,7 @@ func (ms *MetricSet) haveFilesChanged() (bool, error) { var stats syscall.Stat_t for _, path := range ms.userFiles { if err := syscall.Stat(path, &stats); err != nil { - return true, errors.Wrapf(err, "failed to stat %v", path) + return true, fmt.Errorf("failed to stat %v: %w", path, err) } ctime := time.Unix(int64(stats.Ctim.Sec), int64(stats.Ctim.Nsec)) diff --git a/x-pack/auditbeat/module/system/user/user_test.go b/x-pack/auditbeat/module/system/user/user_test.go index 65e8ae455f0..81e2b5be7ef 100644 --- a/x-pack/auditbeat/module/system/user/user_test.go +++ b/x-pack/auditbeat/module/system/user/user_test.go @@ -64,11 +64,11 @@ func testUser() *User { UID: "9999", GID: "1001", Groups: []*user.Group{ - &user.Group{ + { Gid: "1001", Name: "__elastic", }, - &user.Group{ + { Gid: "1002", Name: "docker", }, diff --git a/x-pack/auditbeat/module/system/user/users_linux.go b/x-pack/auditbeat/module/system/user/users_linux.go index f564c282cbd..e3412339c49 100644 --- a/x-pack/auditbeat/module/system/user/users_linux.go +++ b/x-pack/auditbeat/module/system/user/users_linux.go @@ -19,6 +19,7 @@ import "C" import ( "crypto/sha512" + "fmt" "os/user" "runtime" "strconv" @@ -27,12 +28,9 @@ import ( "time" "github.com/joeshaw/multierror" - "github.com/pkg/errors" ) -var ( - epoch = time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC) -) +var epoch = time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC) // GetUsers retrieves a list of users using information from // /etc/passwd, /etc/group, and - if configured - /etc/shadow. @@ -84,7 +82,7 @@ func readPasswdFile(readPasswords bool) ([]*User, error) { // getpwent() can return ENOENT even when there is no error, // see https://github.com/systemd/systemd/issues/9585. 
if err != nil && err != syscall.ENOENT { - return users, errors.Wrap(err, "error getting user") + return users, fmt.Errorf("error getting user: %w", err) } // No more entries @@ -135,7 +133,7 @@ func enrichWithGroups(users []*User) error { groupIds, err := goUser.GroupIds() if err != nil { - return errors.Wrapf(err, "error getting group IDs for user %v (UID: %v)", u.Name, u.UID) + return fmt.Errorf("error getting group IDs for user %v (UID: %v): %w", u.Name, u.UID, err) } for _, gid := range groupIds { @@ -143,7 +141,7 @@ func enrichWithGroups(users []*User) error { if !found { group, err = user.LookupGroupId(gid) if err != nil { - return errors.Wrapf(err, "error looking up group ID %v for user %v (UID: %v)", gid, u.Name, u.UID) + return fmt.Errorf("error looking up group ID %v for user %v (UID: %v): %w", gid, u.Name, u.UID, err) } gidCache[gid] = group } @@ -204,7 +202,7 @@ func readShadowFile() (map[string]shadowFileEntry, error) { if spwd == nil { if err != nil { - return shadowEntries, errors.Wrap(err, "error while reading shadow file") + return shadowEntries, fmt.Errorf("error while reading shadow file: %w", err) } // No more entries diff --git a/x-pack/auditbeat/tracing/cpu.go b/x-pack/auditbeat/tracing/cpu.go index 800d08ebe9a..506f22fcc63 100644 --- a/x-pack/auditbeat/tracing/cpu.go +++ b/x-pack/auditbeat/tracing/cpu.go @@ -9,11 +9,10 @@ package tracing import ( "bytes" + "fmt" "io/ioutil" "strconv" "strings" - - "github.com/pkg/errors" ) const ( @@ -97,7 +96,7 @@ func NewCPUSetFromExpression(contents string) (CPUSet, error) { for _, numStr := range parts { num16, err := strconv.ParseInt(numStr, 10, 16) if err != nil || num16 < 0 { - return CPUSet{}, errors.Errorf("failed to parse integer '%s' from range '%s' at '%s'", numStr, expr, contents) + return CPUSet{}, fmt.Errorf("failed to parse integer '%s' from range '%s' at '%s'", numStr, expr, contents) } num := int(num16) r = append(r, num) @@ -124,7 +123,7 @@ func NewCPUSetFromExpression(contents string) (CPUSet, error) { to = r[1] } if from == -1 || to < from { - return CPUSet{}, errors.Errorf("invalid cpu range %v in '%s'", r, contents) + return CPUSet{}, fmt.Errorf("invalid cpu range %v in '%s'", r, contents) } for i := from; i <= to; i++ { if !mask[i] { diff --git a/x-pack/auditbeat/tracing/events_test.go b/x-pack/auditbeat/tracing/events_test.go index 180761f7700..90c649f4728 100644 --- a/x-pack/auditbeat/tracing/events_test.go +++ b/x-pack/auditbeat/tracing/events_test.go @@ -209,7 +209,7 @@ func TestKProbeReal(t *testing.T) { probe := Probe{ Name: "test_kprobe", Address: "sys_connect", - //Fetchargs: "exe=$comm fd=%di +0(%si) +8(%si) +16(%si) +24(%si) +99999(%ax):string", + // Fetchargs: "exe=$comm fd=%di +0(%si) +8(%si) +16(%si) +24(%si) +99999(%ax):string", Fetchargs: "ax=%ax bx=%bx:u8 cx=%cx:u32 dx=%dx:s16", } err = evs.AddKProbe(probe) @@ -220,19 +220,19 @@ func TestKProbeReal(t *testing.T) { if err != nil { t.Fatal(err) } - //fmt.Fprintf(os.Stderr, "desc=%+v\n", desc) + // fmt.Fprintf(os.Stderr, "desc=%+v\n", desc) var decoder Decoder const useStructDecoder = false if useStructDecoder { type myStruct struct { - //Exe string `kprobe:"exe"` + // Exe string `kprobe:"exe"` PID uint32 `kprobe:"common_pid"` AX int64 `kprobe:"ax"` BX uint8 `kprobe:"bx"` CX int32 `kprobe:"cx"` DX uint16 `kprobe:"dx"` } - var allocFn = func() interface{} { + allocFn := func() interface{} { return new(myStruct) } if decoder, err = NewStructDecoder(desc, allocFn); err != nil { @@ -308,7 +308,7 @@ func TestKProbeEventsList(t *testing.T) { } 
defer os.RemoveAll(tmpDir) - if err := os.MkdirAll(tmpDir, 0700); err != nil { + if err := os.MkdirAll(tmpDir, 0o700); err != nil { t.Fatal(err) } file, err := os.Create(filepath.Join(tmpDir, "kprobe_events")) @@ -365,7 +365,7 @@ func TestKProbeEventsAddRemoveKProbe(t *testing.T) { } defer os.RemoveAll(tmpDir) - if err := os.MkdirAll(tmpDir, 0700); err != nil { + if err := os.MkdirAll(tmpDir, 0o700); err != nil { t.Fatal(err) } file, err := os.Create(filepath.Join(tmpDir, "kprobe_events")) diff --git a/x-pack/auditbeat/tracing/perfevent.go b/x-pack/auditbeat/tracing/perfevent.go index 79112a1c885..016fb1d1e02 100644 --- a/x-pack/auditbeat/tracing/perfevent.go +++ b/x-pack/auditbeat/tracing/perfevent.go @@ -9,6 +9,7 @@ package tracing import ( "context" + "errors" "fmt" "os" "sync" @@ -17,7 +18,6 @@ import ( "unsafe" "github.com/joeshaw/multierror" - "github.com/pkg/errors" "golang.org/x/sys/unix" "github.com/elastic/go-perf" @@ -120,7 +120,7 @@ func NewPerfChannel(cfg ...PerfChannelConf) (channel *PerfChannel, err error) { // at runtime (CPU hotplug). channel.cpus, err = NewCPUSetFromFile(OnlineCPUsPath) if err != nil { - return nil, errors.Wrap(err, "error listing online CPUs") + return nil, fmt.Errorf("error listing online CPUs: %w", err) } if channel.cpus.NumCPU() < 1 { return nil, errors.New("couldn't list online CPUs") @@ -247,7 +247,7 @@ func (c *PerfChannel) MonitorProbe(format ProbeFormat, decoder Decoder) error { fbytes := []byte(format.Probe.Filter + "\x00") _, _, errNo := unix.Syscall(unix.SYS_IOCTL, uintptr(fd), unix.PERF_EVENT_IOC_SET_FILTER, uintptr(unsafe.Pointer(&fbytes[0]))) if errNo != 0 { - return errors.Wrapf(errNo, "unable to set filter '%s'", format.Probe.Filter) + return fmt.Errorf("unable to set filter '%s': %w", format.Probe.Filter, errNo) } } c.streams[cid] = stream{probeID: format.ID, decoder: decoder} @@ -255,7 +255,7 @@ func (c *PerfChannel) MonitorProbe(format ProbeFormat, decoder Decoder) error { if !doGroup { if err := ev.MapRingNumPages(c.mappedPages); err != nil { - return errors.Wrap(err, "perf channel mapring failed") + return fmt.Errorf("perf channel mapring failed: %w", err) } } } @@ -292,7 +292,7 @@ func (c *PerfChannel) Run() error { for _, ev := range c.events { if err := ev.Enable(); err != nil { - return errors.Wrap(err, "perf channel enable failed") + return fmt.Errorf("perf channel enable failed: %w", err) } } c.wg.Add(1) @@ -312,10 +312,10 @@ func (c *PerfChannel) Close() error { var errs multierror.Errors for _, ev := range c.events { if err := ev.Disable(); err != nil { - errs = append(errs, errors.Wrap(err, "failed to disable event channel")) + errs = append(errs, fmt.Errorf("failed to disable event channel: %w", err)) } if err := ev.Close(); err != nil { - errs = append(errs, errors.Wrap(err, "failed to close event channel")) + errs = append(errs, fmt.Errorf("failed to close event channel: %w", err)) } } return errs.Err() @@ -446,7 +446,7 @@ func (m *recordMerger) nextSample(ctx context.Context) (sr *perf.SampleRecord, o // No sample was available. Block until one of the ringbuffers has data. _, closed, err := pollAll(m.evs, m.timeout) if err != nil { - m.channel.errC <- errors.Wrap(err, "poll failed") + m.channel.errC <- fmt.Errorf("poll failed: %w", err) return nil, false } // Some of the ring buffers closed. Report termination. 
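Note on the rewrite pattern above: errors.Wrap(err, msg) from github.com/pkg/errors returns nil when err is nil, while fmt.Errorf(msg+": %w", err) always returns a non-nil error. That difference is why hunks such as the "bind failed" change in loopback.go gain an explicit nil check instead of a bare wrap. The sketch below is a minimal, self-contained illustration of the resulting idiom, not code from this series; openState and bindWithRetry are illustrative names.

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

// openState shows the unconditional form: err is known to be non-nil on
// this path, so wrapping with %w is safe and keeps the cause inspectable.
func openState(path string) (*os.File, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, fmt.Errorf("failed to open state file %s: %w", path, err)
	}
	return f, nil
}

// bindWithRetry shows the guarded form used in the loopback.go hunk: err
// may be nil here when a retry succeeded, and fmt.Errorf("...: %w", nil)
// would turn that success into a non-nil error, so the wrap is conditional.
func bindWithRetry(try func() error) error {
	var err error
	for i := 0; i < 3; i++ {
		if err = try(); err == nil {
			break
		}
	}
	if err != nil {
		err = fmt.Errorf("bind failed: %w", err)
	}
	return err
}

func main() {
	// The wrapped chain still matches the sentinel error underneath.
	if _, err := openState("/does/not/exist"); errors.Is(err, fs.ErrNotExist) {
		fmt.Println("cause preserved:", err)
	}

	// A transient failure that succeeds on retry must stay a nil error.
	calls := 0
	err := bindWithRetry(func() error {
		calls++
		if calls < 2 {
			return errors.New("simulated transient failure")
		}
		return nil
	})
	fmt.Println("retry result:", err)
}

Because %w records the wrapped error in the chain, call sites that need the underlying cause can inspect it with errors.Is or errors.As from the standard library.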
From 7b67384954718e9f59a39549f741a2156396679f Mon Sep 17 00:00:00 2001
From: Mat Schaffer
Date: Mon, 21 Feb 2022 12:10:22 +0900
Subject: [PATCH 03/13] Add drop and explicit tests to avoid duplicate ingest
 of elasticsearch logs (#30440)

* Ensure we drop server logs that show up in deprecation pipeline

* Add note about deprecation dataset normalization

* Add test for mixed es server logs

This pipeline already contained a drop to avoid duplicate logging.

* Ensure we drop server logs that show up in slowlog pipeline

This was partially guarded against in testing due to the grok on
`elasticsearch.slowlog` but probably better to explicitly drop and
avoid duplicate logging.

* Add "mixed" test for elasticsearch audit logs

test-audit-docker.log also contains a case but it was overlooked in the
expected file until https://github.com/elastic/beats/pull/30164 added
the appropriate drop statements.

* Changelog entry

* Remove duplicated filebeat header
---
 CHANGELOG.next.asciidoc                       |  1 +
 .../audit/test/test-audit-800.mixed.log       |  2 +
 .../test-audit-800.mixed.log-expected.json    | 41 ++++++++++++
 .../deprecation/ingest/pipeline-json-8.yml    |  4 ++
 .../test/es_deprecation-json.800.mixed.log    |  2 +
 ...precation-json.800.mixed.log-expected.json | 32 ++++++++++
 .../test/elasticsearch-json.800.mixed.log     |  2 +
 ...ticsearch-json.800.mixed.log-expected.json | 24 +++++++
 .../slowlog/ingest/pipeline-json-8.yml        |  3 +
 .../slowlog/test/es_slowlog.800.mixed.log     |  3 +
 .../es_slowlog.800.mixed.log-expected.json    | 63 +++++++++++++++++++
 11 files changed, 177 insertions(+)
 create mode 100644 filebeat/module/elasticsearch/audit/test/test-audit-800.mixed.log
 create mode 100644 filebeat/module/elasticsearch/audit/test/test-audit-800.mixed.log-expected.json
 create mode 100644 filebeat/module/elasticsearch/deprecation/test/es_deprecation-json.800.mixed.log
 create mode 100644 filebeat/module/elasticsearch/deprecation/test/es_deprecation-json.800.mixed.log-expected.json
 create mode 100644 filebeat/module/elasticsearch/server/test/elasticsearch-json.800.mixed.log
 create mode 100644 filebeat/module/elasticsearch/server/test/elasticsearch-json.800.mixed.log-expected.json
 create mode 100644 filebeat/module/elasticsearch/slowlog/test/es_slowlog.800.mixed.log
 create mode 100644 filebeat/module/elasticsearch/slowlog/test/es_slowlog.800.mixed.log-expected.json

diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc
index 298f376bf42..a07654b6e34 100644
--- a/CHANGELOG.next.asciidoc
+++ b/CHANGELOG.next.asciidoc
@@ -55,6 +55,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...main[Check the HEAD dif
 - Fix broken Kafka input {issue}29746[29746] {pull}30277[30277]
 - Report the starting offset of the line in `log.offset` when using `filestream` instead of the end to be ECS compliant. {pull}30445[30445]
 - auditd: Prevent mapping explosion when truncated EXECVE records are ingested. {pull}30382[30382]
+- elasticsearch: fix duplicate ingest when using a common appender configuration {issue}30428[30428] {pull}30440[30440]

 *Heartbeat*
 - Fix missing mapping for `service.name`.
{pull}30324[30324] diff --git a/filebeat/module/elasticsearch/audit/test/test-audit-800.mixed.log b/filebeat/module/elasticsearch/audit/test/test-audit-800.mixed.log new file mode 100644 index 00000000000..ea47c05004b --- /dev/null +++ b/filebeat/module/elasticsearch/audit/test/test-audit-800.mixed.log @@ -0,0 +1,2 @@ +{"type":"audit", "timestamp":"2022-01-27T14:16:25,271+0100", "node.id":"O8SFUsk8QpGG16JVJcNgUw", "event.type":"transport", "event.action":"access_granted", "authentication.type":"REALM", "user.name":"elastic", "user.realm":"reserved", "user.roles":["superuser"], "origin.type":"rest", "origin.address":"[::1]:64583", "request.id":"yEUG-8deS2y8ZxGgeyeUnw", "action":"indices:admin/create", "request.name":"CreateIndexRequest", "indices":["test_1"], "opaque_id":"myApp1", "trace.id":"0af7651916cd43dd8448eb211c80319c"} +{"@timestamp":"2022-01-25T15:12:08.686Z", "log.level": "INFO", "message":"adding template [.monitoring-alerts-7] for index patterns [.monitoring-alerts-7]", "ecs.version": "1.2.0","service.name":"ES_ECS","event.dataset":"elasticsearch.server","process.thread.name":"elasticsearch[matschaffer-mbp2019.lan][masterService#updateTask][T#1]","log.logger":"org.elasticsearch.cluster.metadata.MetadataIndexTemplateService","elasticsearch.cluster.uuid":"28iKoFsvTJ6HEyXbdLL-PQ","elasticsearch.node.id":"tc3nhgC0SFCKfwwy6jCmkw","elasticsearch.node.name":"matschaffer-mbp2019.lan","elasticsearch.cluster.name":"main"} diff --git a/filebeat/module/elasticsearch/audit/test/test-audit-800.mixed.log-expected.json b/filebeat/module/elasticsearch/audit/test/test-audit-800.mixed.log-expected.json new file mode 100644 index 00000000000..b182b67a999 --- /dev/null +++ b/filebeat/module/elasticsearch/audit/test/test-audit-800.mixed.log-expected.json @@ -0,0 +1,41 @@ +[ + { + "@timestamp": "2022-01-27T13:16:25.271Z", + "elasticsearch.audit.action": "indices:admin/create", + "elasticsearch.audit.authentication.type": "REALM", + "elasticsearch.audit.indices": [ + "test_1" + ], + "elasticsearch.audit.layer": "transport", + "elasticsearch.audit.opaque_id": "myApp1", + "elasticsearch.audit.origin.type": "rest", + "elasticsearch.audit.request.id": "yEUG-8deS2y8ZxGgeyeUnw", + "elasticsearch.audit.request.name": "CreateIndexRequest", + "elasticsearch.audit.user.realm": "reserved", + "elasticsearch.audit.user.roles": [ + "superuser" + ], + "elasticsearch.node.id": "O8SFUsk8QpGG16JVJcNgUw", + "event.action": "access_granted", + "event.category": "database", + "event.dataset": "elasticsearch.audit", + "event.kind": "event", + "event.module": "elasticsearch", + "event.outcome": "success", + "fileset.name": "audit", + "host.id": "O8SFUsk8QpGG16JVJcNgUw", + "http.request.id": "yEUG-8deS2y8ZxGgeyeUnw", + "input.type": "log", + "log.offset": 0, + "message": "{\"type\":\"audit\", \"timestamp\":\"2022-01-27T14:16:25,271+0100\", \"node.id\":\"O8SFUsk8QpGG16JVJcNgUw\", \"event.type\":\"transport\", \"event.action\":\"access_granted\", \"authentication.type\":\"REALM\", \"user.name\":\"elastic\", \"user.realm\":\"reserved\", \"user.roles\":[\"superuser\"], \"origin.type\":\"rest\", \"origin.address\":\"[::1]:64583\", \"request.id\":\"yEUG-8deS2y8ZxGgeyeUnw\", \"action\":\"indices:admin/create\", \"request.name\":\"CreateIndexRequest\", \"indices\":[\"test_1\"], \"opaque_id\":\"myApp1\", \"trace.id\":\"0af7651916cd43dd8448eb211c80319c\"}", + "related.user": [ + "elastic" + ], + "service.type": "elasticsearch", + "source.address": "[::1]:64583", + "source.ip": "::1", + "source.port": 64583, + "trace.id": 
"0af7651916cd43dd8448eb211c80319c", + "user.name": "elastic" + } +] \ No newline at end of file diff --git a/filebeat/module/elasticsearch/deprecation/ingest/pipeline-json-8.yml b/filebeat/module/elasticsearch/deprecation/ingest/pipeline-json-8.yml index 89c7b4083f6..d42697a0e92 100644 --- a/filebeat/module/elasticsearch/deprecation/ingest/pipeline-json-8.yml +++ b/filebeat/module/elasticsearch/deprecation/ingest/pipeline-json-8.yml @@ -10,6 +10,10 @@ processors: - dot_expander: field: '*' override: true +# Drop any non-deprecation logs that show up due to mixed log output configurations +- drop: + if: '!["deprecation", "deprecation.elasticsearch"].contains(ctx.event.dataset)' +# Normalize event.dataset value for kibana queries - set: field: event.dataset value: elasticsearch.deprecation diff --git a/filebeat/module/elasticsearch/deprecation/test/es_deprecation-json.800.mixed.log b/filebeat/module/elasticsearch/deprecation/test/es_deprecation-json.800.mixed.log new file mode 100644 index 00000000000..fa9b17805ee --- /dev/null +++ b/filebeat/module/elasticsearch/deprecation/test/es_deprecation-json.800.mixed.log @@ -0,0 +1,2 @@ +{"@timestamp":"2022-01-27T11:48:45.809Z", "log.level":"CRITICAL", "data_stream.dataset":"deprecation.elasticsearch","data_stream.namespace":"default","data_stream.type":"logs","elasticsearch.elastic_product_origin":"","elasticsearch.event.category":"compatible_api","elasticsearch.http.request.x_opaque_id":"v7app","event.code":"create_index_with_types","message":"[types removal] Using include_type_name in create index requests is deprecated. The parameter will be removed in the next major version." , "ecs.version": "1.2.0","service.name":"ES_ECS","event.dataset":"deprecation.elasticsearch","process.thread.name":"elasticsearch[runTask-0][transport_worker][T#8]","log.logger":"org.elasticsearch.deprecation.rest.action.admin.indices.RestCreateIndexAction","trace.id":"0af7651916cd43dd8448eb211c80319c","elasticsearch.cluster.uuid":"5alW33KLT16Lp1SevDqDSQ","elasticsearch.node.id":"tVLnAGLgQum5ca6z50aqbw","elasticsearch.node.name":"runTask-0","elasticsearch.cluster.name":"runTask"} +{"@timestamp":"2022-01-25T15:12:08.686Z", "log.level": "INFO", "message":"adding template [.monitoring-alerts-7] for index patterns [.monitoring-alerts-7]", "ecs.version": "1.2.0","service.name":"ES_ECS","event.dataset":"elasticsearch.server","process.thread.name":"elasticsearch[matschaffer-mbp2019.lan][masterService#updateTask][T#1]","log.logger":"org.elasticsearch.cluster.metadata.MetadataIndexTemplateService","elasticsearch.cluster.uuid":"28iKoFsvTJ6HEyXbdLL-PQ","elasticsearch.node.id":"tc3nhgC0SFCKfwwy6jCmkw","elasticsearch.node.name":"matschaffer-mbp2019.lan","elasticsearch.cluster.name":"main"} diff --git a/filebeat/module/elasticsearch/deprecation/test/es_deprecation-json.800.mixed.log-expected.json b/filebeat/module/elasticsearch/deprecation/test/es_deprecation-json.800.mixed.log-expected.json new file mode 100644 index 00000000000..f3cd1b8c0d4 --- /dev/null +++ b/filebeat/module/elasticsearch/deprecation/test/es_deprecation-json.800.mixed.log-expected.json @@ -0,0 +1,32 @@ +[ + { + "@timestamp": "2022-01-27T11:48:45.809Z", + "data_stream.dataset": "deprecation.elasticsearch", + "data_stream.namespace": "default", + "data_stream.type": "logs", + "elasticsearch.cluster.name": "runTask", + "elasticsearch.cluster.uuid": "5alW33KLT16Lp1SevDqDSQ", + "elasticsearch.elastic_product_origin": "", + "elasticsearch.event.category": "compatible_api", + "elasticsearch.http.request.x_opaque_id": 
"v7app", + "elasticsearch.node.id": "tVLnAGLgQum5ca6z50aqbw", + "elasticsearch.node.name": "runTask-0", + "event.category": "database", + "event.code": "create_index_with_types", + "event.dataset": "elasticsearch.deprecation", + "event.kind": "event", + "event.module": "elasticsearch", + "event.type": "info", + "fileset.name": "deprecation", + "host.id": "tVLnAGLgQum5ca6z50aqbw", + "input.type": "log", + "log.level": "CRITICAL", + "log.logger": "org.elasticsearch.deprecation.rest.action.admin.indices.RestCreateIndexAction", + "log.offset": 0, + "message": "[types removal] Using include_type_name in create index requests is deprecated. The parameter will be removed in the next major version.", + "process.thread.name": "elasticsearch[runTask-0][transport_worker][T#8]", + "service.name": "ES_ECS", + "service.type": "elasticsearch", + "trace.id": "0af7651916cd43dd8448eb211c80319c" + } +] diff --git a/filebeat/module/elasticsearch/server/test/elasticsearch-json.800.mixed.log b/filebeat/module/elasticsearch/server/test/elasticsearch-json.800.mixed.log new file mode 100644 index 00000000000..6e4365a9f0c --- /dev/null +++ b/filebeat/module/elasticsearch/server/test/elasticsearch-json.800.mixed.log @@ -0,0 +1,2 @@ +{"@timestamp":"2022-01-25T15:12:08.472Z", "log.level": "INFO", "message":"adding template [.monitoring-kibana] for index patterns [.monitoring-kibana-7-*]", "ecs.version": "1.2.0","service.name":"ES_ECS","event.dataset":"elasticsearch.server","process.thread.name":"elasticsearch[matschaffer-mbp2019.lan][masterService#updateTask][T#1]","log.logger":"org.elasticsearch.cluster.metadata.MetadataIndexTemplateService","elasticsearch.cluster.uuid":"28iKoFsvTJ6HEyXbdLL-PQ","elasticsearch.node.id":"tc3nhgC0SFCKfwwy6jCmkw","elasticsearch.node.name":"matschaffer-mbp2019.lan","elasticsearch.cluster.name":"main"} +{"@timestamp":"2022-01-27T11:48:45.809Z", "log.level":"CRITICAL", "data_stream.dataset":"deprecation.elasticsearch","data_stream.namespace":"default","data_stream.type":"logs","elasticsearch.elastic_product_origin":"","elasticsearch.event.category":"compatible_api","elasticsearch.http.request.x_opaque_id":"v7app","event.code":"create_index_with_types","message":"[types removal] Using include_type_name in create index requests is deprecated. The parameter will be removed in the next major version." 
, "ecs.version": "1.2.0","service.name":"ES_ECS","event.dataset":"deprecation.elasticsearch","process.thread.name":"elasticsearch[runTask-0][transport_worker][T#8]","log.logger":"org.elasticsearch.deprecation.rest.action.admin.indices.RestCreateIndexAction","trace.id":"0af7651916cd43dd8448eb211c80319c","elasticsearch.cluster.uuid":"5alW33KLT16Lp1SevDqDSQ","elasticsearch.node.id":"tVLnAGLgQum5ca6z50aqbw","elasticsearch.node.name":"runTask-0","elasticsearch.cluster.name":"runTask"} diff --git a/filebeat/module/elasticsearch/server/test/elasticsearch-json.800.mixed.log-expected.json b/filebeat/module/elasticsearch/server/test/elasticsearch-json.800.mixed.log-expected.json new file mode 100644 index 00000000000..645f3121652 --- /dev/null +++ b/filebeat/module/elasticsearch/server/test/elasticsearch-json.800.mixed.log-expected.json @@ -0,0 +1,24 @@ +[ + { + "@timestamp": "2022-01-25T15:12:08.472Z", + "elasticsearch.cluster.name": "main", + "elasticsearch.cluster.uuid": "28iKoFsvTJ6HEyXbdLL-PQ", + "elasticsearch.node.id": "tc3nhgC0SFCKfwwy6jCmkw", + "elasticsearch.node.name": "matschaffer-mbp2019.lan", + "event.category": "database", + "event.dataset": "elasticsearch.server", + "event.kind": "event", + "event.module": "elasticsearch", + "event.type": "info", + "fileset.name": "server", + "host.id": "tc3nhgC0SFCKfwwy6jCmkw", + "input.type": "log", + "log.level": "INFO", + "log.logger": "org.elasticsearch.cluster.metadata.MetadataIndexTemplateService", + "log.offset": 0, + "message": "adding template [.monitoring-kibana] for index patterns [.monitoring-kibana-7-*]", + "process.thread.name": "elasticsearch[matschaffer-mbp2019.lan][masterService#updateTask][T#1]", + "service.name": "ES_ECS", + "service.type": "elasticsearch" + } +] \ No newline at end of file diff --git a/filebeat/module/elasticsearch/slowlog/ingest/pipeline-json-8.yml b/filebeat/module/elasticsearch/slowlog/ingest/pipeline-json-8.yml index 3e0479d59ea..49f65cbcb8b 100644 --- a/filebeat/module/elasticsearch/slowlog/ingest/pipeline-json-8.yml +++ b/filebeat/module/elasticsearch/slowlog/ingest/pipeline-json-8.yml @@ -10,6 +10,9 @@ processors: - dot_expander: field: '*' override: true +# Drop any non-slowlog messages that show up due to mixed log output configurations +- drop: + if: '!["elasticsearch.slowlog", "elasticsearch.index_indexing_slowlog", "elasticsearch.index_search_slowlog"].contains(ctx.event.dataset)' - convert: field: elasticsearch.slowlog.took_millis type: float diff --git a/filebeat/module/elasticsearch/slowlog/test/es_slowlog.800.mixed.log b/filebeat/module/elasticsearch/slowlog/test/es_slowlog.800.mixed.log new file mode 100644 index 00000000000..28e824893fa --- /dev/null +++ b/filebeat/module/elasticsearch/slowlog/test/es_slowlog.800.mixed.log @@ -0,0 +1,3 @@ +{"@timestamp":"2022-01-25T15:12:08.686Z", "log.level": "INFO", "message":"adding template [.monitoring-alerts-7] for index patterns [.monitoring-alerts-7]", "ecs.version": "1.2.0","service.name":"ES_ECS","event.dataset":"elasticsearch.server","process.thread.name":"elasticsearch[matschaffer-mbp2019.lan][masterService#updateTask][T#1]","log.logger":"org.elasticsearch.cluster.metadata.MetadataIndexTemplateService","elasticsearch.cluster.uuid":"28iKoFsvTJ6HEyXbdLL-PQ","elasticsearch.node.id":"tc3nhgC0SFCKfwwy6jCmkw","elasticsearch.node.name":"matschaffer-mbp2019.lan","elasticsearch.cluster.name":"main"} +{"@timestamp":"2022-01-27T11:39:29.508Z", "log.level":"TRACE", 
"elasticsearch.slowlog.id":"_oRVm34B7FprLQsjW_Zh","elasticsearch.slowlog.message":"[test_1/8pT6xiN_Tt-dcJWRR3LX6A]","elasticsearch.slowlog.source":"{\\\"a\\\":","elasticsearch.slowlog.took":"1.7ms","elasticsearch.slowlog.took_millis":"1" , "ecs.version": "1.2.0","service.name":"ES_ECS","event.dataset":"elasticsearch.index_indexing_slowlog","process.thread.name":"elasticsearch[runTask-0][write][T#5]","log.logger":"index.indexing.slowlog.index","trace.id":"0af7651916cd43dd8448eb211c80319c","elasticsearch.cluster.uuid":"5alW33KLT16Lp1SevDqDSQ","elasticsearch.node.id":"tVLnAGLgQum5ca6z50aqbw","elasticsearch.node.name":"runTask-0","elasticsearch.cluster.name":"runTask"} +{"@timestamp":"2022-01-27T11:42:31.395Z", "log.level":"DEBUG", "elasticsearch.slowlog.id":null,"elasticsearch.slowlog.message":"[test_1][0]","elasticsearch.slowlog.search_type":"QUERY_THEN_FETCH","elasticsearch.slowlog.source":"{}","elasticsearch.slowlog.stats":"[]","elasticsearch.slowlog.took":"115.3micros","elasticsearch.slowlog.took_millis":0,"elasticsearch.slowlog.total_hits":"2 hits","elasticsearch.slowlog.total_shards":1 , "ecs.version": "1.2.0","service.name":"ES_ECS","event.dataset":"elasticsearch.index_search_slowlog","process.thread.name":"elasticsearch[runTask-0][search][T#5]","log.logger":"index.search.slowlog.query","elasticsearch.cluster.uuid":"5alW33KLT16Lp1SevDqDSQ","elasticsearch.node.id":"tVLnAGLgQum5ca6z50aqbw","elasticsearch.node.name":"runTask-0","elasticsearch.cluster.name":"runTask"} diff --git a/filebeat/module/elasticsearch/slowlog/test/es_slowlog.800.mixed.log-expected.json b/filebeat/module/elasticsearch/slowlog/test/es_slowlog.800.mixed.log-expected.json new file mode 100644 index 00000000000..02570cad647 --- /dev/null +++ b/filebeat/module/elasticsearch/slowlog/test/es_slowlog.800.mixed.log-expected.json @@ -0,0 +1,63 @@ +[ + { + "@timestamp": "2022-01-27T11:39:29.508Z", + "elasticsearch.cluster.name": "runTask", + "elasticsearch.cluster.uuid": "5alW33KLT16Lp1SevDqDSQ", + "elasticsearch.index.id": "8pT6xiN_Tt-dcJWRR3LX6A", + "elasticsearch.index.name": "test_1", + "elasticsearch.node.id": "tVLnAGLgQum5ca6z50aqbw", + "elasticsearch.node.name": "runTask-0", + "elasticsearch.slowlog.id": "_oRVm34B7FprLQsjW_Zh", + "elasticsearch.slowlog.source": "{\\\"a\\\":", + "elasticsearch.slowlog.took": "1.7ms", + "event.category": "database", + "event.dataset": "elasticsearch.index_indexing_slowlog", + "event.duration": 1000000, + "event.kind": "event", + "event.module": "elasticsearch", + "event.type": "info", + "fileset.name": "slowlog", + "host.id": "tVLnAGLgQum5ca6z50aqbw", + "input.type": "log", + "log.level": "TRACE", + "log.logger": "index.indexing.slowlog.index", + "log.offset": 608, + "message": "[test_1/8pT6xiN_Tt-dcJWRR3LX6A]", + "process.thread.name": "elasticsearch[runTask-0][write][T#5]", + "service.name": "ES_ECS", + "service.type": "elasticsearch", + "trace.id": "0af7651916cd43dd8448eb211c80319c" + }, + { + "@timestamp": "2022-01-27T11:42:31.395Z", + "elasticsearch.cluster.name": "runTask", + "elasticsearch.cluster.uuid": "5alW33KLT16Lp1SevDqDSQ", + "elasticsearch.index.name": "test_1", + "elasticsearch.node.id": "tVLnAGLgQum5ca6z50aqbw", + "elasticsearch.node.name": "runTask-0", + "elasticsearch.shard.id": "0", + "elasticsearch.slowlog.id": null, + "elasticsearch.slowlog.search_type": "QUERY_THEN_FETCH", + "elasticsearch.slowlog.source": "{}", + "elasticsearch.slowlog.stats": "[]", + "elasticsearch.slowlog.took": "115.3micros", + "elasticsearch.slowlog.total_hits": "2 hits", + 
"elasticsearch.slowlog.total_shards": 1, + "event.category": "database", + "event.dataset": "elasticsearch.index_search_slowlog", + "event.duration": 0, + "event.kind": "event", + "event.module": "elasticsearch", + "event.type": "info", + "fileset.name": "slowlog", + "host.id": "tVLnAGLgQum5ca6z50aqbw", + "input.type": "log", + "log.level": "DEBUG", + "log.logger": "index.search.slowlog.query", + "log.offset": 1346, + "message": "[test_1][0]", + "process.thread.name": "elasticsearch[runTask-0][search][T#5]", + "service.name": "ES_ECS", + "service.type": "elasticsearch" + } +] \ No newline at end of file From 452d447372aac5449cf66e05c6e0eaaaf468dd66 Mon Sep 17 00:00:00 2001 From: Anderson Queiroz Date: Mon, 21 Feb 2022 10:42:58 +0100 Subject: [PATCH 04/13] fix typos and improve sentences (#30432) --- x-pack/elastic-agent/pkg/agent/application/paths/files.go | 4 ++-- x-pack/elastic-agent/pkg/agent/storage/store/state_store.go | 4 ++-- .../pkg/core/monitoring/beats/beats_monitor.go | 6 +++--- x-pack/elastic-agent/pkg/core/monitoring/monitor.go | 2 +- x-pack/elastic-agent/pkg/core/plugin/process/start.go | 2 +- x-pack/elastic-agent/pkg/core/plugin/service/app.go | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/x-pack/elastic-agent/pkg/agent/application/paths/files.go b/x-pack/elastic-agent/pkg/agent/application/paths/files.go index 44ec030ba6c..b2f3215d516 100644 --- a/x-pack/elastic-agent/pkg/agent/application/paths/files.go +++ b/x-pack/elastic-agent/pkg/agent/application/paths/files.go @@ -20,10 +20,10 @@ const defaultAgentFleetFile = "fleet.yml" // defaultAgentEnrollFile is a name of file used to enroll agent on first-start const defaultAgentEnrollFile = "enroll.yml" -// defaultAgentActionStoreFile is the file that will contains the action that can be replayed after restart. +// defaultAgentActionStoreFile is the file that will contain the action that can be replayed after restart. const defaultAgentActionStoreFile = "action_store.yml" -// defaultAgentStateStoreFile is the file that will contains the action that can be replayed after restart. +// defaultAgentStateStoreFile is the file that will contain the action that can be replayed after restart. const defaultAgentStateStoreFile = "state.yml" // AgentConfigFile is a name of file used to store agent information diff --git a/x-pack/elastic-agent/pkg/agent/storage/store/state_store.go b/x-pack/elastic-agent/pkg/agent/storage/store/state_store.go index 902e5f9f746..c370b941b15 100644 --- a/x-pack/elastic-agent/pkg/agent/storage/store/state_store.go +++ b/x-pack/elastic-agent/pkg/agent/storage/store/state_store.go @@ -11,7 +11,7 @@ import ( "io" "sync" - yaml "gopkg.in/yaml.v2" + "gopkg.in/yaml.v2" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/errors" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/storage" @@ -46,7 +46,7 @@ type action = fleetapi.Action // receives multiples actions to persist to disk, the implementation of the store only // take care of action policy change every other action are discarded. The store will only keep the // last good action on disk, we assume that the action is added to the store after it was ACK with -// Fleet. The store is not threadsafe. +// Fleet. The store is not thread safe. 
 type StateStore struct {
 	log   *logger.Logger
 	store storeLoad
diff --git a/x-pack/elastic-agent/pkg/core/monitoring/beats/beats_monitor.go b/x-pack/elastic-agent/pkg/core/monitoring/beats/beats_monitor.go
index 939aa89c99d..083e2803a0a 100644
--- a/x-pack/elastic-agent/pkg/core/monitoring/beats/beats_monitor.go
+++ b/x-pack/elastic-agent/pkg/core/monitoring/beats/beats_monitor.go
@@ -23,8 +23,8 @@ import (
 const httpPlusPrefix = "http+"
 const defaultMonitoringNamespace = "default"

-// Monitor is a monitoring interface providing information about the way
-// how beat is monitored
+// Monitor implements the monitoring.Monitor interface providing information
+// about beats.
 type Monitor struct {
 	operatingSystem string
 	config          *monitoringConfig.MonitoringConfig
@@ -209,7 +209,7 @@ func (b *Monitor) Prepare(spec program.Spec, pipelineID string, uid, gid int) er
 }

 // LogPath describes a path where application stores logs. Empty if
-// application is not monitorable
+// application is not monitorable.
 func (b *Monitor) LogPath(spec program.Spec, pipelineID string) string {
 	if !b.WatchLogs() {
 		return ""
diff --git a/x-pack/elastic-agent/pkg/core/monitoring/monitor.go b/x-pack/elastic-agent/pkg/core/monitoring/monitor.go
index 71e78d20a51..3294405d363 100644
--- a/x-pack/elastic-agent/pkg/core/monitoring/monitor.go
+++ b/x-pack/elastic-agent/pkg/core/monitoring/monitor.go
@@ -29,7 +29,7 @@ type Monitor interface {
 	Close()
 }

-// NewMonitor creates a monitor based on a process configuration.
+// NewMonitor creates a beats monitor based on a process configuration.
 func NewMonitor(cfg *configuration.SettingsConfig) (Monitor, error) {
 	logMetrics := true
 	if cfg.LoggingConfig != nil {
diff --git a/x-pack/elastic-agent/pkg/core/plugin/process/start.go b/x-pack/elastic-agent/pkg/core/plugin/process/start.go
index b946e692cfe..1b52163976c 100644
--- a/x-pack/elastic-agent/pkg/core/plugin/process/start.go
+++ b/x-pack/elastic-agent/pkg/core/plugin/process/start.go
@@ -39,7 +39,7 @@ func (a *Application) start(ctx context.Context, t app.Taggable, cfg map[string]
 	}()

 	// starting only if it's not running
-	// or if it is, then only in case it's restart and this call initiates from restart call
+	// or if it is, then only in case it's a restart and this call initiates from restart call
 	if a.Started() && a.state.Status != state.Restarting {
 		if a.state.ProcessInfo == nil {
 			// already started if not stopped or crashed
diff --git a/x-pack/elastic-agent/pkg/core/plugin/service/app.go b/x-pack/elastic-agent/pkg/core/plugin/service/app.go
index 4b0d2b8ee0a..5272c8b7cdf 100644
--- a/x-pack/elastic-agent/pkg/core/plugin/service/app.go
+++ b/x-pack/elastic-agent/pkg/core/plugin/service/app.go
@@ -296,7 +296,7 @@ func (a *Application) Shutdown() {

 // OnStatusChange is the handler called by the GRPC server code.
 //
-// It updates the status of the application and handles restarting the application is needed.
+// It updates the status of the application and handles restarting the application when needed.
func (a *Application) OnStatusChange(s *server.ApplicationState, status proto.StateObserved_Status, msg string, payload map[string]interface{}) { a.appLock.Lock() defer a.appLock.Unlock() From 74f13cf9f5585e82055923ef292714c9171d6c7d Mon Sep 17 00:00:00 2001 From: Marcin Tojek Date: Mon, 21 Feb 2022 18:03:08 +0100 Subject: [PATCH 05/13] Fix Docker module: rename fields on dashboards (#30500) * Fix Docker module: rename fields on dashboards * Fix CHANGELOG --- CHANGELOG.next.asciidoc | 1 + .../_meta/kibana/7/visualization/Docker-Network-IO-ecs.json | 6 +++--- .../_meta/kibana/7/visualization/Docker-containers-ecs.json | 4 ++-- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index a07654b6e34..73245a03c97 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -64,6 +64,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...main[Check the HEAD dif - Enhance metricbeat on openshift documentation {pull}30054[30054] - Fixed missing ZooKeeper metrics due compatibility issues with versions >= 3.6.0 {pull}30068[30068] +- Fix Docker module: rename fields on dashboards. {pull}30500[30500] *Packetbeat* diff --git a/metricbeat/module/docker/_meta/kibana/7/visualization/Docker-Network-IO-ecs.json b/metricbeat/module/docker/_meta/kibana/7/visualization/Docker-Network-IO-ecs.json index c93a18e3e3a..af3e5261c4c 100644 --- a/metricbeat/module/docker/_meta/kibana/7/visualization/Docker-Network-IO-ecs.json +++ b/metricbeat/module/docker/_meta/kibana/7/visualization/Docker-Network-IO-ecs.json @@ -34,7 +34,7 @@ "id": "1", "params": { "customLabel": "IN bytes", - "field": "docker.network.in.bytes" + "field": "docker.network.inbound.bytes" }, "schema": "metric", "type": "max" @@ -69,7 +69,7 @@ "id": "4", "params": { "customLabel": "OUT bytes", - "field": "docker.network.out.bytes" + "field": "docker.network.outbound.bytes" }, "schema": "metric", "type": "max" @@ -171,4 +171,4 @@ "type": "visualization", "updated_at": "2021-08-04T16:31:07.529Z", "version": "WzM3NjQsMV0=" -} \ No newline at end of file +} diff --git a/metricbeat/module/docker/_meta/kibana/7/visualization/Docker-containers-ecs.json b/metricbeat/module/docker/_meta/kibana/7/visualization/Docker-containers-ecs.json index 1c5bee9fd7f..0d6eadff796 100644 --- a/metricbeat/module/docker/_meta/kibana/7/visualization/Docker-containers-ecs.json +++ b/metricbeat/module/docker/_meta/kibana/7/visualization/Docker-containers-ecs.json @@ -49,7 +49,7 @@ "id": "4", "params": { "customLabel": "DiskIO", - "field": "docker.diskio.total" + "field": "docker.diskio.summary.bytes" }, "schema": "metric", "type": "max" @@ -117,4 +117,4 @@ "type": "visualization", "updated_at": "2021-08-04T16:31:07.529Z", "version": "WzM3NTgsMV0=" -} \ No newline at end of file +} From d74e7aae6da99145fffc550c25f1117041c56318 Mon Sep 17 00:00:00 2001 From: Dan Kortschak <90160302+efd6@users.noreply.github.com> Date: Tue, 22 Feb 2022 11:26:01 +1030 Subject: [PATCH 06/13] packetbeat/beater: don't attempt to install npcap when already installed (#30509) * don't attempt to install npcap when already installed * unload DLL during install operation --- CHANGELOG.next.asciidoc | 2 +- packetbeat/beater/install_npcap.go | 3 +++ packetbeat/npcap/npcap.go | 14 +++++++++++++- packetbeat/npcap/npcap_other.go | 2 +- packetbeat/npcap/npcap_windows.go | 8 +------- 5 files changed, 19 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 73245a03c97..298142ae9f4 100644 
--- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -140,7 +140,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...main[Check the HEAD dif *Packetbeat* -- Add automated OEM Npcap installation handling. {pull}29112[29112] {pull}30438[30438] +- Add automated OEM Npcap installation handling. {pull}29112[29112] {pull}30438[30438] {pull}30493[30493] - Add support for capturing TLS random number and OCSP status request details. {issue}29962[29962] {pull}30102[30102] *Functionbeat* diff --git a/packetbeat/beater/install_npcap.go b/packetbeat/beater/install_npcap.go index e947bca5b01..d15ac21479a 100644 --- a/packetbeat/beater/install_npcap.go +++ b/packetbeat/beater/install_npcap.go @@ -51,6 +51,9 @@ func installNpcap(b *beat.Beat) error { log.Infof("npcap version: %s", npcapVersion) } }() + if !npcap.Upgradeable() { + return nil + } ctx, cancel := context.WithTimeout(context.Background(), installTimeout) defer cancel() diff --git a/packetbeat/npcap/npcap.go b/packetbeat/npcap/npcap.go index c81d1ce731d..d0cc42dce48 100644 --- a/packetbeat/npcap/npcap.go +++ b/packetbeat/npcap/npcap.go @@ -68,6 +68,18 @@ func Install(ctx context.Context, log *logp.Logger, path, dst string, compat boo } func install(ctx context.Context, log *logp.Logger, path, dst string, compat bool) error { + if pcap.Version() != "" { + // If we are here there is a runtime Npcap DLL loaded. We need to + // unload this to prevent the application being killed during the + // install. + // + // See https://npcap.com/guide/npcap-users-guide.html#npcap-installation-uninstall-options. + err := unloadWinPCAP() + if err != nil { + return fmt.Errorf("npcap: failed to unload Npcap DLL: %w", err) + } + } + args := []string{"/S", "/winpcap_mode=no"} if compat { args[1] = "/winpcap_mode=yes" @@ -96,7 +108,7 @@ func install(ctx context.Context, log *logp.Logger, path, dst string, compat boo return fmt.Errorf("npcap: failed to install Npcap: %w", err) } - return reloadWinPCAP() + return loadWinPCAP() } func Upgradeable() bool { diff --git a/packetbeat/npcap/npcap_other.go b/packetbeat/npcap/npcap_other.go index c813644d471..7f0d29c09e6 100644 --- a/packetbeat/npcap/npcap_other.go +++ b/packetbeat/npcap/npcap_other.go @@ -22,4 +22,4 @@ package npcap func loadWinPCAP() error { return nil } -func reloadWinPCAP() error { return nil } +func unloadWinPCAP() error { return nil } diff --git a/packetbeat/npcap/npcap_windows.go b/packetbeat/npcap/npcap_windows.go index 44d0053820f..3e08bf4a1ee 100644 --- a/packetbeat/npcap/npcap_windows.go +++ b/packetbeat/npcap/npcap_windows.go @@ -24,10 +24,4 @@ import "github.com/google/gopacket/pcap" func loadWinPCAP() error { return pcap.LoadWinPCAP() } -func reloadWinPCAP() error { - err := pcap.UnloadWinPCAP() - if err != nil { - return err - } - return pcap.LoadWinPCAP() -} +func unloadWinPCAP() error { return pcap.UnloadWinPCAP() } From ca4a87a1b66c2234763b13c3d6882b3d2718c379 Mon Sep 17 00:00:00 2001 From: Mat Schaffer Date: Tue, 22 Feb 2022 18:55:28 +0900 Subject: [PATCH 07/13] Switch skip to use `CI` (#30512) Fairly certain `BUILD_ID` is jenkins-specific. 
--- auditbeat/tests/system/test_file_integrity.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/auditbeat/tests/system/test_file_integrity.py b/auditbeat/tests/system/test_file_integrity.py index 08fef13d8e5..280d2916a55 100644 --- a/auditbeat/tests/system/test_file_integrity.py +++ b/auditbeat/tests/system/test_file_integrity.py @@ -62,7 +62,7 @@ def wait_output(self, min_events): else: break - @unittest.skipIf(os.getenv("BUILD_ID") is not None and platform.system() == 'Darwin', + @unittest.skipIf(os.getenv("CI") is not None and platform.system() == 'Darwin', 'Flaky test: https://github.com/elastic/beats/issues/24678') def test_non_recursive(self): """ From 0ec4dc500c75d8d0f22165820691b0ef0631b031 Mon Sep 17 00:00:00 2001 From: Andres Rodriguez Date: Tue, 22 Feb 2022 11:34:55 +0100 Subject: [PATCH 08/13] Forward-port 8.0.1 changelog to main (#30522) * Forward-port 8.0.1 changelog to 8.1 (#30517) * docs: Prepare Changelog for 8.0.1 (#30508) * docs: Close changelog for 8.0.1 * Move entry that did not make the BC * Clean empty sections Co-authored-by: Andres Rodriguez (cherry picked from commit 6842e1eec8dc56fdc316a07a623a8bf5fd89f2e8) * Additional cleanup Co-authored-by: Elastic Machine (cherry picked from commit 1065e3f934606efd8bb7bb681826f5fe7738eb35) * Additional cleanup --- CHANGELOG.asciidoc | 26 ++++++++++++++++++++++++++ CHANGELOG.next.asciidoc | 8 -------- libbeat/docs/release.asciidoc | 1 + 3 files changed, 27 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index 8573206e620..05c7e883534 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -3,6 +3,32 @@ :issue: https://github.com/elastic/beats/issues/ :pull: https://github.com/elastic/beats/pull/ +[[release-notes-8.0.1]] +=== Beats version 8.0.1 +https://github.com/elastic/beats/compare/v8.0.0...v8.0.1[View commits] + +==== Bugfixes + +*Filebeat* + +- tcp/unix input: Stop accepting connections after socket is closed. {pull}29712[29712] +- Fix using log_group_name_prefix in aws-cloudwatch input. {pull}29695[29695] +- Fix multiple instances of the same module configured within `filebeat.modules` in filebeat.yml. {issue}29649[29649] {pull}29952[29952] +- aws-s3: fix race condition in states used by s3-poller. {issue}30123[30123] {pull}30131[30131] + +*Filebeat* +- Fix broken Kafka input {issue}29746[29746] {pull}30277[30277] +- cisco module: Fix change the broke ASA and FTD configs that used `var.input: syslog`. {pull}30072[30072] +- aws-s3: fix race condition in states used by s3-poller. {issue}30123[30123] {pull}30131[30131] + +*Heartbeat* +- Fix missing mapping for `service.name`. {pull}30324[30324] + +*Winlogbeat* + +- Fix run loop when reading from evtx file {pull}30006[30006] + + [[release-notes-8.0.0]] === Beats version 8.0.0 https://github.com/elastic/beats/compare/v7.17.0...v8.0.0[View commits] diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 298142ae9f4..86faa1d39e0 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -48,17 +48,11 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...main[Check the HEAD dif *Filebeat* -- tcp/unix input: Stop accepting connections after socket is closed. {pull}29712[29712] -- Fix using log_group_name_prefix in aws-cloudwatch input. {pull}29695[29695] -- Fix multiple instances of the same module configured within `filebeat.modules` in filebeat.yml. 
{issue}29649[29649] {pull}29952[29952]
-- aws-s3: fix race condition in states used by s3-poller. {issue}30123[30123] {pull}30131[30131]
-- Fix broken Kafka input {issue}29746[29746] {pull}30277[30277]
 - Report the starting offset of the line in `log.offset` when using `filestream` instead of the end to be ECS compliant. {pull}30445[30445]
 - auditd: Prevent mapping explosion when truncated EXECVE records are ingested. {pull}30382[30382]
 - elasticsearch: fix duplicate ingest when using a common appender configuration {issue}30428[30428] {pull}30440[30440]

 *Heartbeat*
-- Fix missing mapping for `service.name`. {pull}30324[30324]

 *Metricbeat*

@@ -72,7 +66,6 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...main[Check the HEAD dif
 *Winlogbeat*

 - Add provider names to Security pipeline conditional check in routing pipeline. {issue}27288[27288] {pull}29781[29781]
-- Fix run loop when reading from evtx file {pull}30006[30006]

 *Functionbeat*

@@ -173,4 +166,3 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...main[Check the HEAD dif

 ==== Known Issue

-*Journalbeat*
diff --git a/libbeat/docs/release.asciidoc b/libbeat/docs/release.asciidoc
index 0ce6f44b2df..aa997f284ae 100644
--- a/libbeat/docs/release.asciidoc
+++ b/libbeat/docs/release.asciidoc
@@ -8,6 +8,7 @@
 This section summarizes the changes in each release. Also read
 <<breaking-changes>> for more detail about changes that affect
 upgrade.

+* <<release-notes-8.0.1>>
 * <<release-notes-8.0.0>>
 * <<release-notes-7.17.0>>

From bc1c6534497da0ebcd222a4fc05133e8d8185dea Mon Sep 17 00:00:00 2001
From: Tetiana Kravchenko
Date: Tue, 22 Feb 2022 13:40:30 +0100
Subject: [PATCH 09/13] extend documentation about setting orchestrator.cluster
 fields (#30518)

* extend documentation about setting orchestrator.cluster fields

Signed-off-by: Tetiana Kravchenko

* changelog: fix typo; add pr link

Signed-off-by: Tetiana Kravchenko
---
 CHANGELOG.next.asciidoc                          |  1 +
 metricbeat/docs/modules/kubernetes.asciidoc      | 11 ++++++++++-
 metricbeat/module/kubernetes/_meta/docs.asciidoc | 11 ++++++++++-
 3 files changed, 21 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc
index 86faa1d39e0..c6f0212b98f 100644
--- a/CHANGELOG.next.asciidoc
+++ b/CHANGELOG.next.asciidoc
@@ -130,6 +130,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...main[Check the HEAD dif
 - Add gcp firestore metricset. {pull}29918[29918]
 - Remove strict parsing on RabbitMQ module {pull}30090[30090]
 - Add `kubernetes.container.status.last.reason` metric {pull}30306[30306]
+- Extend documentation about `orchestrator.cluster` fields {pull}30518[30518]

 *Packetbeat*

diff --git a/metricbeat/docs/modules/kubernetes.asciidoc b/metricbeat/docs/modules/kubernetes.asciidoc
index dc6873daece..bbc11556ee2 100644
--- a/metricbeat/docs/modules/kubernetes.asciidoc
+++ b/metricbeat/docs/modules/kubernetes.asciidoc
@@ -166,7 +166,16 @@ If you are using HA for those components, be aware that when gathering data from
 Dashboards for `controllermanager` `scheduler` and `proxy` are not compatible with kibana versions below `7.2.0`

-Cluster selector in `cluster overview` dashboard helps in distinguishing and filtering metrics collected from multiple clusters. If you want to focus on a subset of the Kubernetes clusters for monitoring a specific scenario, this cluster selector could be a handy tool. Note that this selector gets populated from the `orchestrator.cluster.name` field that may not always be available.
This field gets its value from sources like `kube_config`, `kubeadm-config` configMap, and Google Cloud's meta API for GKE. If the sources mentioned above don't provide this value, metricbeat will not report it. However, you can always use https://www.elastic.co/guide/en/beats/metricbeat/current/defining-processors.html[processors] to set this field and utilize it in the `cluster overview` dashboard. +Cluster selector in `cluster overview` dashboard helps in distinguishing and filtering metrics collected from multiple clusters. If you want to focus on a subset of the Kubernetes clusters for monitoring a specific scenario, this cluster selector could be a handy tool. Note that this selector gets populated from the `orchestrator.cluster.name` field that may not always be available. This field gets its value from sources like `kube_config`, `kubeadm-config` configMap, and Google Cloud's meta API for GKE. If the sources mentioned above don't provide this value, metricbeat will not report it. However, you can always use https://www.elastic.co/guide/en/beats/filebeat/current/add-fields.html[add_fields processor] to set `orchestrator.cluster.name` fields and utilize it in the `cluster overview` dashboard: +[source,yaml] +---- +processors: + - add_fields: + target: orchestrator.cluster + fields: + name: clusterName + url: clusterURL +---- Kubernetes cluster overview example: diff --git a/metricbeat/module/kubernetes/_meta/docs.asciidoc b/metricbeat/module/kubernetes/_meta/docs.asciidoc index 8e8e8419523..e2c2ac82ecf 100644 --- a/metricbeat/module/kubernetes/_meta/docs.asciidoc +++ b/metricbeat/module/kubernetes/_meta/docs.asciidoc @@ -157,7 +157,16 @@ If you are using HA for those components, be aware that when gathering data from Dashboards for `controllermanager` `scheduler` and `proxy` are not compatible with kibana versions below `7.2.0` -Cluster selector in `cluster overview` dashboard helps in distinguishing and filtering metrics collected from multiple clusters. If you want to focus on a subset of the Kubernetes clusters for monitoring a specific scenario, this cluster selector could be a handy tool. Note that this selector gets populated from the `orchestrator.cluster.name` field that may not always be available. This field gets its value from sources like `kube_config`, `kubeadm-config` configMap, and Google Cloud's meta API for GKE. If the sources mentioned above don't provide this value, metricbeat will not report it. However, you can always use https://www.elastic.co/guide/en/beats/metricbeat/current/defining-processors.html[processors] to set this field and utilize it in the `cluster overview` dashboard. +Cluster selector in `cluster overview` dashboard helps in distinguishing and filtering metrics collected from multiple clusters. If you want to focus on a subset of the Kubernetes clusters for monitoring a specific scenario, this cluster selector could be a handy tool. Note that this selector gets populated from the `orchestrator.cluster.name` field that may not always be available. This field gets its value from sources like `kube_config`, `kubeadm-config` configMap, and Google Cloud's meta API for GKE. If the sources mentioned above don't provide this value, metricbeat will not report it. 
+Cluster selector in `cluster overview` dashboard helps in distinguishing and filtering metrics collected from multiple clusters. If you want to focus on a subset of the Kubernetes clusters for monitoring a specific scenario, this cluster selector could be a handy tool. Note that this selector gets populated from the `orchestrator.cluster.name` field that may not always be available. This field gets its value from sources like `kube_config`, the `kubeadm-config` configMap, and Google Cloud's meta API for GKE. If the sources mentioned above don't provide this value, Metricbeat will not report it. However, you can always use the https://www.elastic.co/guide/en/beats/filebeat/current/add-fields.html[add_fields processor] to set the `orchestrator.cluster.name` field and utilize it in the `cluster overview` dashboard:
+[source,yaml]
+----
+processors:
+  - add_fields:
+      target: orchestrator.cluster
+      fields:
+        name: clusterName
+        url: clusterURL
+----
 
 Kubernetes cluster overview example:
 

From a159cceeae79d5fa57ec308d427890c16d93224c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?No=C3=A9mi=20V=C3=A1nyi?=
Date: Tue, 22 Feb 2022 15:48:15 +0100
Subject: [PATCH 10/13] Add `parsers` examples to `filestream` reference configuration (#30529)

---
 .../config/filebeat.inputs.reference.yml.tmpl | 95 +++++++++++++++++++
 filebeat/filebeat.reference.yml               | 95 +++++++++++++++++++
 x-pack/filebeat/filebeat.reference.yml        | 95 +++++++++++++++++++
 3 files changed, 285 insertions(+)

diff --git a/filebeat/_meta/config/filebeat.inputs.reference.yml.tmpl b/filebeat/_meta/config/filebeat.inputs.reference.yml.tmpl
index 010e5e36e2f..c1e5fd55d72 100644
--- a/filebeat/_meta/config/filebeat.inputs.reference.yml.tmpl
+++ b/filebeat/_meta/config/filebeat.inputs.reference.yml.tmpl
@@ -293,6 +293,101 @@ filebeat.inputs:
   # original for harvesting but will report the symlink name as source.
   #prospector.scanner.symlinks: false
 
+  ### Parsers configuration
+
+  #### JSON configuration
+
+  #parsers:
+  #- ndjson:
+    # Decode JSON options. Enable this if your logs are structured in JSON.
+    # JSON key on which to apply the line filtering and multiline settings. This key
+    # must be top level and its value must be a string, otherwise it is ignored. If
+    # no text key is defined, the line filtering and multiline features cannot be used.
+    #message_key:
+
+    # By default, the decoded JSON is placed under a "json" key in the output document.
+    # If you enable this setting, the keys are copied to the top level of the output document.
+    #keys_under_root: false
+
+    # If keys_under_root and this setting are enabled, then the values from the decoded
+    # JSON object overwrite the fields that Filebeat normally adds (type, source, offset, etc.)
+    # in case of conflicts.
+    #overwrite_keys: false
+
+    # If this setting is enabled, then keys in the decoded JSON object will be recursively
+    # de-dotted, and expanded into a hierarchical object structure.
+    # For example, `{"a.b.c": 123}` would be expanded into `{"a":{"b":{"c":123}}}`.
+    #expand_keys: false
+
+    # If this setting is enabled, Filebeat adds an "error.message" and "error.key: json" key in case of JSON
+    # unmarshaling errors or when a text key is defined in the configuration but cannot
+    # be used.
+    #add_error_key: false
+
+  #### Multiline options
+
+  # Multiline can be used for log messages spanning multiple lines. This is common
+  # for Java Stack Traces or C-Line Continuation
+
+  #parsers:
+  #- multiline:
+    #type: pattern
+    # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [
+    #pattern: ^\[
+
+    # Defines if the pattern set under the pattern setting should be negated or not. Default is false.
+    #negate: false
+
+    # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern
+    # that was (not) matched before or after, or as long as a pattern is not matched based on negate.
+    # Note: After is the equivalent to previous and before is the equivalent to next in Logstash
+    #match: after
+
+    # The maximum number of lines that are combined to one event.
+    # In case there are more than max_lines the additional lines are discarded.
+    # Default is 500
+    #max_lines: 500
+
+    # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event
+    # Default is 5s.
+    #timeout: 5s
+
+    # Do not add new line character when concatenating lines.
+    #skip_newline: false
+
+  # To aggregate a constant number of lines into a single event, use the count mode of multiline.
+
+  #parsers:
+  #- multiline:
+    #type: count
+
+    # The number of lines to aggregate into a single event.
+    #count_lines: 3
+
+    # The maximum number of lines that are combined to one event.
+    # In case there are more than max_lines the additional lines are discarded.
+    # Default is 500
+    #max_lines: 500
+
+    # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event
+    # Default is 5s.
+    #timeout: 5s
+
+    # Do not add new line character when concatenating lines.
+    #skip_newline: false
+
+  #### Parsing container events
+
+  # You can parse container events with different formats from all streams.
+
+  #parsers:
+  #- container:
+    # Source of container events. Available options: all, stdin, stderr.
+    #stream: all
+
+    # Format of the container events. Available options: auto, cri, docker, json-file
+    #format: auto
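+
+  # Illustrative sketch (an editorial addition, not part of the upstream
+  # patch): the parsers above can be combined. This example decodes each
+  # line as JSON and then joins stack-trace continuation lines stored in
+  # the "message" key; the key and pattern are sample values, not defaults.
+
+  #parsers:
+  #- ndjson:
+  #    message_key: message
+  #    add_error_key: true
+  #- multiline:
+  #    type: pattern
+  #    pattern: '^\['
+  #    negate: true
+  #    match: after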
+
 
   ### Log rotation
 
   # When an external tool rotates the input files with copytruncate strategy
diff --git a/filebeat/filebeat.reference.yml b/filebeat/filebeat.reference.yml
index 90d614545dc..67765b0f6d0 100644
--- a/filebeat/filebeat.reference.yml
+++ b/filebeat/filebeat.reference.yml
@@ -700,6 +700,101 @@ filebeat.inputs:
   # original for harvesting but will report the symlink name as source.
   #prospector.scanner.symlinks: false
 
+  ### Parsers configuration
+
+  #### JSON configuration
+
+  #parsers:
+  #- ndjson:
+    # Decode JSON options. Enable this if your logs are structured in JSON.
+    # JSON key on which to apply the line filtering and multiline settings. This key
+    # must be top level and its value must be a string, otherwise it is ignored. If
+    # no text key is defined, the line filtering and multiline features cannot be used.
+    #message_key:
+
+    # By default, the decoded JSON is placed under a "json" key in the output document.
+    # If you enable this setting, the keys are copied to the top level of the output document.
+    #keys_under_root: false
+
+    # If keys_under_root and this setting are enabled, then the values from the decoded
+    # JSON object overwrite the fields that Filebeat normally adds (type, source, offset, etc.)
+    # in case of conflicts.
+    #overwrite_keys: false
+
+    # If this setting is enabled, then keys in the decoded JSON object will be recursively
+    # de-dotted, and expanded into a hierarchical object structure.
+    # For example, `{"a.b.c": 123}` would be expanded into `{"a":{"b":{"c":123}}}`.
+    #expand_keys: false
+
+    # If this setting is enabled, Filebeat adds an "error.message" and "error.key: json" key in case of JSON
+    # unmarshaling errors or when a text key is defined in the configuration but cannot
+    # be used.
+    #add_error_key: false
+
+  #### Multiline options
+
+  # Multiline can be used for log messages spanning multiple lines. This is common
+  # for Java Stack Traces or C-Line Continuation
+
+  #parsers:
+  #- multiline:
+    #type: pattern
+    # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [
+    #pattern: ^\[
+
+    # Defines if the pattern set under the pattern setting should be negated or not. Default is false.
+    #negate: false
+
+    # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern
+    # that was (not) matched before or after, or as long as a pattern is not matched based on negate.
+    # Note: After is the equivalent to previous and before is the equivalent to next in Logstash
+    #match: after
+
+    # The maximum number of lines that are combined to one event.
+    # In case there are more than max_lines the additional lines are discarded.
+    # Default is 500
+    #max_lines: 500
+
+    # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event
+    # Default is 5s.
+    #timeout: 5s
+
+    # Do not add new line character when concatenating lines.
+    #skip_newline: false
+
+  # To aggregate a constant number of lines into a single event, use the count mode of multiline.
+
+  #parsers:
+  #- multiline:
+    #type: count
+
+    # The number of lines to aggregate into a single event.
+    #count_lines: 3
+
+    # The maximum number of lines that are combined to one event.
+    # In case there are more than max_lines the additional lines are discarded.
+    # Default is 500
+    #max_lines: 500
+
+    # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event
+    # Default is 5s.
+    #timeout: 5s
+
+    # Do not add new line character when concatenating lines.
+    #skip_newline: false
+
+  #### Parsing container events
+
+  # You can parse container events with different formats from all streams.
+
+  #parsers:
+  #- container:
+    # Source of container events. Available options: all, stdin, stderr.
+    #stream: all
+
+    # Format of the container events. Available options: auto, cri, docker, json-file
+    #format: auto
+
 
   ### Log rotation
 
   # When an external tool rotates the input files with copytruncate strategy
diff --git a/x-pack/filebeat/filebeat.reference.yml b/x-pack/filebeat/filebeat.reference.yml
index becffd39a6b..5cfa51f4a6a 100644
--- a/x-pack/filebeat/filebeat.reference.yml
+++ b/x-pack/filebeat/filebeat.reference.yml
@@ -2759,6 +2759,101 @@ filebeat.inputs:
   # original for harvesting but will report the symlink name as source.
   #prospector.scanner.symlinks: false
 
+  ### Parsers configuration
+
+  #### JSON configuration
+
+  #parsers:
+  #- ndjson:
+    # Decode JSON options. Enable this if your logs are structured in JSON.
+    # JSON key on which to apply the line filtering and multiline settings. This key
+    # must be top level and its value must be a string, otherwise it is ignored. If
+    # no text key is defined, the line filtering and multiline features cannot be used.
+    #message_key:
+
+    # By default, the decoded JSON is placed under a "json" key in the output document.
+    # If you enable this setting, the keys are copied to the top level of the output document.
+    #keys_under_root: false
+
+    # If keys_under_root and this setting are enabled, then the values from the decoded
+    # JSON object overwrite the fields that Filebeat normally adds (type, source, offset, etc.)
+    # in case of conflicts.
+    #overwrite_keys: false
+
+    # If this setting is enabled, then keys in the decoded JSON object will be recursively
+    # de-dotted, and expanded into a hierarchical object structure.
+    # For example, `{"a.b.c": 123}` would be expanded into `{"a":{"b":{"c":123}}}`.
+    #expand_keys: false
+
+    # If this setting is enabled, Filebeat adds an "error.message" and "error.key: json" key in case of JSON
+    # unmarshaling errors or when a text key is defined in the configuration but cannot
+    # be used.
+    #add_error_key: false
+
+  #### Multiline options
+
+  # Multiline can be used for log messages spanning multiple lines. This is common
+  # for Java Stack Traces or C-Line Continuation
+
+  #parsers:
+  #- multiline:
+    #type: pattern
+    # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [
+    #pattern: ^\[
+
+    # Defines if the pattern set under the pattern setting should be negated or not. Default is false.
+    #negate: false
+
+    # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern
+    # that was (not) matched before or after, or as long as a pattern is not matched based on negate.
+    # Note: After is the equivalent to previous and before is the equivalent to next in Logstash
+    #match: after
+
+    # The maximum number of lines that are combined to one event.
+    # In case there are more than max_lines the additional lines are discarded.
+    # Default is 500
+    #max_lines: 500
+
+    # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event
+    # Default is 5s.
+    #timeout: 5s
+
+    # Do not add new line character when concatenating lines.
+    #skip_newline: false
+
+  # To aggregate a constant number of lines into a single event, use the count mode of multiline.
+
+  #parsers:
+  #- multiline:
+    #type: count
+
+    # The number of lines to aggregate into a single event.
+    #count_lines: 3
+
+    # The maximum number of lines that are combined to one event.
+    # In case there are more than max_lines the additional lines are discarded.
+    # Default is 500
+    #max_lines: 500
+
+    # After the defined timeout, a multiline event is sent even if no new pattern was found to start a new event
+    # Default is 5s.
+    #timeout: 5s
+
+    # Do not add new line character when concatenating lines.
+    #skip_newline: false
+
+  #### Parsing container events
+
+  # You can parse container events with different formats from all streams.
+
+  #parsers:
+  #- container:
+    # Source of container events. Available options: all, stdin, stderr.
+    #stream: all
+
+    # Format of the container events. Available options: auto, cri, docker, json-file
+    #format: auto
+
 
   ### Log rotation
 
   # When an external tool rotates the input files with copytruncate strategy

From d699e19fd3326e064c99b804ebabaea36344e76e Mon Sep 17 00:00:00 2001
From: Pier-Hugues Pellerin
Date: Tue, 22 Feb 2022 10:25:26 -0500
Subject: [PATCH 11/13] Update docker/distribution to 2.8.0 (#30462)

---
 CHANGELOG.next.asciidoc | 1 +
 NOTICE.txt              | 4 ++--
 go.mod                  | 2 +-
 go.sum                  | 3 ++-
 4 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc
index c6f0212b98f..a0aaea48833 100644
--- a/CHANGELOG.next.asciidoc
+++ b/CHANGELOG.next.asciidoc
@@ -41,6 +41,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...main[Check the HEAD dif
 - Fix field names with `add_network_direction` processor. {issue}29747[29747] {pull}29751[29751]
 - Fix a logging bug when `ssl.verification_mode` was set to `full` or `certificate`, the command `test output` incorrectly logged that TLS was disabled.
 - Fix the ability for subcommands to be ran properly from the beats containers. {pull}30452[30452]
+- Update docker/distribution dependency library to fix a security issue concerning the OCI Manifest Type Confusion issue. {pull}30462[30462]
 
 *Auditbeat*
 
diff --git a/NOTICE.txt b/NOTICE.txt
index e96e88a7b85..7dddd44d077 100644
--- a/NOTICE.txt
+++ b/NOTICE.txt
@@ -23825,11 +23825,11 @@ SOFTWARE.
 --------------------------------------------------------------------------------
 Dependency : github.com/docker/distribution
-Version: v2.7.1+incompatible
+Version: v2.8.0+incompatible
 Licence type (autodetected): Apache-2.0
 --------------------------------------------------------------------------------
 
-Contents of probable licence file $GOMODCACHE/github.com/docker/distribution@v2.7.1+incompatible/LICENSE:
+Contents of probable licence file $GOMODCACHE/github.com/docker/distribution@v2.8.0+incompatible/LICENSE:
 
 Apache License
 Version 2.0, January 2004
 
diff --git a/go.mod b/go.mod
index 1bad66722d5..414ce1e38de 100644
--- a/go.mod
+++ b/go.mod
@@ -215,7 +215,7 @@ require (
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/dgraph-io/ristretto v0.1.0 // indirect
 	github.com/dimchansky/utfbom v1.1.0 // indirect
-	github.com/docker/distribution v2.7.1+incompatible // indirect
+	github.com/docker/distribution v2.8.0+incompatible // indirect
 	github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect
 	github.com/eapache/queue v1.1.0 // indirect
 	github.com/evanphx/json-patch v4.12.0+incompatible // indirect
diff --git a/go.sum b/go.sum
index 5974ef527e0..73d67b979fc 100644
--- a/go.sum
+++ b/go.sum
@@ -473,8 +473,9 @@ github.com/dlclark/regexp2 v1.1.7-0.20171009020623-7632a260cbaf/go.mod h1:2pZnwu
 github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
 github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
 github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
 github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/distribution v2.8.0+incompatible h1:l9EaZDICImO1ngI+uTifW+ZYvvz7fKISBAKpg+MbWbY=
+github.com/docker/distribution v2.8.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/docker v1.4.2-0.20170802015333-8af4db6f002a/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v20.10.7+incompatible h1:Z6O9Nhsjv+ayUEeI1IojKbYcsGdgYSNqxe1s2MYzUhQ=
 github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=

From 1499d30edf2d015d54922a85c5bd7d68889d843c Mon Sep 17 00:00:00 2001
From: apmmachine <58790750+apmmachine@users.noreply.github.com>
Date: Tue, 22 Feb 2022 13:12:01 -0500
Subject: [PATCH 12/13] [Automation] Update elastic stack version to
 8.2.0-74785f8b for testing (#30441)

Co-authored-by: apmmachine
Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
---
 testing/environments/snapshot-oss.yml | 6 +++---
 testing/environments/snapshot.yml     | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/testing/environments/snapshot-oss.yml b/testing/environments/snapshot-oss.yml
index 9893632c72d..56f319b53c7 100644
--- a/testing/environments/snapshot-oss.yml
+++ b/testing/environments/snapshot-oss.yml
@@ -3,7 +3,7 @@ version: '2.3'
 
 services:
   elasticsearch:
-    image: docker.elastic.co/elasticsearch/elasticsearch:8.2.0-ff5ac1e4-SNAPSHOT
+    image: docker.elastic.co/elasticsearch/elasticsearch:8.2.0-74785f8b-SNAPSHOT
     healthcheck:
       test: ["CMD-SHELL", "curl -s http://localhost:9200/_cat/health?h=status | grep -q green"]
       retries: 300
@@ -21,7 +21,7 @@ services:
       - "script.context.template.cache_max_size=2000"
 
   logstash:
-    image: docker.elastic.co/logstash/logstash-oss:8.2.0-ff5ac1e4-SNAPSHOT
+    image: docker.elastic.co/logstash/logstash-oss:8.2.0-74785f8b-SNAPSHOT
     healthcheck:
       test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"]
       retries: 600
@@ -31,7 +31,7 @@ services:
       - ./docker/logstash/pki:/etc/pki:ro
 
   kibana:
-    image: docker.elastic.co/kibana/kibana:8.2.0-ff5ac1e4-SNAPSHOT
+    image: docker.elastic.co/kibana/kibana:8.2.0-74785f8b-SNAPSHOT
     healthcheck:
       test: ["CMD-SHELL", "curl -s http://localhost:5601/api/status?v8format=true | grep -q '\"overall\":{\"level\":\"available\"'"]
       retries: 600
diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml
index 873d80ca619..e3d38f0b6c1 100644
--- a/testing/environments/snapshot.yml
+++ b/testing/environments/snapshot.yml
@@ -3,7 +3,7 @@ version: '2.3'
 
 services:
   elasticsearch:
-    image: docker.elastic.co/elasticsearch/elasticsearch:8.2.0-ff5ac1e4-SNAPSHOT
+    image: docker.elastic.co/elasticsearch/elasticsearch:8.2.0-74785f8b-SNAPSHOT
     # When extend is used it merges healthcheck.tests, see:
     # https://github.com/docker/compose/issues/8962
     # healthcheck:
@@ -42,7 +42,7 @@ services:
      - ./docker/logstash/pki:/etc/pki:ro
 
   kibana:
-    image: docker.elastic.co/kibana/kibana:8.2.0-ff5ac1e4-SNAPSHOT
+    image: docker.elastic.co/kibana/kibana:8.2.0-74785f8b-SNAPSHOT
     environment:
       - "ELASTICSEARCH_USERNAME=kibana_system_user"
       - "ELASTICSEARCH_PASSWORD=testing"

From 128cbf46d4c00c3fcec8f89e6117837c19ee94b4 Mon Sep 17 00:00:00 2001
From: Dan Kortschak <90160302+efd6@users.noreply.github.com>
Date: Wed, 23 Feb 2022 07:43:53 +1030
Subject: [PATCH 13/13] {,x-pack/}winlogbeat: clean up dead code (#30491)

* {,x-pack/}winlogbeat: run gofumpt

gofumpt -w ./{,x-pack/}winlogbeat

* {,x-pack/}winlogbeat: remove vars and consts

This removes the majority of the unused identifiers found by running:

staticcheck -checks 'U*' ./{,x-pack/}winlogbeat/...

Care has been taken to not remove platform-specific code and
mage-required code.
---
 winlogbeat/beater/eventlogger.go                       |  3 +--
 winlogbeat/beater/winlogbeat.go                        |  4 ----
 winlogbeat/checkpoint/checkpoint.go                    |  2 +-
 winlogbeat/checkpoint/checkpoint_test.go               |  2 +-
 winlogbeat/checkpoint/file_unix.go                     |  2 +-
 winlogbeat/config/config.go                            | 10 ++++------
 winlogbeat/eventlog/cache.go                           |  4 ++--
 winlogbeat/eventlog/wineventlog_test.go                |  5 -----
 winlogbeat/scripts/mage/docs.go                        |  2 +-
 winlogbeat/scripts/mage/package.go                     |  6 +++---
 winlogbeat/scripts/mage/update.go                      |  6 ++----
 winlogbeat/sys/wineventlog/renderer.go                 |  4 ++--
 x-pack/winlogbeat/module/sysmon/gen_dns_error_codes.go |  6 ++++--
 x-pack/winlogbeat/module/testing_windows.go            |  4 ++--
 14 files changed, 24 insertions(+), 36 deletions(-)

diff --git a/winlogbeat/beater/eventlogger.go b/winlogbeat/beater/eventlogger.go
index 390a2fb3975..d83bdfcadbf 100644
--- a/winlogbeat/beater/eventlogger.go
+++ b/winlogbeat/beater/eventlogger.go
@@ -195,8 +195,7 @@ func processorsForConfig(
 	// added before the user processors.
 	if !config.Index.IsEmpty() {
 		staticFields := fmtstr.FieldsForBeat(beatInfo.Beat, beatInfo.Version)
-		timestampFormat, err :=
-			fmtstr.NewTimestampFormatString(&config.Index, staticFields)
+		timestampFormat, err := fmtstr.NewTimestampFormatString(&config.Index, staticFields)
 		if err != nil {
 			return nil, err
 		}
diff --git a/winlogbeat/beater/winlogbeat.go b/winlogbeat/beater/winlogbeat.go
index ec7ebf90ef6..d23d47db60d 100644
--- a/winlogbeat/beater/winlogbeat.go
+++ b/winlogbeat/beater/winlogbeat.go
@@ -25,7 +25,6 @@ import (
 	"context"
 	"fmt"
 	"sync"
-	"time"
 
 	"github.com/elastic/beats/v7/libbeat/beat"
 	"github.com/elastic/beats/v7/libbeat/common"
@@ -44,9 +43,6 @@ const pipelinesWarning = "Winlogbeat is unable to load the ingest pipelines" +
 	" because the Elasticsearch output is not configured/enabled. If you have" +
 	" already loaded the ingest pipelines, you can ignore this warning."
 
-// Time the application was started.
-var startTime = time.Now().UTC()
-
 // Winlogbeat is used to conform to the beat interface
 type Winlogbeat struct {
 	beat *beat.Beat // Common beat information.
diff --git a/winlogbeat/checkpoint/checkpoint.go b/winlogbeat/checkpoint/checkpoint.go
index a93b29abed1..8dfdef2cb68 100644
--- a/winlogbeat/checkpoint/checkpoint.go
+++ b/winlogbeat/checkpoint/checkpoint.go
@@ -275,5 +275,5 @@ func (c *Checkpoint) read() (*PersistedState, error) {
 func (c *Checkpoint) createDir() error {
 	dir := filepath.Dir(c.file)
 	logp.Info("Creating %s if it does not exist.", dir)
-	return os.MkdirAll(dir, os.FileMode(0750))
+	return os.MkdirAll(dir, os.FileMode(0o750))
 }
diff --git a/winlogbeat/checkpoint/checkpoint_test.go b/winlogbeat/checkpoint/checkpoint_test.go
index ef9e70cbeb1..b626fea1392 100644
--- a/winlogbeat/checkpoint/checkpoint_test.go
+++ b/winlogbeat/checkpoint/checkpoint_test.go
@@ -132,7 +132,7 @@ func TestCreateDir(t *testing.T) {
 	fileInfo, err := os.Stat(stateDir)
 	if assert.NoError(t, err) {
 		assert.Equal(t, true, fileInfo.IsDir())
-		assert.Equal(t, os.FileMode(0750), fileInfo.Mode().Perm())
+		assert.Equal(t, os.FileMode(0o750), fileInfo.Mode().Perm())
 	}
 }
diff --git a/winlogbeat/checkpoint/file_unix.go b/winlogbeat/checkpoint/file_unix.go
index 248b5301955..8bb6545eb26 100644
--- a/winlogbeat/checkpoint/file_unix.go
+++ b/winlogbeat/checkpoint/file_unix.go
@@ -23,5 +23,5 @@ package checkpoint
 import "os"
 
 func create(path string) (*os.File, error) {
-	return os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC|os.O_SYNC, 0600)
+	return os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC|os.O_SYNC, 0o600)
 }
diff --git a/winlogbeat/config/config.go b/winlogbeat/config/config.go
index ed8f2195d02..4db27edcfa6 100644
--- a/winlogbeat/config/config.go
+++ b/winlogbeat/config/config.go
@@ -32,12 +32,10 @@ const (
 	DefaultRegistryFile = ".winlogbeat.yml"
 )
 
-var (
-	DefaultSettings = WinlogbeatConfig{
-		RegistryFile:  DefaultRegistryFile,
-		RegistryFlush: 5 * time.Second,
-	}
-)
+var DefaultSettings = WinlogbeatConfig{
+	RegistryFile:  DefaultRegistryFile,
+	RegistryFlush: 5 * time.Second,
+}
 
 // WinlogbeatConfig contains all of Winlogbeat configuration data.
 type WinlogbeatConfig struct {
diff --git a/winlogbeat/eventlog/cache.go b/winlogbeat/eventlog/cache.go
index 04c11a6c621..cb6bf5576d1 100644
--- a/winlogbeat/eventlog/cache.go
+++ b/winlogbeat/eventlog/cache.go
@@ -65,8 +65,8 @@ type messageFilesCache struct {
 // initialized (including starting a periodic janitor goroutine to purge
 // expired Handles).
 func newMessageFilesCache(eventLogName string, loader messageFileLoaderFunc,
-	freer freeHandleFunc) *messageFilesCache {
-
+	freer freeHandleFunc,
+) *messageFilesCache {
 	size := &expvar.Int{}
 	cacheStats.Set(eventLogName+"Size", size)
 
diff --git a/winlogbeat/eventlog/wineventlog_test.go b/winlogbeat/eventlog/wineventlog_test.go
index c5d92ce1873..0dc33b5098a 100644
--- a/winlogbeat/eventlog/wineventlog_test.go
+++ b/winlogbeat/eventlog/wineventlog_test.go
@@ -52,11 +52,6 @@ const (
 	// EventCreate.exe has valid event IDs in the range of 1-1000 where each
 	// event message requires a single parameter.
 	eventCreateMsgFile = "%SystemRoot%\\System32\\EventCreate.exe"
-	// services.exe is used by the Service Control Manager as its event message
-	// file; these tests use it to log messages with more than one parameter.
-	servicesMsgFile = "%SystemRoot%\\System32\\services.exe"
-	// netevent.dll has messages that require no message parameters.
-	netEventMsgFile = "%SystemRoot%\\System32\\netevent.dll"
 )
 
 func TestWinEventLogConfig_Validate(t *testing.T) {
diff --git a/winlogbeat/scripts/mage/docs.go b/winlogbeat/scripts/mage/docs.go
index 14980b63d73..0b59d3e79b1 100644
--- a/winlogbeat/scripts/mage/docs.go
+++ b/winlogbeat/scripts/mage/docs.go
@@ -91,5 +91,5 @@ func moduleDocs() error {
 	}
 
 	fmt.Printf(">> update:moduleDocs: Collecting module documentation for %v.\n", strings.Join(names, ", "))
-	return ioutil.WriteFile(mage.OSSBeatDir("docs/modules_list.asciidoc"), []byte(content), 0644)
+	return ioutil.WriteFile(mage.OSSBeatDir("docs/modules_list.asciidoc"), []byte(content), 0o644)
 }
diff --git a/winlogbeat/scripts/mage/package.go b/winlogbeat/scripts/mage/package.go
index 80768e9110c..81e090092bd 100644
--- a/winlogbeat/scripts/mage/package.go
+++ b/winlogbeat/scripts/mage/package.go
@@ -72,7 +72,7 @@ func customizePackaging() {
 	mg.Deps(prepareModulePackaging)
 
 	moduleDir := devtools.PackageFile{
-		Mode:    0644,
+		Mode:    0o644,
 		Source:  dirModuleGenerated,
 		Config:  true,
 		Modules: true,
@@ -102,8 +102,8 @@ func prepareModulePackaging() error {
 	return (&devtools.CopyTask{
 		Source:  devtools.XPackBeatDir("module"),
 		Dest:    dirModuleGenerated,
-		Mode:    0644,
-		DirMode: 0755,
+		Mode:    0o644,
+		DirMode: 0o755,
 		Exclude: []string{
 			"/_meta",
 			"/test",
diff --git a/winlogbeat/scripts/mage/update.go b/winlogbeat/scripts/mage/update.go
index 2c7d4dd840c..f85e75ef1eb 100644
--- a/winlogbeat/scripts/mage/update.go
+++ b/winlogbeat/scripts/mage/update.go
@@ -35,10 +35,8 @@ func init() {
 	docs.RegisterDeps(Update.FieldDocs, Update.ModuleDocs)
 }
 
-var (
-	// SelectLogic configures the types of project logic to use (OSS vs X-Pack).
-	SelectLogic devtools.ProjectType
-)
+// SelectLogic configures the types of project logic to use (OSS vs X-Pack).
+var SelectLogic devtools.ProjectType
 
 // Update target namespace.
 type Update mg.Namespace
diff --git a/winlogbeat/sys/wineventlog/renderer.go b/winlogbeat/sys/wineventlog/renderer.go
index 8a16567e7ad..4f9dbd4539f 100644
--- a/winlogbeat/sys/wineventlog/renderer.go
+++ b/winlogbeat/sys/wineventlog/renderer.go
@@ -364,8 +364,8 @@ func (r *Renderer) addEventData(evtMeta *EventMetadata, values []interface{}, ev
 
 // formatMessage adds the message to the event.
 func (r *Renderer) formatMessage(publisherMeta *PublisherMetadataStore,
 	eventMeta *EventMetadata, eventHandle EvtHandle, values []interface{},
-	eventID uint16) (string, error) {
-
+	eventID uint16,
+) (string, error) {
 	if eventMeta != nil {
 		if eventMeta.MsgStatic != "" {
 			return eventMeta.MsgStatic, nil
diff --git a/x-pack/winlogbeat/module/sysmon/gen_dns_error_codes.go b/x-pack/winlogbeat/module/sysmon/gen_dns_error_codes.go
index b2ce22ad3a0..ff38473131f 100644
--- a/x-pack/winlogbeat/module/sysmon/gen_dns_error_codes.go
+++ b/x-pack/winlogbeat/module/sysmon/gen_dns_error_codes.go
@@ -31,8 +31,10 @@ import (
 // See https://docs.microsoft.com/en-us/windows/win32/debug/system-error-code-lookup-tool
 // for details about the Microsoft Error Lookup Tool.
-const microsoftErrorToolURL = "https://download.microsoft.com/download/4/3/2/432140e8-fb6c-4145-8192-25242838c542/Err_6.4.5/Err_6.4.5.exe"
-const microsoftErrorToolSha256 = "88739EC82BA16A0B4A3C83C1DD2FCA6336AD8E2A1E5F1238C085B1E86AB8834A"
+const (
+	microsoftErrorToolURL    = "https://download.microsoft.com/download/4/3/2/432140e8-fb6c-4145-8192-25242838c542/Err_6.4.5/Err_6.4.5.exe"
+	microsoftErrorToolSha256 = "88739EC82BA16A0B4A3C83C1DD2FCA6336AD8E2A1E5F1238C085B1E86AB8834A"
+)
 
 var includeCodes = []uint64{
 	5,
diff --git a/x-pack/winlogbeat/module/testing_windows.go b/x-pack/winlogbeat/module/testing_windows.go
index 058e2320a9d..537adafb595 100644
--- a/x-pack/winlogbeat/module/testing_windows.go
+++ b/x-pack/winlogbeat/module/testing_windows.go
@@ -188,12 +188,12 @@ func writeGolden(t testing.TB, source string, events []common.MapStr) {
 		t.Fatal(err)
 	}
 
-	if err := os.MkdirAll("testdata", 0755); err != nil {
+	if err := os.MkdirAll("testdata", 0o755); err != nil {
 		t.Fatal(err)
 	}
 
 	outPath := filepath.Join("testdata", filepath.Base(source)+".golden.json")
-	if err := ioutil.WriteFile(outPath, data, 0644); err != nil {
+	if err := ioutil.WriteFile(outPath, data, 0o644); err != nil {
 		t.Fatal(err)
 	}
 }
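
A note on the `0o` literals introduced throughout PATCH 13/13: Go 1.13 added
the explicit `0o` octal prefix, and gofumpt rewrites the legacy `0`-prefixed
form to it; the value is unchanged. A minimal, self-contained sketch (the
package and path are illustrative, not taken from the patches):

    package main

    import (
    	"fmt"
    	"os"
    )

    func main() {
    	// 0o644 and 0644 denote the same octal constant; only the spelling differs.
    	fmt.Println(0o644 == 0644) // true

    	// Mirrors the permission arguments used in the patch's MkdirAll calls.
    	if err := os.MkdirAll("testdata", os.FileMode(0o755)); err != nil {
    		fmt.Println(err)
    	}
    }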