From 633f7dc1edc8e426453f745481cf28c599b09c29 Mon Sep 17 00:00:00 2001 From: Chris Mark Date: Mon, 13 Dec 2021 22:41:46 +0200 Subject: [PATCH 01/57] Update k8s library (#29394) Signed-off-by: chrismark --- CHANGELOG.next.asciidoc | 1 + NOTICE.txt | 312 +++++++++++++++++++++++++++++++++++----- go.mod | 31 ++-- go.sum | 65 ++++++--- 4 files changed, 346 insertions(+), 63 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index e6e1cab2f18..38a1f05c043 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -227,6 +227,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - SASL/SCRAM in the Kafka output is no longer beta. {pull}29126[29126] - Discover changes in Kubernetes nodes metadata as soon as they happen. {pull}23139[23139] - Support self signed certificates on outputs {pull}29229[29229] +- Update k8s library {pull}29394[29394] *Auditbeat* diff --git a/NOTICE.txt b/NOTICE.txt index 5761502320c..148e4680e06 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -10434,11 +10434,11 @@ Contents of probable licence file $GOMODCACHE/github.com/gorhill/cronexpr@v0.0.0 -------------------------------------------------------------------------------- Dependency : github.com/gorilla/mux -Version: v1.7.3 +Version: v1.8.0 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/gorilla/mux@v1.7.3/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/gorilla/mux@v1.8.0/LICENSE: Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved. @@ -16656,11 +16656,11 @@ THE SOFTWARE. 
-------------------------------------------------------------------------------- Dependency : golang.org/x/crypto -Version: v0.0.0-20210616213533-5ff15b29337e +Version: v0.0.0-20210817164053-32db794688a5 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/crypto@v0.0.0-20210616213533-5ff15b29337e/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/crypto@v0.0.0-20210817164053-32db794688a5/LICENSE: Copyright (c) 2009 The Go Authors. All rights reserved. @@ -18285,11 +18285,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : k8s.io/api -Version: v0.21.1 +Version: v0.23.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/k8s.io/api@v0.21.1/LICENSE: +Contents of probable licence file $GOMODCACHE/k8s.io/api@v0.23.0/LICENSE: Apache License @@ -18497,11 +18497,11 @@ Contents of probable licence file $GOMODCACHE/k8s.io/api@v0.21.1/LICENSE: -------------------------------------------------------------------------------- Dependency : k8s.io/apimachinery -Version: v0.21.1 +Version: v0.23.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/k8s.io/apimachinery@v0.21.1/LICENSE: +Contents of probable licence file $GOMODCACHE/k8s.io/apimachinery@v0.23.0/LICENSE: Apache License @@ -18709,11 +18709,11 @@ Contents of probable licence file $GOMODCACHE/k8s.io/apimachinery@v0.21.1/LICENS -------------------------------------------------------------------------------- Dependency : k8s.io/client-go -Version: v0.21.1 +Version: v0.23.0 Licence type (autodetected): Apache-2.0 
-------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/k8s.io/client-go@v0.21.1/LICENSE: +Contents of probable licence file $GOMODCACHE/k8s.io/client-go@v0.23.0/LICENSE: Apache License @@ -24299,11 +24299,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : github.com/evanphx/json-patch -Version: v4.9.0+incompatible +Version: v4.12.0+incompatible Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/evanphx/json-patch@v4.9.0+incompatible/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/evanphx/json-patch@v4.12.0+incompatible/LICENSE: Copyright (c) 2014, Evan Phoenix All rights reserved. @@ -24677,11 +24677,11 @@ SOFTWARE. -------------------------------------------------------------------------------- Dependency : github.com/go-logr/logr -Version: v0.4.0 +Version: v1.2.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/go-logr/logr@v0.4.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/go-logr/logr@v1.2.0/LICENSE: Apache License Version 2.0, January 2004 @@ -26779,11 +26779,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- Dependency : github.com/googleapis/gnostic -Version: v0.4.1 +Version: v0.5.5 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/googleapis/gnostic@v0.4.1/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/googleapis/gnostic@v0.5.5/LICENSE: Apache License @@ -30450,11 +30450,11 @@ SOFTWARE. -------------------------------------------------------------------------------- Dependency : github.com/json-iterator/go -Version: v1.1.11 +Version: v1.1.12 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/json-iterator/go@v1.1.11/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/json-iterator/go@v1.1.12/LICENSE: MIT License @@ -32036,11 +32036,11 @@ Contents of probable licence file $GOMODCACHE/github.com/modern-go/concurrent@v0 -------------------------------------------------------------------------------- Dependency : github.com/modern-go/reflect2 -Version: v1.0.1 +Version: v1.0.2 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/modern-go/reflect2@v1.0.1/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/modern-go/reflect2@v1.0.2/LICENSE: Apache License Version 2.0, January 2004 @@ -32309,11 +32309,11 @@ SOFTWARE. 
-------------------------------------------------------------------------------- Dependency : github.com/onsi/ginkgo -Version: v1.12.1 +Version: v1.14.0 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/onsi/ginkgo@v1.12.1/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/onsi/ginkgo@v1.14.0/LICENSE: Copyright (c) 2013-2014 Onsi Fakhouri @@ -34558,11 +34558,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : golang.org/x/term -Version: v0.0.0-20210220032956-6a3ed077a48d +Version: v0.0.0-20210615171337-6886f2dfbf5b Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/term@v0.0.0-20210220032956-6a3ed077a48d/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/term@v0.0.0-20210615171337-6886f2dfbf5b/LICENSE: Copyright (c) 2009 The Go Authors. All rights reserved. @@ -36076,11 +36076,11 @@ third-party archives. -------------------------------------------------------------------------------- Dependency : k8s.io/klog/v2 -Version: v2.9.0 +Version: v2.30.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/k8s.io/klog/v2@v2.9.0/LICENSE: +Contents of probable licence file $GOMODCACHE/k8s.io/klog/v2@v2.30.0/LICENSE: Apache License Version 2.0, January 2004 @@ -36277,11 +36277,11 @@ third-party archives. 
-------------------------------------------------------------------------------- Dependency : k8s.io/kube-openapi -Version: v0.0.0-20210305001622-591a79e4bda7 +Version: v0.0.0-20211115234752-e816edb12b65 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/k8s.io/kube-openapi@v0.0.0-20210305001622-591a79e4bda7/LICENSE: +Contents of probable licence file $GOMODCACHE/k8s.io/kube-openapi@v0.0.0-20211115234752-e816edb12b65/LICENSE: Apache License @@ -36489,11 +36489,11 @@ Contents of probable licence file $GOMODCACHE/k8s.io/kube-openapi@v0.0.0-2021030 -------------------------------------------------------------------------------- Dependency : k8s.io/utils -Version: v0.0.0-20201110183641-67b214c5f920 +Version: v0.0.0-20210930125809-cb0fa318a74b Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/k8s.io/utils@v0.0.0-20201110183641-67b214c5f920/LICENSE: +Contents of probable licence file $GOMODCACHE/k8s.io/utils@v0.0.0-20210930125809-cb0fa318a74b/LICENSE: Apache License @@ -37105,13 +37105,261 @@ library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. +-------------------------------------------------------------------------------- +Dependency : sigs.k8s.io/json +Version: v0.0.0-20211020170558-c049b76a60c6 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/sigs.k8s.io/json@v0.0.0-20211020170558-c049b76a60c6/LICENSE: + +Files other than internal/golang/* licensed under: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +------------------ + +internal/golang/* files licensed under: + + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + -------------------------------------------------------------------------------- Dependency : sigs.k8s.io/structured-merge-diff/v4 -Version: v4.1.0 +Version: v4.1.2 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/sigs.k8s.io/structured-merge-diff/v4@v4.1.0/LICENSE: +Contents of probable licence file $GOMODCACHE/sigs.k8s.io/structured-merge-diff/v4@v4.1.2/LICENSE: Apache License Version 2.0, January 2004 diff --git a/go.mod b/go.mod index 70af77f8e93..4cf5253b1a1 100644 --- a/go.mod +++ b/go.mod @@ -98,7 +98,7 @@ require ( github.com/google/gopacket v1.1.19 github.com/google/uuid v1.3.0 github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75 - github.com/gorilla/mux v1.7.3 + github.com/gorilla/mux v1.8.0 github.com/h2non/filetype v1.1.1 github.com/hashicorp/go-multierror v1.1.0 github.com/hashicorp/go-retryablehttp v0.6.6 @@ -162,7 +162,7 @@ require ( go.uber.org/atomic v1.8.0 go.uber.org/multierr v1.5.0 go.uber.org/zap v1.14.1 - golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e + golang.org/x/crypto 
v0.0.0-20210817164053-32db794688a5 golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 golang.org/x/net v0.0.0-20211020060615-d418f374d309 golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1 @@ -186,9 +186,9 @@ require ( gotest.tools v2.2.0+incompatible gotest.tools/gotestsum v0.6.0 howett.net/plist v0.0.0-20181124034731-591f970eefbb - k8s.io/api v0.21.1 - k8s.io/apimachinery v0.21.1 - k8s.io/client-go v0.21.1 + k8s.io/api v0.23.0 + k8s.io/apimachinery v0.23.0 + k8s.io/client-go v0.23.0 kernel.org/pub/linux/libs/security/libcap/cap v1.2.57 ) @@ -216,9 +216,9 @@ require ( github.com/docker/distribution v2.7.1+incompatible // indirect github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect github.com/eapache/queue v1.1.0 // indirect - github.com/evanphx/json-patch v4.9.0+incompatible // indirect + github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/fearful-symmetry/gomsr v0.0.1 // indirect - github.com/go-logr/logr v0.4.0 // indirect + github.com/go-logr/logr v1.2.0 // indirect github.com/gobuffalo/here v0.6.0 // indirect github.com/godbus/dbus/v5 v5.0.5 // indirect github.com/golang-jwt/jwt/v4 v4.0.0 // indirect @@ -229,7 +229,7 @@ require ( github.com/google/licenseclassifier v0.0.0-20200402202327-879cb1424de0 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/googleapis/gax-go/v2 v2.1.1 // indirect - github.com/googleapis/gnostic v0.4.1 // indirect + github.com/googleapis/gnostic v0.5.5 // indirect github.com/gorilla/websocket v1.4.2 // indirect github.com/hashicorp/cronexpr v1.1.0 // indirect github.com/hashicorp/errwrap v1.0.0 // indirect @@ -246,7 +246,7 @@ require ( github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern 
v1.0.0 // indirect - github.com/json-iterator/go v1.1.11 // indirect + github.com/json-iterator/go v1.1.12 // indirect github.com/karrick/godirwalk v1.15.6 // indirect github.com/klauspost/compress v1.13.6 // indirect github.com/markbates/pkger v0.17.0 // indirect @@ -257,7 +257,7 @@ require ( github.com/moby/spdystream v0.2.0 // indirect github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.1 // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect github.com/pierrec/lz4 v2.6.0+incompatible // indirect github.com/sanathkr/go-yaml v0.0.0-20170819195128-ed9d249f429b // indirect github.com/santhosh-tekuri/jsonschema v1.2.4 // indirect @@ -269,15 +269,16 @@ require ( go.elastic.co/fastjson v1.1.0 // indirect go.opencensus.io v0.23.0 // indirect golang.org/x/mod v0.5.1 // indirect - golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d // indirect + golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect google.golang.org/appengine v1.6.7 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect - k8s.io/klog/v2 v2.9.0 // indirect - k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 // indirect - k8s.io/utils v0.0.0-20201110183641-67b214c5f920 // indirect + k8s.io/klog/v2 v2.30.0 // indirect + k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect + k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b // indirect kernel.org/pub/linux/libs/security/libcap/psx v1.2.57 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.1.0 // indirect + sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.1.2 // indirect sigs.k8s.io/yaml v1.2.0 // indirect ) diff --git a/go.sum b/go.sum index 
28f0ed5236f..49cf15709b8 100644 --- a/go.sum +++ b/go.sum @@ -566,8 +566,9 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.m github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= @@ -577,6 +578,7 @@ github.com/fearful-symmetry/gorapl v0.0.4 h1:TMn4fhhtIAd+C3NrAl638oaYlX1vgcKNVVd github.com/fearful-symmetry/gorapl v0.0.4/go.mod h1:XoeZ+5v0tJX9WMvzqdPaaKAdX7y17mDN3pxDGemINR0= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod 
h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/foxcpp/go-mockdns v0.0.0-20201212160233-ede2f9158d15/go.mod h1:tPg4cp4nseejPd+UKxtCVQ2hUxNTZ7qQZJa7CLriIeo= @@ -586,6 +588,7 @@ github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebP github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= @@ -608,8 +611,9 @@ github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.2.0 h1:QK40JKJyMdUDz+h+xvCsru/bJhvG0UxvePV0ufL/AcE= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab h1:xveKWz2iaueeTaUgdetzel+U7exyigDYBryyVfV/rZk= github.com/go-martini/martini 
v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= github.com/go-ole/go-ole v1.2.5-0.20190920104607-14974a1cf647 h1:whypLownH338a3Ork2w9t0KUKtVxbXYySuz7V1YGsJo= @@ -822,6 +826,7 @@ github.com/gomodule/redigo v1.8.3 h1:HR0kYDX2RJZvAup8CsiJwxB4dTCSC0AaUq6S4SiLwUc github.com/gomodule/redigo v1.8.3/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/flatbuffers v1.12.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/flatbuffers v1.12.1 h1:MVlul7pQNoDzWRLTw5imwYsl+usrS1TXG2H4jg6ImGw= @@ -882,8 +887,10 @@ github.com/googleapis/gax-go/v2 v2.1.1 h1:dp3bWCh+PPO1zjRRiCSczJav13sBvG4UhNyVTa github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.4.0/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= -github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= +github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= +github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= github.com/gophercloud/gophercloud 
v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gophercloud/gophercloud v0.10.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= github.com/gophercloud/gophercloud v0.18.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4= @@ -895,8 +902,9 @@ github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51 github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= @@ -1054,8 +1062,9 @@ github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= 
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= @@ -1201,8 +1210,9 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= @@ -1240,14 +1250,16 @@ github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3/go.mod 
h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.2.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.3 h1:gph6h/qe9GSUw1NhH1gp+qb+h8rXD8Cy60Z32Qw3ELA= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= @@ -1479,6 +1491,7 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= +github.com/stoewer/go-strcase 
v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= @@ -1673,8 +1686,9 @@ golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9/go.mod h1:jdWPYTVW3xRLrWP golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e h1:gsTQYXdTw2Gq7RBsWvlQ91b+aEQ6bXFUngBGuR8sPpI= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 h1:HWj/xjIHfjYU5nVXpTM0s39J9CbLn7Cc5a7IC5rwsMQ= +golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1762,6 +1776,7 @@ golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -1785,6 +1800,7 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211020060615-d418f374d309 h1:A0lJIi+hcTR6aajJH4YqKWwohY4aW9RO7oRMcdv+HKI= golang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1891,6 +1907,7 @@ golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1937,6 +1954,7 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1944,8 +1962,9 @@ golang.org/x/sys v0.0.0-20211102192858-4dd72447c267 h1:7zYaz3tjChtpayGDzu6H0hDAU golang.org/x/sys v0.0.0-20211102192858-4dd72447c267/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE= +golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2028,6 +2047,7 @@ golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjs golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200422205258-72e4a01eba43/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -2143,6 +2163,7 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -2301,14 +2322,16 @@ k8s.io/api v0.17.5/go.mod h1:0zV5/ungglgy2Rlm3QK8fbxkXVs+BSJWpJP/+8gUVLY= k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= -k8s.io/api v0.21.1 h1:94bbZ5NTjdINJEdzOkpS4vdPhkb1VFpTYC9zh43f75c= k8s.io/api v0.21.1/go.mod h1:FstGROTmsSHBarKc8bylzXih8BLNYTiS3TZcsoEDg2s= +k8s.io/api v0.23.0 h1:WrL1gb73VSC8obi8cuYETJGXEoFNEh3LU0Pt+Sokgro= +k8s.io/api v0.23.0/go.mod h1:8wmDdLBHBNxtOIytwLstXt5E9PddnZb0GaMcqsvDBpg= k8s.io/apimachinery v0.17.5/go.mod h1:ioIo1G/a+uONV7Tv+ZmCbMG1/a3kVw5YcDdncd8ugQ0= k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= -k8s.io/apimachinery v0.21.1 h1:Q6XuHGlj2xc+hlMCvqyYfbv3H7SRGn2c8NycxJquDVs= k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= +k8s.io/apimachinery v0.23.0 h1:mIfWRMjBuMdolAWJ3Fd+aPTMv3X9z+waiARMpvvb0HQ= +k8s.io/apimachinery v0.23.0/go.mod h1:fFCTTBKvKcwTPFzjlcxp91uPFZr+JA0FubU4fLzzFYc= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= @@ -2316,8 +2339,9 @@ k8s.io/client-go v0.17.5/go.mod h1:S8uZpBpjJJdEH/fEyxcqg7Rn0P5jH+ilkgBHjriSmNo= k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= -k8s.io/client-go v0.21.1 h1:bhblWYLZKUu+pm50plvQF8WpY6TXdRRtcS/K9WauOj4= k8s.io/client-go v0.21.1/go.mod 
h1:/kEw4RgW+3xnBGzvp9IWxKSNA+lXn3A7AuH3gdOAzLs= +k8s.io/client-go v0.23.0 h1:vcsOqyPq7XV3QmQRCBH/t9BICJM9Q1M18qahjv+rebY= +k8s.io/client-go v0.23.0/go.mod h1:hrDnpnK1mSr65lHHcUuIZIXDgEbzc7/683c6hyG4jTA= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= @@ -2327,24 +2351,30 @@ k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= -k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM= k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw= +k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20200316234421-82d701f24f9d/go.mod h1:F+5wygcW0wmRTnM3cOgIqGivxkwSWIWT5YdsDbeAOaU= 
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= -k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 h1:vEx13qjvaZ4yfObSSXW7BrMc/KQBBT/Jyee8XtLf4x0= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= +k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4= +k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200414100711-2df71ebbae66/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b h1:wxEMGetGMur3J1xuGLQY7GEQYg9bZxKn3tKo5k/eYcs= +k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= kernel.org/pub/linux/libs/security/libcap/cap v1.2.57 h1:2nmqI+aw7EQZuelYktkQHBE4jESD2tOR+lOJEnv/Apo= kernel.org/pub/linux/libs/security/libcap/cap v1.2.57/go.mod h1:uI99C3r4SXvJeuqoEtx/eWt7UbmfqqZ80H8q+9t/A7I= kernel.org/pub/linux/libs/security/libcap/psx v1.2.57 h1:NOFATXSf5z/cMR3HIwQ3Xrd3nwnWl5xThmNr5U/F0pI= @@ -2355,11 +2385,14 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client 
v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s= +sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= sigs.k8s.io/structured-merge-diff/v2 v2.0.1/go.mod h1:Wb7vfKAodbKgf6tn1Kl0VvGj7mRH6DGaRcixXEJXTsE= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.1.0 h1:C4r9BgJ98vrKnnVCjwCSXcWjWe0NKcUQkmzDXZXGwH8= sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= From bc09710f82822904d0fa13a0689f44cdeaaf480c Mon Sep 17 00:00:00 2001 From: Dan Kortschak <90160302+efd6@users.noreply.github.com> Date: Tue, 14 Dec 2021 08:34:21 +1030 Subject: [PATCH 02/57] x-pack/filebeat/input/netflow: record IPv6 src and dst addresses (#29383) --- CHANGELOG.next.asciidoc | 1 + x-pack/filebeat/input/netflow/convert.go | 32 ++- ...-extended-uniflow-template-256.golden.json | 4 +- .../IPFIX-Barracuda-firewall.golden.json | 16 +- ...IPFIX-Mikrotik-RouterOS-6.39.2.golden.json | 250 +++++++++++++++--- ...er-with-variable-length-fields.golden.json | 8 +- .../golden/IPFIX-Nokia-BRAS.golden.json | 4 +- .../golden/IPFIX-OpenBSD-pflow.golden.json | 52 ++-- .../testdata/golden/IPFIX-Procera.golden.json | 22 +- ...are-virtual-distributed-switch.golden.json | 14 +- 
.../IPFIX-YAF-basic-with-applabel.golden.json | 4 +- ...igured-with-include_flowset_id.golden.json | 8 +- .../netflow/testdata/golden/IPFIX.golden.json | 24 +- ...w-9-Cisco-1941-K9-release-15.1.golden.json | 68 ++--- .../golden/Netflow-9-Cisco-ASA.golden.json | 28 +- ...o-ASR-9000-series-template-260.golden.json | 44 +-- .../Netflow-9-Cisco-ASR1001--X.golden.json | 48 ++-- ...flow-9-Fortigate-FortiOS-5.2.1.golden.json | 4 +- ...-9-Fortigate-FortiOS-54x-appid.golden.json | 36 +-- .../testdata/golden/Netflow-9-H3C.golden.json | 48 ++-- .../golden/Netflow-9-IE150-IE151.golden.json | 4 +- ...et-in-large-zero-filled-packet.golden.json | 4 +- ...Palo-Alto-PAN--OS-with-app--id.golden.json | 16 +- .../golden/Netflow-9-Streamcore.golden.json | 8 +- ...ti-Edgerouter-with-MPLS-labels.golden.json | 12 +- ...etflow-9-field-layer2segmentid.golden.json | 4 +- ..._netflow-reduced-size-encoding.golden.json | 8 +- .../golden/Netflow-9-macaddress.golden.json | 56 ++-- ...w-9-multiple-netflow-exporters.golden.json | 30 ++- .../Netflow-9-nprobe-DPI-L7.golden.json | 1 - ...-template-with-0-length-fields.golden.json | 20 +- .../golden/Netflow-9-valid-01.golden.json | 26 +- ...netflow9_e10s_4_7byte_pad.pcap.golden.json | 12 +- ...flow9_ubiquiti_edgerouter.pcap.golden.json | 4 +- 34 files changed, 570 insertions(+), 350 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 38a1f05c043..50cb3176b8f 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -149,6 +149,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Fix `threatintel.misp` filters configuration. {issue}27970[27970] - Fix handling of escaped newlines in the `decode_cef` processor. {issue}16995[16995] {pull}29268[29268] - Fix `panw` module ingest errors for GLOBALPROTECT logs {pull}29154[29154] +- Fix handling of IPv6 addresses in netflow flow events. 
{issue}19210[19210] {pull}29383[29383] *Heartbeat* diff --git a/x-pack/filebeat/input/netflow/convert.go b/x-pack/filebeat/input/netflow/convert.go index 465cd3efd02..2f10b33c238 100644 --- a/x-pack/filebeat/input/netflow/convert.go +++ b/x-pack/filebeat/input/netflow/convert.go @@ -5,9 +5,11 @@ package netflow import ( + "bytes" "encoding/base64" "encoding/binary" "net" + "sort" "strconv" "strings" "time" @@ -196,6 +198,10 @@ func flowToBeatEvent(flow record.Record, internalNetworks []string) (event beat. ecsSource["ip"] = ip relatedIP = append(relatedIP, ip) ecsSource["locality"] = getIPLocality(internalNetworks, ip).String() + } else if ip, found := getKeyIP(flow.Fields, "sourceIPv6Address"); found { + ecsSource["ip"] = ip + relatedIP = append(relatedIP, ip) + ecsSource["locality"] = getIPLocality(internalNetworks, ip).String() } if sourcePort, found := getKeyUint64(flow.Fields, "sourceTransportPort"); found { ecsSource["port"] = sourcePort @@ -209,6 +215,10 @@ func flowToBeatEvent(flow record.Record, internalNetworks []string) (event beat. ecsDest["ip"] = ip relatedIP = append(relatedIP, ip) ecsDest["locality"] = getIPLocality(internalNetworks, ip).String() + } else if ip, found := getKeyIP(flow.Fields, "destinationIPv6Address"); found { + ecsDest["ip"] = ip + relatedIP = append(relatedIP, ip) + ecsDest["locality"] = getIPLocality(internalNetworks, ip).String() } if destPort, found := getKeyUint64(flow.Fields, "destinationTransportPort"); found { ecsDest["port"] = destPort @@ -321,11 +331,31 @@ func flowToBeatEvent(flow record.Record, internalNetworks []string) (event beat. event.Fields["network"] = ecsNetwork } if len(relatedIP) > 0 { - event.Fields["related"] = common.MapStr{"ip": relatedIP} + event.Fields["related"] = common.MapStr{"ip": uniqueIPs(relatedIP)} } return } +// unique returns ips lexically sorted and with repeated elements +// omitted. 
+func uniqueIPs(ips []net.IP) []net.IP { + if len(ips) < 2 { + return ips + } + sort.Slice(ips, func(i, j int) bool { return bytes.Compare(ips[i], ips[j]) < 0 }) + curr := 0 + for i, ip := range ips { + if ip.Equal(ips[curr]) { + continue + } + curr++ + if curr < i { + ips[curr], ips[i] = ips[i], nil + } + } + return ips[:curr+1] +} + func getKeyUint64(dict record.Map, key string) (value uint64, found bool) { iface, found := dict[key] if !found { diff --git a/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-Barracuda-extended-uniflow-template-256.golden.json b/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-Barracuda-extended-uniflow-template-256.golden.json index fbc2f5e3d2a..a983b980e1d 100644 --- a/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-Barracuda-extended-uniflow-template-256.golden.json +++ b/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-Barracuda-extended-uniflow-template-256.golden.json @@ -169,8 +169,8 @@ }, "related": { "ip": [ - "64.235.151.76", - "10.236.5.4" + "10.236.5.4", + "64.235.151.76" ] }, "source": { diff --git a/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-Barracuda-firewall.golden.json b/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-Barracuda-firewall.golden.json index ec4f36b10fa..4fae641f637 100644 --- a/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-Barracuda-firewall.golden.json +++ b/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-Barracuda-firewall.golden.json @@ -145,8 +145,8 @@ }, "related": { "ip": [ - "10.99.252.50", - "10.99.130.239" + "10.99.130.239", + "10.99.252.50" ] }, "source": { @@ -225,8 +225,8 @@ }, "related": { "ip": [ - "10.99.130.239", - "10.98.243.20" + "10.98.243.20", + "10.99.130.239" ] }, "source": { @@ -385,8 +385,8 @@ }, "related": { "ip": [ - "10.99.168.140", - "10.98.243.20" + "10.98.243.20", + "10.99.168.140" ] }, "source": { @@ -545,8 +545,8 @@ }, "related": { "ip": [ - "10.99.168.140", - "10.98.243.20" + "10.98.243.20", + "10.99.168.140" ] }, "source": { diff 
--git a/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-Mikrotik-RouterOS-6.39.2.golden.json b/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-Mikrotik-RouterOS-6.39.2.golden.json index 36fea0d68e1..99fb1859cfc 100644 --- a/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-Mikrotik-RouterOS-6.39.2.golden.json +++ b/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-Mikrotik-RouterOS-6.39.2.golden.json @@ -454,8 +454,8 @@ }, "related": { "ip": [ - "172.20.4.199", - "172.20.4.1" + "172.20.4.1", + "172.20.4.199" ] }, "source": { @@ -610,8 +610,8 @@ }, "related": { "ip": [ - "172.20.4.30", - "10.10.8.34" + "10.10.8.34", + "172.20.4.30" ] }, "source": { @@ -766,8 +766,8 @@ }, "related": { "ip": [ - "172.20.4.30", - "10.10.8.105" + "10.10.8.105", + "172.20.4.30" ] }, "source": { @@ -1078,8 +1078,8 @@ }, "related": { "ip": [ - "172.20.5.191", - "10.10.8.220" + "10.10.8.220", + "172.20.5.191" ] }, "source": { @@ -2190,6 +2190,8 @@ "Meta": null, "Fields": { "destination": { + "ip": "fe80::ff:fe00:401", + "locality": "internal", "port": 5678 }, "event": { @@ -2204,7 +2206,7 @@ ] }, "flow": { - "id": "RlrAo_U1Y14", + "id": "b7SlZfUSuVA", "locality": "internal" }, "netflow": { @@ -2233,7 +2235,7 @@ }, "network": { "bytes": 555, - "community_id": "1:I4DlCbWgyxRiNPVj5ntu1L7Z0hw=", + "community_id": "1:jPPsu6xuLKidwts3HEFDcMotUV4=", "direction": "unknown", "iana_number": 17, "packets": 3, @@ -2242,8 +2244,15 @@ "observer": { "ip": "192.0.2.1" }, + "related": { + "ip": [ + "fe80::ff:fe00:401" + ] + }, "source": { "bytes": 555, + "ip": "fe80::ff:fe00:401", + "locality": "internal", "packets": 3, "port": 5678 } @@ -2256,6 +2265,8 @@ "Meta": null, "Fields": { "destination": { + "ip": "fe80::ff:fe00:401", + "locality": "internal", "port": 5678 }, "event": { @@ -2270,7 +2281,7 @@ ] }, "flow": { - "id": "RlrAo_U1Y14", + "id": "b7SlZfUSuVA", "locality": "internal" }, "netflow": { @@ -2299,7 +2310,7 @@ }, "network": { "bytes": 370, - "community_id": 
"1:I4DlCbWgyxRiNPVj5ntu1L7Z0hw=", + "community_id": "1:jPPsu6xuLKidwts3HEFDcMotUV4=", "direction": "unknown", "iana_number": 17, "packets": 2, @@ -2308,8 +2319,15 @@ "observer": { "ip": "192.0.2.1" }, + "related": { + "ip": [ + "fe80::ff:fe00:401" + ] + }, "source": { "bytes": 370, + "ip": "fe80::ff:fe00:401", + "locality": "internal", "packets": 2, "port": 5678 } @@ -2322,6 +2340,8 @@ "Meta": null, "Fields": { "destination": { + "ip": "fe80::ff:fe00:501", + "locality": "internal", "port": 5678 }, "event": { @@ -2336,7 +2356,7 @@ ] }, "flow": { - "id": "RlrAo_U1Y14", + "id": "VSgWWLDT0B0", "locality": "internal" }, "netflow": { @@ -2365,7 +2385,7 @@ }, "network": { "bytes": 495, - "community_id": "1:I4DlCbWgyxRiNPVj5ntu1L7Z0hw=", + "community_id": "1:5591MHyJIXcwUkG4sl3Rs9ro+Ng=", "direction": "unknown", "iana_number": 17, "packets": 3, @@ -2374,8 +2394,15 @@ "observer": { "ip": "192.0.2.1" }, + "related": { + "ip": [ + "fe80::ff:fe00:501" + ] + }, "source": { "bytes": 495, + "ip": "fe80::ff:fe00:501", + "locality": "internal", "packets": 3, "port": 5678 } @@ -2388,6 +2415,8 @@ "Meta": null, "Fields": { "destination": { + "ip": "fe80::ff:fe00:501", + "locality": "internal", "port": 5678 }, "event": { @@ -2402,7 +2431,7 @@ ] }, "flow": { - "id": "RlrAo_U1Y14", + "id": "VSgWWLDT0B0", "locality": "internal" }, "netflow": { @@ -2431,7 +2460,7 @@ }, "network": { "bytes": 330, - "community_id": "1:I4DlCbWgyxRiNPVj5ntu1L7Z0hw=", + "community_id": "1:5591MHyJIXcwUkG4sl3Rs9ro+Ng=", "direction": "unknown", "iana_number": 17, "packets": 2, @@ -2440,8 +2469,15 @@ "observer": { "ip": "192.0.2.1" }, + "related": { + "ip": [ + "fe80::ff:fe00:501" + ] + }, "source": { "bytes": 330, + "ip": "fe80::ff:fe00:501", + "locality": "internal", "packets": 2, "port": 5678 } @@ -2454,6 +2490,8 @@ "Meta": null, "Fields": { "destination": { + "ip": "fe80::ff:fe00:601", + "locality": "internal", "port": 5678 }, "event": { @@ -2468,7 +2506,7 @@ ] }, "flow": { - "id": "RlrAo_U1Y14", + "id": 
"LZgYFJ0tL2g", "locality": "internal" }, "netflow": { @@ -2497,7 +2535,7 @@ }, "network": { "bytes": 555, - "community_id": "1:I4DlCbWgyxRiNPVj5ntu1L7Z0hw=", + "community_id": "1:0uFiK83G7mZT66yRsishu3GrI+Y=", "direction": "unknown", "iana_number": 17, "packets": 3, @@ -2506,8 +2544,15 @@ "observer": { "ip": "192.0.2.1" }, + "related": { + "ip": [ + "fe80::ff:fe00:601" + ] + }, "source": { "bytes": 555, + "ip": "fe80::ff:fe00:601", + "locality": "internal", "packets": 3, "port": 5678 } @@ -2520,6 +2565,8 @@ "Meta": null, "Fields": { "destination": { + "ip": "fe80::ff:fe00:601", + "locality": "internal", "port": 5678 }, "event": { @@ -2534,7 +2581,7 @@ ] }, "flow": { - "id": "RlrAo_U1Y14", + "id": "LZgYFJ0tL2g", "locality": "internal" }, "netflow": { @@ -2563,7 +2610,7 @@ }, "network": { "bytes": 370, - "community_id": "1:I4DlCbWgyxRiNPVj5ntu1L7Z0hw=", + "community_id": "1:0uFiK83G7mZT66yRsishu3GrI+Y=", "direction": "unknown", "iana_number": 17, "packets": 2, @@ -2572,8 +2619,15 @@ "observer": { "ip": "192.0.2.1" }, + "related": { + "ip": [ + "fe80::ff:fe00:601" + ] + }, "source": { "bytes": 370, + "ip": "fe80::ff:fe00:601", + "locality": "internal", "packets": 2, "port": 5678 } @@ -2586,6 +2640,8 @@ "Meta": null, "Fields": { "destination": { + "ip": "fe80::ff:fe00:701", + "locality": "internal", "port": 5678 }, "event": { @@ -2600,7 +2656,7 @@ ] }, "flow": { - "id": "RlrAo_U1Y14", + "id": "dmeH14jqz_U", "locality": "internal" }, "netflow": { @@ -2629,7 +2685,7 @@ }, "network": { "bytes": 555, - "community_id": "1:I4DlCbWgyxRiNPVj5ntu1L7Z0hw=", + "community_id": "1:GHzTYB/S+swKAM+TWkXhIHekjME=", "direction": "unknown", "iana_number": 17, "packets": 3, @@ -2638,8 +2694,15 @@ "observer": { "ip": "192.0.2.1" }, + "related": { + "ip": [ + "fe80::ff:fe00:701" + ] + }, "source": { "bytes": 555, + "ip": "fe80::ff:fe00:701", + "locality": "internal", "packets": 3, "port": 5678 } @@ -2652,6 +2715,8 @@ "Meta": null, "Fields": { "destination": { + "ip": "fe80::ff:fe00:701", + 
"locality": "internal", "port": 5678 }, "event": { @@ -2666,7 +2731,7 @@ ] }, "flow": { - "id": "RlrAo_U1Y14", + "id": "dmeH14jqz_U", "locality": "internal" }, "netflow": { @@ -2695,7 +2760,7 @@ }, "network": { "bytes": 370, - "community_id": "1:I4DlCbWgyxRiNPVj5ntu1L7Z0hw=", + "community_id": "1:GHzTYB/S+swKAM+TWkXhIHekjME=", "direction": "unknown", "iana_number": 17, "packets": 2, @@ -2704,8 +2769,15 @@ "observer": { "ip": "192.0.2.1" }, + "related": { + "ip": [ + "fe80::ff:fe00:701" + ] + }, "source": { "bytes": 370, + "ip": "fe80::ff:fe00:701", + "locality": "internal", "packets": 2, "port": 5678 } @@ -2718,6 +2790,8 @@ "Meta": null, "Fields": { "destination": { + "ip": "fe80::ff:fe00:801", + "locality": "internal", "port": 5678 }, "event": { @@ -2732,7 +2806,7 @@ ] }, "flow": { - "id": "RlrAo_U1Y14", + "id": "Il9O6oJGqRk", "locality": "internal" }, "netflow": { @@ -2761,7 +2835,7 @@ }, "network": { "bytes": 555, - "community_id": "1:I4DlCbWgyxRiNPVj5ntu1L7Z0hw=", + "community_id": "1:Y0L0KaggvOgNiQSbjBDXSANtIRo=", "direction": "unknown", "iana_number": 17, "packets": 3, @@ -2770,8 +2844,15 @@ "observer": { "ip": "192.0.2.1" }, + "related": { + "ip": [ + "fe80::ff:fe00:801" + ] + }, "source": { "bytes": 555, + "ip": "fe80::ff:fe00:801", + "locality": "internal", "packets": 3, "port": 5678 } @@ -2784,6 +2865,8 @@ "Meta": null, "Fields": { "destination": { + "ip": "fe80::ff:fe00:801", + "locality": "internal", "port": 5678 }, "event": { @@ -2798,7 +2881,7 @@ ] }, "flow": { - "id": "RlrAo_U1Y14", + "id": "Il9O6oJGqRk", "locality": "internal" }, "netflow": { @@ -2827,7 +2910,7 @@ }, "network": { "bytes": 370, - "community_id": "1:I4DlCbWgyxRiNPVj5ntu1L7Z0hw=", + "community_id": "1:Y0L0KaggvOgNiQSbjBDXSANtIRo=", "direction": "unknown", "iana_number": 17, "packets": 2, @@ -2836,8 +2919,15 @@ "observer": { "ip": "192.0.2.1" }, + "related": { + "ip": [ + "fe80::ff:fe00:801" + ] + }, "source": { "bytes": 370, + "ip": "fe80::ff:fe00:801", + "locality": "internal", 
"packets": 2, "port": 5678 } @@ -2850,6 +2940,8 @@ "Meta": null, "Fields": { "destination": { + "ip": "fe80::ff:fe00:901", + "locality": "internal", "port": 5678 }, "event": { @@ -2864,7 +2956,7 @@ ] }, "flow": { - "id": "RlrAo_U1Y14", + "id": "fA2V7HT45yo", "locality": "internal" }, "netflow": { @@ -2893,7 +2985,7 @@ }, "network": { "bytes": 555, - "community_id": "1:I4DlCbWgyxRiNPVj5ntu1L7Z0hw=", + "community_id": "1:ckujBEtohW0WnvxDVLoLAfkwHeE=", "direction": "unknown", "iana_number": 17, "packets": 3, @@ -2902,8 +2994,15 @@ "observer": { "ip": "192.0.2.1" }, + "related": { + "ip": [ + "fe80::ff:fe00:901" + ] + }, "source": { "bytes": 555, + "ip": "fe80::ff:fe00:901", + "locality": "internal", "packets": 3, "port": 5678 } @@ -2916,6 +3015,8 @@ "Meta": null, "Fields": { "destination": { + "ip": "fe80::ff:fe00:901", + "locality": "internal", "port": 5678 }, "event": { @@ -2930,7 +3031,7 @@ ] }, "flow": { - "id": "RlrAo_U1Y14", + "id": "fA2V7HT45yo", "locality": "internal" }, "netflow": { @@ -2959,7 +3060,7 @@ }, "network": { "bytes": 370, - "community_id": "1:I4DlCbWgyxRiNPVj5ntu1L7Z0hw=", + "community_id": "1:ckujBEtohW0WnvxDVLoLAfkwHeE=", "direction": "unknown", "iana_number": 17, "packets": 2, @@ -2968,8 +3069,15 @@ "observer": { "ip": "192.0.2.1" }, + "related": { + "ip": [ + "fe80::ff:fe00:901" + ] + }, "source": { "bytes": 370, + "ip": "fe80::ff:fe00:901", + "locality": "internal", "packets": 2, "port": 5678 } @@ -2982,6 +3090,8 @@ "Meta": null, "Fields": { "destination": { + "ip": "fe80::ff:fe00:1001", + "locality": "internal", "port": 5678 }, "event": { @@ -2996,7 +3106,7 @@ ] }, "flow": { - "id": "RlrAo_U1Y14", + "id": "r9myTc0ZAtE", "locality": "internal" }, "netflow": { @@ -3025,7 +3135,7 @@ }, "network": { "bytes": 555, - "community_id": "1:I4DlCbWgyxRiNPVj5ntu1L7Z0hw=", + "community_id": "1:3MYYQzFTLjghJ6R8FULtQ6M3TY4=", "direction": "unknown", "iana_number": 17, "packets": 3, @@ -3034,8 +3144,15 @@ "observer": { "ip": "192.0.2.1" }, + "related": { + 
"ip": [ + "fe80::ff:fe00:1001" + ] + }, "source": { "bytes": 555, + "ip": "fe80::ff:fe00:1001", + "locality": "internal", "packets": 3, "port": 5678 } @@ -3048,6 +3165,8 @@ "Meta": null, "Fields": { "destination": { + "ip": "fe80::ff:fe00:1001", + "locality": "internal", "port": 5678 }, "event": { @@ -3062,7 +3181,7 @@ ] }, "flow": { - "id": "RlrAo_U1Y14", + "id": "r9myTc0ZAtE", "locality": "internal" }, "netflow": { @@ -3091,7 +3210,7 @@ }, "network": { "bytes": 370, - "community_id": "1:I4DlCbWgyxRiNPVj5ntu1L7Z0hw=", + "community_id": "1:3MYYQzFTLjghJ6R8FULtQ6M3TY4=", "direction": "unknown", "iana_number": 17, "packets": 2, @@ -3100,8 +3219,15 @@ "observer": { "ip": "192.0.2.1" }, + "related": { + "ip": [ + "fe80::ff:fe00:1001" + ] + }, "source": { "bytes": 370, + "ip": "fe80::ff:fe00:1001", + "locality": "internal", "packets": 2, "port": 5678 } @@ -3114,6 +3240,8 @@ "Meta": null, "Fields": { "destination": { + "ip": "fe80::ff:fe00:1101", + "locality": "internal", "port": 5678 }, "event": { @@ -3128,7 +3256,7 @@ ] }, "flow": { - "id": "RlrAo_U1Y14", + "id": "we4v-M4gTEo", "locality": "internal" }, "netflow": { @@ -3157,7 +3285,7 @@ }, "network": { "bytes": 555, - "community_id": "1:I4DlCbWgyxRiNPVj5ntu1L7Z0hw=", + "community_id": "1:RCaNdj14AFbHfSM4MQquuPjYpgs=", "direction": "unknown", "iana_number": 17, "packets": 3, @@ -3166,8 +3294,15 @@ "observer": { "ip": "192.0.2.1" }, + "related": { + "ip": [ + "fe80::ff:fe00:1101" + ] + }, "source": { "bytes": 555, + "ip": "fe80::ff:fe00:1101", + "locality": "internal", "packets": 3, "port": 5678 } @@ -3180,6 +3315,8 @@ "Meta": null, "Fields": { "destination": { + "ip": "fe80::ff:fe00:1101", + "locality": "internal", "port": 5678 }, "event": { @@ -3194,7 +3331,7 @@ ] }, "flow": { - "id": "RlrAo_U1Y14", + "id": "we4v-M4gTEo", "locality": "internal" }, "netflow": { @@ -3223,7 +3360,7 @@ }, "network": { "bytes": 370, - "community_id": "1:I4DlCbWgyxRiNPVj5ntu1L7Z0hw=", + "community_id": "1:RCaNdj14AFbHfSM4MQquuPjYpgs=", 
"direction": "unknown", "iana_number": 17, "packets": 2, @@ -3232,8 +3369,15 @@ "observer": { "ip": "192.0.2.1" }, + "related": { + "ip": [ + "fe80::ff:fe00:1101" + ] + }, "source": { "bytes": 370, + "ip": "fe80::ff:fe00:1101", + "locality": "internal", "packets": 2, "port": 5678 } @@ -3246,6 +3390,8 @@ "Meta": null, "Fields": { "destination": { + "ip": "fe80::ff:fe00:1201", + "locality": "internal", "port": 5678 }, "event": { @@ -3260,7 +3406,7 @@ ] }, "flow": { - "id": "RlrAo_U1Y14", + "id": "N9KUKy-eIwc", "locality": "internal" }, "netflow": { @@ -3289,7 +3435,7 @@ }, "network": { "bytes": 555, - "community_id": "1:I4DlCbWgyxRiNPVj5ntu1L7Z0hw=", + "community_id": "1:AL2CtUGKb1BgJM4KclloxlRQdRc=", "direction": "unknown", "iana_number": 17, "packets": 3, @@ -3298,8 +3444,15 @@ "observer": { "ip": "192.0.2.1" }, + "related": { + "ip": [ + "fe80::ff:fe00:1201" + ] + }, "source": { "bytes": 555, + "ip": "fe80::ff:fe00:1201", + "locality": "internal", "packets": 3, "port": 5678 } @@ -3312,6 +3465,8 @@ "Meta": null, "Fields": { "destination": { + "ip": "fe80::ff:fe00:1201", + "locality": "internal", "port": 5678 }, "event": { @@ -3326,7 +3481,7 @@ ] }, "flow": { - "id": "RlrAo_U1Y14", + "id": "N9KUKy-eIwc", "locality": "internal" }, "netflow": { @@ -3355,7 +3510,7 @@ }, "network": { "bytes": 370, - "community_id": "1:I4DlCbWgyxRiNPVj5ntu1L7Z0hw=", + "community_id": "1:AL2CtUGKb1BgJM4KclloxlRQdRc=", "direction": "unknown", "iana_number": 17, "packets": 2, @@ -3364,8 +3519,15 @@ "observer": { "ip": "192.0.2.1" }, + "related": { + "ip": [ + "fe80::ff:fe00:1201" + ] + }, "source": { "bytes": 370, + "ip": "fe80::ff:fe00:1201", + "locality": "internal", "packets": 2, "port": 5678 } diff --git a/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-Netscaler-with-variable-length-fields.golden.json b/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-Netscaler-with-variable-length-fields.golden.json index e27655fe1ed..9d0ddb6a2fd 100644 --- 
a/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-Netscaler-with-variable-length-fields.golden.json +++ b/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-Netscaler-with-variable-length-fields.golden.json @@ -87,8 +87,8 @@ }, "related": { "ip": [ - "192.168.0.1", - "10.0.0.1" + "10.0.0.1", + "192.168.0.1" ] }, "source": { @@ -277,8 +277,8 @@ }, "related": { "ip": [ - "192.168.0.1", - "10.0.0.1" + "10.0.0.1", + "192.168.0.1" ] }, "source": { diff --git a/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-Nokia-BRAS.golden.json b/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-Nokia-BRAS.golden.json index f21438c20ee..3f50095a6c0 100644 --- a/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-Nokia-BRAS.golden.json +++ b/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-Nokia-BRAS.golden.json @@ -57,8 +57,8 @@ }, "related": { "ip": [ - "10.0.1.228", - "10.0.0.34" + "10.0.0.34", + "10.0.1.228" ] }, "source": { diff --git a/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-OpenBSD-pflow.golden.json b/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-OpenBSD-pflow.golden.json index 4961f7d0a25..04c22e39df4 100644 --- a/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-OpenBSD-pflow.golden.json +++ b/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-OpenBSD-pflow.golden.json @@ -60,8 +60,8 @@ }, "related": { "ip": [ - "192.168.0.17", - "192.168.0.1" + "192.168.0.1", + "192.168.0.17" ] }, "source": { @@ -208,8 +208,8 @@ }, "related": { "ip": [ - "192.168.0.17", - "192.168.0.1" + "192.168.0.1", + "192.168.0.17" ] }, "source": { @@ -356,8 +356,8 @@ }, "related": { "ip": [ - "192.168.0.17", - "192.168.0.1" + "192.168.0.1", + "192.168.0.17" ] }, "source": { @@ -504,8 +504,8 @@ }, "related": { "ip": [ - "192.168.0.17", - "192.168.0.1" + "192.168.0.1", + "192.168.0.17" ] }, "source": { @@ -652,8 +652,8 @@ }, "related": { "ip": [ - "192.168.0.17", - "192.168.0.1" + "192.168.0.1", + "192.168.0.17" ] }, "source": { @@ -800,8 +800,8 @@ }, "related": { "ip": [ 
- "192.168.0.17", - "192.168.0.1" + "192.168.0.1", + "192.168.0.17" ] }, "source": { @@ -948,8 +948,8 @@ }, "related": { "ip": [ - "192.168.0.17", - "192.168.0.1" + "192.168.0.1", + "192.168.0.17" ] }, "source": { @@ -1096,8 +1096,8 @@ }, "related": { "ip": [ - "192.168.0.17", - "192.168.0.1" + "192.168.0.1", + "192.168.0.17" ] }, "source": { @@ -1244,8 +1244,8 @@ }, "related": { "ip": [ - "192.168.0.17", - "192.168.0.1" + "192.168.0.1", + "192.168.0.17" ] }, "source": { @@ -1392,8 +1392,8 @@ }, "related": { "ip": [ - "192.168.0.17", - "192.168.0.1" + "192.168.0.1", + "192.168.0.17" ] }, "source": { @@ -1540,8 +1540,8 @@ }, "related": { "ip": [ - "192.168.0.17", - "192.168.0.1" + "192.168.0.1", + "192.168.0.17" ] }, "source": { @@ -1688,8 +1688,8 @@ }, "related": { "ip": [ - "192.168.0.17", - "192.168.0.1" + "192.168.0.1", + "192.168.0.17" ] }, "source": { @@ -1836,8 +1836,8 @@ }, "related": { "ip": [ - "192.168.0.17", - "192.168.0.1" + "192.168.0.1", + "192.168.0.17" ] }, "source": { diff --git a/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-Procera.golden.json b/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-Procera.golden.json index 30acfdf29c5..4c2a0fa8253 100644 --- a/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-Procera.golden.json +++ b/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-Procera.golden.json @@ -69,8 +69,8 @@ }, "related": { "ip": [ - "181.214.87.71", - "138.44.161.14" + "138.44.161.14", + "181.214.87.71" ] }, "source": { @@ -150,7 +150,6 @@ }, "related": { "ip": [ - "0.0.0.0", "0.0.0.0" ] }, @@ -312,8 +311,8 @@ }, "related": { "ip": [ - "206.117.25.89", - "138.44.161.14" + "138.44.161.14", + "206.117.25.89" ] }, "source": { @@ -393,7 +392,6 @@ }, "related": { "ip": [ - "0.0.0.0", "0.0.0.0" ] }, @@ -474,8 +472,8 @@ }, "related": { "ip": [ - "185.232.29.199", - "138.44.161.14" + "138.44.161.14", + "185.232.29.199" ] }, "source": { @@ -555,8 +553,8 @@ }, "related": { "ip": [ - "177.188.228.137", - "138.44.161.14" + 
"138.44.161.14", + "177.188.228.137" ] }, "source": { @@ -636,8 +634,8 @@ }, "related": { "ip": [ - "138.44.161.14", - "138.44.161.13" + "138.44.161.13", + "138.44.161.14" ] }, "source": { diff --git a/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-VMware-virtual-distributed-switch.golden.json b/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-VMware-virtual-distributed-switch.golden.json index dc8f538dfee..3396c5ef794 100644 --- a/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-VMware-virtual-distributed-switch.golden.json +++ b/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-VMware-virtual-distributed-switch.golden.json @@ -338,6 +338,8 @@ "Meta": null, "Fields": { "destination": { + "ip": "ff02::1:3", + "locality": "internal", "port": 5355 }, "event": { @@ -352,7 +354,7 @@ ] }, "flow": { - "id": "y_Vml2vPNtw", + "id": "iOQ1bg2JOLM", "locality": "internal" }, "netflow": { @@ -388,7 +390,7 @@ }, "network": { "bytes": 144, - "community_id": "1:Nl0K3f1AqKrkGYEhoNHcgFAr/EY=", + "community_id": "1:pr+rxLjqBu9/jT6yJoAEy7/fgdY=", "direction": "outbound", "iana_number": 17, "packets": 2, @@ -397,8 +399,16 @@ "observer": { "ip": "192.0.2.1" }, + "related": { + "ip": [ + "fe80::5187:5cd8:d750:cdc9", + "ff02::1:3" + ] + }, "source": { "bytes": 144, + "ip": "fe80::5187:5cd8:d750:cdc9", + "locality": "internal", "packets": 2, "port": 61329 } diff --git a/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-YAF-basic-with-applabel.golden.json b/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-YAF-basic-with-applabel.golden.json index 5b2b4b01ac3..6c77e7a0fa9 100644 --- a/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-YAF-basic-with-applabel.golden.json +++ b/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-YAF-basic-with-applabel.golden.json @@ -70,8 +70,8 @@ }, "related": { "ip": [ - "172.16.32.201", - "172.16.32.100" + "172.16.32.100", + "172.16.32.201" ] }, "source": { diff --git 
a/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-configured-with-include_flowset_id.golden.json b/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-configured-with-include_flowset_id.golden.json index 0c2dbd22d5e..c3934c5e32b 100644 --- a/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-configured-with-include_flowset_id.golden.json +++ b/x-pack/filebeat/input/netflow/testdata/golden/IPFIX-configured-with-include_flowset_id.golden.json @@ -87,8 +87,8 @@ }, "related": { "ip": [ - "192.168.0.1", - "10.0.0.1" + "10.0.0.1", + "192.168.0.1" ] }, "source": { @@ -277,8 +277,8 @@ }, "related": { "ip": [ - "192.168.0.1", - "10.0.0.1" + "10.0.0.1", + "192.168.0.1" ] }, "source": { diff --git a/x-pack/filebeat/input/netflow/testdata/golden/IPFIX.golden.json b/x-pack/filebeat/input/netflow/testdata/golden/IPFIX.golden.json index 72dd4072ef9..7ba0ecb0713 100644 --- a/x-pack/filebeat/input/netflow/testdata/golden/IPFIX.golden.json +++ b/x-pack/filebeat/input/netflow/testdata/golden/IPFIX.golden.json @@ -180,8 +180,8 @@ }, "related": { "ip": [ - "192.168.253.128", - "192.168.253.1" + "192.168.253.1", + "192.168.253.128" ] }, "source": { @@ -336,8 +336,8 @@ }, "related": { "ip": [ - "192.168.253.132", - "192.168.253.2" + "192.168.253.2", + "192.168.253.132" ] }, "source": { @@ -492,8 +492,8 @@ }, "related": { "ip": [ - "192.168.253.132", - "54.214.9.161" + "54.214.9.161", + "192.168.253.132" ] }, "source": { @@ -570,8 +570,8 @@ }, "related": { "ip": [ - "192.168.253.130", - "10.4.36.64" + "10.4.36.64", + "192.168.253.130" ] }, "source": { @@ -726,8 +726,8 @@ }, "related": { "ip": [ - "192.168.253.128", - "192.168.253.1" + "192.168.253.1", + "192.168.253.128" ] }, "source": { @@ -882,8 +882,8 @@ }, "related": { "ip": [ - "192.168.253.128", - "192.168.253.1" + "192.168.253.1", + "192.168.253.128" ] }, "source": { diff --git a/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Cisco-1941-K9-release-15.1.golden.json 
b/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Cisco-1941-K9-release-15.1.golden.json index 448709e5c41..834efbc7df6 100644 --- a/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Cisco-1941-K9-release-15.1.golden.json +++ b/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Cisco-1941-K9-release-15.1.golden.json @@ -63,8 +63,8 @@ }, "related": { "ip": [ - "192.168.0.111", - "62.217.193.1" + "62.217.193.1", + "192.168.0.111" ] }, "source": { @@ -141,8 +141,8 @@ }, "related": { "ip": [ - "192.168.0.111", - "62.217.193.65" + "62.217.193.65", + "192.168.0.111" ] }, "source": { @@ -219,8 +219,8 @@ }, "related": { "ip": [ - "192.168.0.111", - "62.217.193.1" + "62.217.193.1", + "192.168.0.111" ] }, "source": { @@ -297,8 +297,8 @@ }, "related": { "ip": [ - "192.168.0.111", - "62.217.193.65" + "62.217.193.65", + "192.168.0.111" ] }, "source": { @@ -531,8 +531,8 @@ }, "related": { "ip": [ - "216.58.212.195", - "192.168.0.88" + "192.168.0.88", + "216.58.212.195" ] }, "source": { @@ -687,8 +687,8 @@ }, "related": { "ip": [ - "216.58.201.106", - "192.168.1.201" + "192.168.1.201", + "216.58.201.106" ] }, "source": { @@ -843,8 +843,8 @@ }, "related": { "ip": [ - "192.168.3.34", - "52.216.130.237" + "52.216.130.237", + "192.168.3.34" ] }, "source": { @@ -921,8 +921,8 @@ }, "related": { "ip": [ - "209.197.3.19", - "192.168.3.34" + "192.168.3.34", + "209.197.3.19" ] }, "source": { @@ -1077,8 +1077,8 @@ }, "related": { "ip": [ - "192.168.0.157", - "172.217.23.232" + "172.217.23.232", + "192.168.0.157" ] }, "source": { @@ -1311,8 +1311,8 @@ }, "related": { "ip": [ - "192.168.3.178", - "107.21.232.174" + "107.21.232.174", + "192.168.3.178" ] }, "source": { @@ -1389,8 +1389,8 @@ }, "related": { "ip": [ - "192.168.2.118", - "95.0.145.242" + "95.0.145.242", + "192.168.2.118" ] }, "source": { @@ -1545,8 +1545,8 @@ }, "related": { "ip": [ - "192.168.0.79", - "23.5.100.66" + "23.5.100.66", + "192.168.0.79" ] }, "source": { @@ -1623,8 +1623,8 @@ }, "related": { "ip": [ 
- "192.168.0.79", - "23.5.100.66" + "23.5.100.66", + "192.168.0.79" ] }, "source": { @@ -1857,8 +1857,8 @@ }, "related": { "ip": [ - "192.168.0.61", - "170.251.180.15" + "170.251.180.15", + "192.168.0.61" ] }, "source": { @@ -1935,8 +1935,8 @@ }, "related": { "ip": [ - "192.168.3.34", - "74.119.119.84" + "74.119.119.84", + "192.168.3.34" ] }, "source": { @@ -2091,8 +2091,8 @@ }, "related": { "ip": [ - "192.168.3.200", - "185.60.218.15" + "185.60.218.15", + "192.168.3.200" ] }, "source": { @@ -2247,8 +2247,8 @@ }, "related": { "ip": [ - "192.168.0.95", - "169.45.214.246" + "169.45.214.246", + "192.168.0.95" ] }, "source": { diff --git a/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Cisco-ASA.golden.json b/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Cisco-ASA.golden.json index 135aa56d0d4..d8ebc7e8eaa 100644 --- a/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Cisco-ASA.golden.json +++ b/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Cisco-ASA.golden.json @@ -69,8 +69,8 @@ }, "related": { "ip": [ - "192.168.14.1", - "2.2.2.11" + "2.2.2.11", + "192.168.14.1" ] }, "source": { @@ -151,8 +151,8 @@ }, "related": { "ip": [ - "192.168.23.22", - "164.164.37.11" + "164.164.37.11", + "192.168.23.22" ] }, "source": { @@ -315,8 +315,8 @@ }, "related": { "ip": [ - "192.168.23.20", - "164.164.37.11" + "164.164.37.11", + "192.168.23.20" ] }, "source": { @@ -479,8 +479,8 @@ }, "related": { "ip": [ - "192.168.14.11", - "2.2.2.11" + "2.2.2.11", + "192.168.14.11" ] }, "source": { @@ -725,8 +725,8 @@ }, "related": { "ip": [ - "192.168.14.1", - "2.2.2.11" + "2.2.2.11", + "192.168.14.1" ] }, "source": { @@ -889,8 +889,8 @@ }, "related": { "ip": [ - "192.168.23.22", - "164.164.37.11" + "164.164.37.11", + "192.168.23.22" ] }, "source": { @@ -1053,8 +1053,8 @@ }, "related": { "ip": [ - "192.168.23.20", - "164.164.37.11" + "164.164.37.11", + "192.168.23.20" ] }, "source": { diff --git 
a/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Cisco-ASR-9000-series-template-260.golden.json b/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Cisco-ASR-9000-series-template-260.golden.json index 9922cc10d66..70cfcfdd14e 100644 --- a/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Cisco-ASR-9000-series-template-260.golden.json +++ b/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Cisco-ASR-9000-series-template-260.golden.json @@ -421,8 +421,8 @@ }, "related": { "ip": [ - "10.0.34.71", - "10.0.20.242" + "10.0.20.242", + "10.0.34.71" ] }, "source": { @@ -595,8 +595,8 @@ }, "related": { "ip": [ - "10.0.37.29", - "10.0.6.24" + "10.0.6.24", + "10.0.37.29" ] }, "source": { @@ -682,8 +682,8 @@ }, "related": { "ip": [ - "10.0.32.176", - "10.0.11.113" + "10.0.11.113", + "10.0.32.176" ] }, "source": { @@ -856,8 +856,8 @@ }, "related": { "ip": [ - "10.0.4.212", - "10.0.3.110" + "10.0.3.110", + "10.0.4.212" ] }, "source": { @@ -943,8 +943,8 @@ }, "related": { "ip": [ - "10.0.33.122", - "10.0.1.136" + "10.0.1.136", + "10.0.33.122" ] }, "source": { @@ -1204,8 +1204,8 @@ }, "related": { "ip": [ - "10.0.25.59", - "10.0.2.18" + "10.0.2.18", + "10.0.25.59" ] }, "source": { @@ -1465,8 +1465,8 @@ }, "related": { "ip": [ - "10.0.28.150", - "10.0.24.13" + "10.0.24.13", + "10.0.28.150" ] }, "source": { @@ -1552,8 +1552,8 @@ }, "related": { "ip": [ - "10.0.26.188", - "10.0.21.200" + "10.0.21.200", + "10.0.26.188" ] }, "source": { @@ -1639,8 +1639,8 @@ }, "related": { "ip": [ - "10.0.29.34", - "10.0.15.38" + "10.0.15.38", + "10.0.29.34" ] }, "source": { @@ -1726,8 +1726,8 @@ }, "related": { "ip": [ - "10.0.8.200", - "10.0.5.224" + "10.0.5.224", + "10.0.8.200" ] }, "source": { @@ -1813,8 +1813,8 @@ }, "related": { "ip": [ - "10.0.29.46", - "10.0.15.38" + "10.0.15.38", + "10.0.29.46" ] }, "source": { diff --git a/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Cisco-ASR1001--X.golden.json 
b/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Cisco-ASR1001--X.golden.json index 9049d551304..b548a68d523 100644 --- a/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Cisco-ASR1001--X.golden.json +++ b/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Cisco-ASR1001--X.golden.json @@ -63,8 +63,8 @@ }, "related": { "ip": [ - "10.111.111.242", - "10.12.100.13" + "10.12.100.13", + "10.111.111.242" ] }, "source": { @@ -294,8 +294,8 @@ }, "related": { "ip": [ - "10.12.104.239", - "10.10.11.21" + "10.10.11.21", + "10.12.104.239" ] }, "source": { @@ -448,8 +448,8 @@ }, "related": { "ip": [ - "10.100.101.45", - "10.15.131.98" + "10.15.131.98", + "10.100.101.45" ] }, "source": { @@ -525,8 +525,8 @@ }, "related": { "ip": [ - "10.100.101.43", - "10.12.105.23" + "10.12.105.23", + "10.100.101.43" ] }, "source": { @@ -602,8 +602,8 @@ }, "related": { "ip": [ - "31.13.71.7", - "10.11.31.108" + "10.11.31.108", + "31.13.71.7" ] }, "source": { @@ -833,8 +833,8 @@ }, "related": { "ip": [ - "10.100.105.86", - "10.11.21.60" + "10.11.21.60", + "10.100.105.86" ] }, "source": { @@ -987,8 +987,8 @@ }, "related": { "ip": [ - "10.12.106.83", - "10.10.11.21" + "10.10.11.21", + "10.12.106.83" ] }, "source": { @@ -1064,8 +1064,8 @@ }, "related": { "ip": [ - "172.217.11.5", - "10.12.92.102" + "10.12.92.102", + "172.217.11.5" ] }, "source": { @@ -1295,8 +1295,8 @@ }, "related": { "ip": [ - "10.14.121.98", - "10.12.100.13" + "10.12.100.13", + "10.14.121.98" ] }, "source": { @@ -1526,8 +1526,8 @@ }, "related": { "ip": [ - "10.12.102.125", - "10.10.11.21" + "10.10.11.21", + "10.12.102.125" ] }, "source": { @@ -1603,8 +1603,8 @@ }, "related": { "ip": [ - "10.100.105.86", - "10.11.21.60" + "10.11.21.60", + "10.100.105.86" ] }, "source": { @@ -1757,8 +1757,8 @@ }, "related": { "ip": [ - "10.100.105.85", - "10.10.4.151" + "10.10.4.151", + "10.100.105.85" ] }, "source": { diff --git a/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Fortigate-FortiOS-5.2.1.golden.json 
b/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Fortigate-FortiOS-5.2.1.golden.json index 8dc5747704a..962a60d4efd 100644 --- a/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Fortigate-FortiOS-5.2.1.golden.json +++ b/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Fortigate-FortiOS-5.2.1.golden.json @@ -102,8 +102,8 @@ }, "related": { "ip": [ - "192.168.99.7", - "31.13.87.36" + "31.13.87.36", + "192.168.99.7" ] }, "source": { diff --git a/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Fortigate-FortiOS-54x-appid.golden.json b/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Fortigate-FortiOS-54x-appid.golden.json index dd90fa13b6d..a6d193432de 100644 --- a/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Fortigate-FortiOS-54x-appid.golden.json +++ b/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Fortigate-FortiOS-54x-appid.golden.json @@ -71,8 +71,8 @@ }, "related": { "ip": [ - "192.168.100.151", - "182.50.136.239" + "182.50.136.239", + "192.168.100.151" ] }, "source": { @@ -156,8 +156,8 @@ }, "related": { "ip": [ - "208.100.17.187", - "192.168.100.151" + "192.168.100.151", + "208.100.17.187" ] }, "source": { @@ -326,8 +326,8 @@ }, "related": { "ip": [ - "208.100.17.189", - "192.168.100.151" + "192.168.100.151", + "208.100.17.189" ] }, "source": { @@ -581,8 +581,8 @@ }, "related": { "ip": [ - "192.168.100.151", - "178.255.83.1" + "178.255.83.1", + "192.168.100.151" ] }, "source": { @@ -751,8 +751,8 @@ }, "related": { "ip": [ - "192.168.100.151", - "178.255.83.1" + "178.255.83.1", + "192.168.100.151" ] }, "source": { @@ -913,8 +913,8 @@ }, "related": { "ip": [ - "192.168.100.150", - "192.168.100.111" + "192.168.100.111", + "192.168.100.150" ] }, "source": { @@ -1075,8 +1075,8 @@ }, "related": { "ip": [ - "192.168.100.150", - "192.168.100.111" + "192.168.100.111", + "192.168.100.150" ] }, "source": { @@ -1237,8 +1237,8 @@ }, "related": { "ip": [ - "192.168.100.150", - "192.168.100.111" + "192.168.100.111", + 
"192.168.100.150" ] }, "source": { @@ -1399,8 +1399,8 @@ }, "related": { "ip": [ - "192.168.100.150", - "192.168.100.111" + "192.168.100.111", + "192.168.100.150" ] }, "source": { diff --git a/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-H3C.golden.json b/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-H3C.golden.json index a69dbeea386..b75a1c2ba3b 100644 --- a/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-H3C.golden.json +++ b/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-H3C.golden.json @@ -76,8 +76,8 @@ }, "related": { "ip": [ - "10.22.166.30", - "10.22.163.21" + "10.22.163.21", + "10.22.166.30" ] }, "source": { @@ -166,8 +166,8 @@ }, "related": { "ip": [ - "10.22.166.12", - "10.21.3.172" + "10.21.3.172", + "10.22.166.12" ] }, "source": { @@ -346,8 +346,8 @@ }, "related": { "ip": [ - "10.22.166.35", - "10.20.100.253" + "10.20.100.253", + "10.22.166.35" ] }, "source": { @@ -436,8 +436,8 @@ }, "related": { "ip": [ - "10.22.166.36", - "10.20.136.36" + "10.20.136.36", + "10.22.166.36" ] }, "source": { @@ -526,8 +526,8 @@ }, "related": { "ip": [ - "10.22.166.36", - "10.20.147.28" + "10.20.147.28", + "10.22.166.36" ] }, "source": { @@ -616,8 +616,8 @@ }, "related": { "ip": [ - "10.22.166.28", - "10.20.141.16" + "10.20.141.16", + "10.22.166.28" ] }, "source": { @@ -706,8 +706,8 @@ }, "related": { "ip": [ - "10.22.166.35", - "10.20.162.17" + "10.20.162.17", + "10.22.166.35" ] }, "source": { @@ -796,8 +796,8 @@ }, "related": { "ip": [ - "10.22.166.15", - "10.20.171.36" + "10.20.171.36", + "10.22.166.15" ] }, "source": { @@ -1156,8 +1156,8 @@ }, "related": { "ip": [ - "10.22.166.25", - "10.20.166.26" + "10.20.166.26", + "10.22.166.25" ] }, "source": { @@ -1246,8 +1246,8 @@ }, "related": { "ip": [ - "10.22.166.12", - "10.21.3.117" + "10.21.3.117", + "10.22.166.12" ] }, "source": { @@ -1336,8 +1336,8 @@ }, "related": { "ip": [ - "10.22.166.17", - "10.22.145.26" + "10.22.145.26", + "10.22.166.17" ] }, "source": { @@ -1426,8 +1426,8 @@ 
}, "related": { "ip": [ - "10.22.166.36", - "10.21.75.38" + "10.21.75.38", + "10.22.166.36" ] }, "source": { diff --git a/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-IE150-IE151.golden.json b/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-IE150-IE151.golden.json index 3aed82dc6f9..3db18648d08 100644 --- a/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-IE150-IE151.golden.json +++ b/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-IE150-IE151.golden.json @@ -63,8 +63,8 @@ }, "related": { "ip": [ - "192.168.0.3", - "192.168.0.2" + "192.168.0.2", + "192.168.0.3" ] }, "source": { diff --git a/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Palo-Alto-1-flowset-in-large-zero-filled-packet.golden.json b/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Palo-Alto-1-flowset-in-large-zero-filled-packet.golden.json index d0207ba3192..60f97f06495 100644 --- a/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Palo-Alto-1-flowset-in-large-zero-filled-packet.golden.json +++ b/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Palo-Alto-1-flowset-in-large-zero-filled-packet.golden.json @@ -69,8 +69,8 @@ }, "related": { "ip": [ - "134.220.2.6", - "134.220.1.156" + "134.220.1.156", + "134.220.2.6" ] }, "source": { diff --git a/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Palo-Alto-PAN--OS-with-app--id.golden.json b/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Palo-Alto-PAN--OS-with-app--id.golden.json index 79e31dd7f6b..98d496c0a7a 100644 --- a/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Palo-Alto-PAN--OS-with-app--id.golden.json +++ b/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Palo-Alto-PAN--OS-with-app--id.golden.json @@ -69,8 +69,8 @@ }, "related": { "ip": [ - "23.35.171.27", - "10.32.91.205" + "10.32.91.205", + "23.35.171.27" ] }, "source": { @@ -318,8 +318,8 @@ }, "related": { "ip": [ - "23.209.52.99", - "10.130.145.44" + "10.130.145.44", + "23.209.52.99" ] }, "source": { 
@@ -401,8 +401,8 @@ }, "related": { "ip": [ - "10.50.97.57", - "10.50.96.20" + "10.50.96.20", + "10.50.97.57" ] }, "source": { @@ -567,8 +567,8 @@ }, "related": { "ip": [ - "34.234.173.147", - "10.48.208.209" + "10.48.208.209", + "34.234.173.147" ] }, "source": { diff --git a/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Streamcore.golden.json b/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Streamcore.golden.json index 1319ba663cc..ebfde635048 100644 --- a/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Streamcore.golden.json +++ b/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Streamcore.golden.json @@ -64,8 +64,8 @@ }, "related": { "ip": [ - "100.78.40.201", - "10.231.128.150" + "10.231.128.150", + "100.78.40.201" ] }, "source": { @@ -220,8 +220,8 @@ }, "related": { "ip": [ - "100.78.40.201", - "10.27.8.20" + "10.27.8.20", + "100.78.40.201" ] }, "source": { diff --git a/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Ubiquiti-Edgerouter-with-MPLS-labels.golden.json b/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Ubiquiti-Edgerouter-with-MPLS-labels.golden.json index e6cb0fb4112..fec0ac80498 100644 --- a/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Ubiquiti-Edgerouter-with-MPLS-labels.golden.json +++ b/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-Ubiquiti-Edgerouter-with-MPLS-labels.golden.json @@ -420,8 +420,8 @@ }, "related": { "ip": [ - "10.5.0.91", - "10.4.0.251" + "10.4.0.251", + "10.5.0.91" ] }, "source": { @@ -767,8 +767,8 @@ }, "related": { "ip": [ - "192.168.1.98", - "10.0.0.73" + "10.0.0.73", + "192.168.1.98" ] }, "source": { @@ -1362,8 +1362,8 @@ }, "related": { "ip": [ - "192.168.1.102", - "10.2.0.95" + "10.2.0.95", + "192.168.1.102" ] }, "source": { diff --git a/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-field-layer2segmentid.golden.json b/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-field-layer2segmentid.golden.json index 879714e24c0..73d4e4e7fdf 
100644 --- a/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-field-layer2segmentid.golden.json +++ b/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-field-layer2segmentid.golden.json @@ -68,8 +68,8 @@ }, "related": { "ip": [ - "192.168.200.136", - "80.82.237.40" + "80.82.237.40", + "192.168.200.136" ] }, "source": { diff --git a/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-ipt_netflow-reduced-size-encoding.golden.json b/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-ipt_netflow-reduced-size-encoding.golden.json index 2b7dded5bd6..4e6b375574b 100644 --- a/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-ipt_netflow-reduced-size-encoding.golden.json +++ b/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-ipt_netflow-reduced-size-encoding.golden.json @@ -333,8 +333,8 @@ }, "related": { "ip": [ - "193.151.192.46", - "10.236.8.4" + "10.236.8.4", + "193.151.192.46" ] }, "source": { @@ -942,8 +942,8 @@ }, "related": { "ip": [ - "23.43.139.27", - "10.232.8.45" + "10.232.8.45", + "23.43.139.27" ] }, "source": { diff --git a/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-macaddress.golden.json b/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-macaddress.golden.json index 7db570f5db1..e1385446903 100644 --- a/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-macaddress.golden.json +++ b/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-macaddress.golden.json @@ -157,8 +157,8 @@ }, "related": { "ip": [ - "172.16.32.201", - "172.16.32.100" + "172.16.32.100", + "172.16.32.201" ] }, "source": { @@ -358,8 +358,8 @@ }, "related": { "ip": [ - "172.16.32.201", - "172.16.32.1" + "172.16.32.1", + "172.16.32.201" ] }, "source": { @@ -492,8 +492,8 @@ }, "related": { "ip": [ - "172.16.32.201", - "172.16.32.1" + "172.16.32.1", + "172.16.32.201" ] }, "source": { @@ -626,8 +626,8 @@ }, "related": { "ip": [ - "172.16.32.201", - "172.16.32.1" + "172.16.32.1", + "172.16.32.201" ] }, "source": { @@ -760,8 +760,8 @@ }, 
"related": { "ip": [ - "172.16.32.201", - "172.16.32.1" + "172.16.32.1", + "172.16.32.201" ] }, "source": { @@ -894,8 +894,8 @@ }, "related": { "ip": [ - "172.16.32.201", - "172.16.32.1" + "172.16.32.1", + "172.16.32.201" ] }, "source": { @@ -1028,8 +1028,8 @@ }, "related": { "ip": [ - "172.16.32.201", - "172.16.32.1" + "172.16.32.1", + "172.16.32.201" ] }, "source": { @@ -1162,8 +1162,8 @@ }, "related": { "ip": [ - "172.16.32.201", - "172.16.32.1" + "172.16.32.1", + "172.16.32.201" ] }, "source": { @@ -1296,8 +1296,8 @@ }, "related": { "ip": [ - "172.16.32.201", - "172.16.32.1" + "172.16.32.1", + "172.16.32.201" ] }, "source": { @@ -1430,8 +1430,8 @@ }, "related": { "ip": [ - "172.16.32.201", - "172.16.32.1" + "172.16.32.1", + "172.16.32.201" ] }, "source": { @@ -1564,8 +1564,8 @@ }, "related": { "ip": [ - "172.16.32.201", - "172.16.32.1" + "172.16.32.1", + "172.16.32.201" ] }, "source": { @@ -1698,8 +1698,8 @@ }, "related": { "ip": [ - "172.16.32.201", - "172.16.32.1" + "172.16.32.1", + "172.16.32.201" ] }, "source": { @@ -1832,8 +1832,8 @@ }, "related": { "ip": [ - "172.16.32.201", - "172.16.32.1" + "172.16.32.1", + "172.16.32.201" ] }, "source": { @@ -1966,8 +1966,8 @@ }, "related": { "ip": [ - "172.16.32.201", - "172.16.32.1" + "172.16.32.1", + "172.16.32.201" ] }, "source": { diff --git a/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-multiple-netflow-exporters.golden.json b/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-multiple-netflow-exporters.golden.json index 4238292f250..f75a3893975 100644 --- a/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-multiple-netflow-exporters.golden.json +++ b/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-multiple-netflow-exporters.golden.json @@ -180,8 +180,8 @@ }, "related": { "ip": [ - "172.16.32.248", - "172.16.32.100" + "172.16.32.100", + "172.16.32.248" ] }, "source": { @@ -338,8 +338,8 @@ }, "related": { "ip": [ - "172.16.32.201", - "172.16.32.100" + "172.16.32.100", + 
"172.16.32.201" ] }, "source": { @@ -496,8 +496,8 @@ }, "related": { "ip": [ - "172.16.32.202", - "172.16.32.100" + "172.16.32.100", + "172.16.32.202" ] }, "source": { @@ -516,6 +516,8 @@ "Meta": null, "Fields": { "destination": { + "ip": "ff02::1", + "locality": "internal", "port": 34304 }, "event": { @@ -533,7 +535,7 @@ ] }, "flow": { - "id": "tYpw8DU5u10", + "id": "hsSxbBU-M1o", "locality": "internal" }, "netflow": { @@ -562,7 +564,7 @@ }, "network": { "bytes": 672, - "community_id": "1:vK+Zeop1Y3GHxfFGVF2/COcNBWw=", + "community_id": "1:z1qoJyUMuKy3HX8rkIDvBK/vyL8=", "direction": "unknown", "iana_number": 58, "packets": 7, @@ -571,8 +573,16 @@ "observer": { "ip": "192.0.2.1" }, + "related": { + "ip": [ + "fe80::20c:29ff:fe83:3b6e", + "ff02::1" + ] + }, "source": { "bytes": 672, + "ip": "fe80::20c:29ff:fe83:3b6e", + "locality": "internal", "packets": 7, "port": 0 } @@ -648,8 +658,8 @@ }, "related": { "ip": [ - "172.16.32.201", - "172.16.32.1" + "172.16.32.1", + "172.16.32.201" ] }, "source": { diff --git a/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-nprobe-DPI-L7.golden.json b/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-nprobe-DPI-L7.golden.json index 3e6a1d03719..ddb22d837b5 100644 --- a/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-nprobe-DPI-L7.golden.json +++ b/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-nprobe-DPI-L7.golden.json @@ -59,7 +59,6 @@ }, "related": { "ip": [ - "0.0.0.0", "0.0.0.0" ] }, diff --git a/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-template-with-0-length-fields.golden.json b/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-template-with-0-length-fields.golden.json index 65a849e632a..64a7ea5e948 100644 --- a/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-template-with-0-length-fields.golden.json +++ b/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-template-with-0-length-fields.golden.json @@ -69,8 +69,8 @@ }, "related": { "ip": [ - "239.255.255.250", - 
"192.168.1.80" + "192.168.1.80", + "239.255.255.250" ] }, "source": { @@ -235,8 +235,8 @@ }, "related": { "ip": [ - "239.255.255.250", - "192.168.1.95" + "192.168.1.95", + "239.255.255.250" ] }, "source": { @@ -401,8 +401,8 @@ }, "related": { "ip": [ - "239.255.255.250", - "192.168.1.95" + "192.168.1.95", + "239.255.255.250" ] }, "source": { @@ -567,8 +567,8 @@ }, "related": { "ip": [ - "239.255.255.250", - "192.168.1.33" + "192.168.1.33", + "239.255.255.250" ] }, "source": { @@ -733,8 +733,8 @@ }, "related": { "ip": [ - "239.255.255.250", - "192.168.1.33" + "192.168.1.33", + "239.255.255.250" ] }, "source": { diff --git a/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-valid-01.golden.json b/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-valid-01.golden.json index 20ea4e61d31..b65cc125f57 100644 --- a/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-valid-01.golden.json +++ b/x-pack/filebeat/input/netflow/testdata/golden/Netflow-9-valid-01.golden.json @@ -144,8 +144,8 @@ }, "related": { "ip": [ - "172.16.32.248", - "172.16.32.100" + "172.16.32.100", + "172.16.32.248" ] }, "source": { @@ -302,8 +302,8 @@ }, "related": { "ip": [ - "172.16.32.201", - "172.16.32.100" + "172.16.32.100", + "172.16.32.201" ] }, "source": { @@ -460,8 +460,8 @@ }, "related": { "ip": [ - "172.16.32.202", - "172.16.32.100" + "172.16.32.100", + "172.16.32.202" ] }, "source": { @@ -480,6 +480,8 @@ "Meta": null, "Fields": { "destination": { + "ip": "ff02::1", + "locality": "internal", "port": 34304 }, "event": { @@ -497,7 +499,7 @@ ] }, "flow": { - "id": "tYpw8DU5u10", + "id": "hsSxbBU-M1o", "locality": "internal" }, "netflow": { @@ -526,7 +528,7 @@ }, "network": { "bytes": 672, - "community_id": "1:vK+Zeop1Y3GHxfFGVF2/COcNBWw=", + "community_id": "1:z1qoJyUMuKy3HX8rkIDvBK/vyL8=", "direction": "unknown", "iana_number": 58, "packets": 7, @@ -535,8 +537,16 @@ "observer": { "ip": "192.0.2.1" }, + "related": { + "ip": [ + "fe80::20c:29ff:fe83:3b6e", + "ff02::1" + ] + }, 
"source": { "bytes": 672, + "ip": "fe80::20c:29ff:fe83:3b6e", + "locality": "internal", "packets": 7, "port": 0 } diff --git a/x-pack/filebeat/input/netflow/testdata/golden/netflow9_e10s_4_7byte_pad.pcap.golden.json b/x-pack/filebeat/input/netflow/testdata/golden/netflow9_e10s_4_7byte_pad.pcap.golden.json index 82a9efb87aa..869aefe8629 100644 --- a/x-pack/filebeat/input/netflow/testdata/golden/netflow9_e10s_4_7byte_pad.pcap.golden.json +++ b/x-pack/filebeat/input/netflow/testdata/golden/netflow9_e10s_4_7byte_pad.pcap.golden.json @@ -61,8 +61,8 @@ }, "related": { "ip": [ - "10.127.32.11", - "10.36.236.100" + "10.36.236.100", + "10.127.32.11" ] }, "source": { @@ -286,8 +286,8 @@ }, "related": { "ip": [ - "52.206.251.4", - "10.36.236.100" + "10.36.236.100", + "52.206.251.4" ] }, "source": { @@ -436,8 +436,8 @@ }, "related": { "ip": [ - "10.36.237.22", - "10.36.228.103" + "10.36.228.103", + "10.36.237.22" ] }, "source": { diff --git a/x-pack/filebeat/input/netflow/testdata/golden/netflow9_ubiquiti_edgerouter.pcap.golden.json b/x-pack/filebeat/input/netflow/testdata/golden/netflow9_ubiquiti_edgerouter.pcap.golden.json index ea23d1283ad..48a368eed06 100644 --- a/x-pack/filebeat/input/netflow/testdata/golden/netflow9_ubiquiti_edgerouter.pcap.golden.json +++ b/x-pack/filebeat/input/netflow/testdata/golden/netflow9_ubiquiti_edgerouter.pcap.golden.json @@ -836,8 +836,8 @@ }, "related": { "ip": [ - "192.168.1.4", - "10.100.0.1" + "10.100.0.1", + "192.168.1.4" ] }, "source": { From 377f97b1f72dff2a41657f083fda2943ef1c67f7 Mon Sep 17 00:00:00 2001 From: Alex Resnick Date: Mon, 13 Dec 2021 23:08:32 -0600 Subject: [PATCH 03/57] Move `fips_enabled` setting to AWS Common (#28899) --- CHANGELOG.next.asciidoc | 1 + metricbeat/docs/modules/aws.asciidoc | 18 +++++ .../docs/inputs/input-aws-s3.asciidoc | 4 +- x-pack/filebeat/filebeat.reference.yml | 42 ++++++++++ x-pack/filebeat/input/awscloudwatch/input.go | 3 +- x-pack/filebeat/input/awss3/config.go | 4 +- 
x-pack/filebeat/input/awss3/config_test.go | 1 - x-pack/filebeat/input/awss3/input.go | 13 +--- x-pack/filebeat/module/aws/_meta/config.yml | 36 +++++++++ .../module/aws/cloudtrail/config/aws-s3.yml | 4 + .../module/aws/cloudtrail/manifest.yml | 1 + .../module/aws/cloudwatch/config/aws-s3.yml | 4 + .../module/aws/cloudwatch/manifest.yml | 1 + .../filebeat/module/aws/ec2/config/aws-s3.yml | 4 + x-pack/filebeat/module/aws/ec2/manifest.yml | 1 + .../filebeat/module/aws/elb/config/aws-s3.yml | 4 + x-pack/filebeat/module/aws/elb/manifest.yml | 1 + .../module/aws/s3access/config/aws-s3.yml | 4 + .../filebeat/module/aws/s3access/manifest.yml | 1 + .../module/aws/vpcflow/config/input.yml | 4 + .../filebeat/module/aws/vpcflow/manifest.yml | 1 + .../module/awsfargate/_meta/config.yml | 6 ++ .../awsfargate/log/config/aws-cloudwatch.yml | 8 ++ .../module/awsfargate/log/manifest.yml | 2 + x-pack/filebeat/modules.d/aws.yml.disabled | 36 +++++++++ .../modules.d/awsfargate.yml.disabled | 6 ++ .../providers/aws/ec2/provider.go | 6 +- .../providers/aws/elb/provider.go | 6 +- x-pack/libbeat/common/aws/credentials.go | 64 +++++++++++----- x-pack/libbeat/common/aws/credentials_test.go | 76 +++++++++++++++++++ .../docs/aws-credentials-config.asciidoc | 2 + .../metricbeat/module/aws/_meta/docs.asciidoc | 18 +++++ x-pack/metricbeat/module/aws/aws.go | 16 +++- .../metricbeat/module/aws/billing/billing.go | 19 ++++- .../module/aws/cloudwatch/cloudwatch.go | 18 +++-- .../module/aws/cloudwatch/metadata.go | 8 +- .../module/aws/cloudwatch/metadata/ec2/ec2.go | 5 +- .../module/aws/cloudwatch/metadata/rds/rds.go | 5 +- .../module/aws/cloudwatch/metadata/sqs/sqs.go | 5 +- 39 files changed, 398 insertions(+), 60 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 50cb3176b8f..ba9d9007541 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -229,6 +229,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - 
Discover changes in Kubernetes nodes metadata as soon as they happen. {pull}23139[23139] - Support self signed certificates on outputs {pull}29229[29229] - Update k8s library {pull}29394[29394] +- Add FIPS configuration option for all AWS API calls. {pull}[28899] *Auditbeat* diff --git a/metricbeat/docs/modules/aws.asciidoc b/metricbeat/docs/modules/aws.asciidoc index 75991b5bd09..e0fa06851e5 100644 --- a/metricbeat/docs/modules/aws.asciidoc +++ b/metricbeat/docs/modules/aws.asciidoc @@ -68,6 +68,24 @@ If endpoint is specified, `regions` config becomes required. For example: - ec2 ---- +* *fips_enabled* + +Enforces the use of FIPS service endpoints. See <> for more information. + +[source,yaml] +---- +- module: aws + period: 5m + fips_enabled: true + regions: + - us-east-1 + - us-east-2 + - us-west-1 + - us-west-2 + metricsets: + - ec2 +---- + The aws module comes with a predefined dashboard. For example: image::./images/metricbeat-aws-overview.png[] diff --git a/x-pack/filebeat/docs/inputs/input-aws-s3.asciidoc b/x-pack/filebeat/docs/inputs/input-aws-s3.asciidoc index ec7a16cd67b..e771258562c 100644 --- a/x-pack/filebeat/docs/inputs/input-aws-s3.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-aws-s3.asciidoc @@ -182,9 +182,7 @@ file_selectors: [float] ==== `fips_enabled` -Enabling this option changes the service name from `s3` to `s3-fips` for -connecting to the correct service endpoint. For example: -`s3-fips.us-gov-east-1.amazonaws.com`. +Moved to <>. [id="input-{type}-include_s3_metadata"] [float] diff --git a/x-pack/filebeat/filebeat.reference.yml b/x-pack/filebeat/filebeat.reference.yml index 8a8fdeb360f..d5df5da53ab 100644 --- a/x-pack/filebeat/filebeat.reference.yml +++ b/x-pack/filebeat/filebeat.reference.yml @@ -160,6 +160,12 @@ filebeat.modules: # The maximum number of messages to return from SQS. Valid values: 1 to 10. 
#var.max_number_of_messages: 5 + # URL to proxy AWS API calls + #var.proxy_url: http://proxy:3128 + + # Configures the SSL settings, ie. set trusted CAs, ignore certificate verification.... + #var.ssl: + cloudwatch: enabled: false @@ -212,6 +218,12 @@ filebeat.modules: # The maximum number of messages to return from SQS. Valid values: 1 to 10. #var.max_number_of_messages: 5 + # URL to proxy AWS API calls + #var.proxy_url: http://proxy:3128 + + # Configures the SSL settings, ie. set trusted CAs, ignore certificate verification.... + #var.ssl: + ec2: enabled: false @@ -264,6 +276,12 @@ filebeat.modules: # The maximum number of messages to return from SQS. Valid values: 1 to 10. #var.max_number_of_messages: 5 + # URL to proxy AWS API calls + #var.proxy_url: http://proxy:3128 + + # Configures the SSL settings, ie. set trusted CAs, ignore certificate verification.... + #var.ssl: + elb: enabled: false @@ -316,6 +334,12 @@ filebeat.modules: # The maximum number of messages to return from SQS. Valid values: 1 to 10. #var.max_number_of_messages: 5 + # URL to proxy AWS API calls + #var.proxy_url: http://proxy:3128 + + # Configures the SSL settings, ie. set trusted CAs, ignore certificate verification.... + #var.ssl: + s3access: enabled: false @@ -368,6 +392,12 @@ filebeat.modules: # The maximum number of messages to return from SQS. Valid values: 1 to 10. #var.max_number_of_messages: 5 + # URL to proxy AWS API calls + #var.proxy_url: http://proxy:3128 + + # Configures the SSL settings, ie. set trusted CAs, ignore certificate verification.... + #var.ssl: + vpcflow: enabled: false @@ -420,6 +450,12 @@ filebeat.modules: # The maximum number of messages to return from SQS. Valid values: 1 to 10. #var.max_number_of_messages: 5 + # URL to proxy AWS API calls + #var.proxy_url: http://proxy:3128 + + # Configures the SSL settings, ie. set trusted CAs, ignore certificate verification.... 
+ #var.ssl: + #----------------------------- AWS Fargate Module ----------------------------- - module: awsfargate log: @@ -476,6 +512,12 @@ filebeat.modules: # Default api_sleep is 200 ms #var.api_sleep: 200ms + # URL to proxy AWS API calls + #var.proxy_url: http://proxy:3128 + + # Configures the SSL settings, ie. set trusted CAs, ignore certificate verification.... + #var.ssl: + #-------------------------------- Azure Module -------------------------------- - module: azure # All logs diff --git a/x-pack/filebeat/input/awscloudwatch/input.go b/x-pack/filebeat/input/awscloudwatch/input.go index 3001449378d..22f9efe15c6 100644 --- a/x-pack/filebeat/input/awscloudwatch/input.go +++ b/x-pack/filebeat/input/awscloudwatch/input.go @@ -131,7 +131,8 @@ func NewInput(cfg *common.Config, connector channel.Connector, context input.Con // Run runs the input func (in *awsCloudWatchInput) Run() { // Please see https://docs.aws.amazon.com/general/latest/gr/cwl_region.html for more info on Amazon CloudWatch Logs endpoints. - cwConfig := awscommon.EnrichAWSConfigWithEndpoint(in.config.AwsConfig.Endpoint, "logs", in.config.RegionName, in.awsConfig) + logsServiceName := awscommon.CreateServiceName("logs", in.config.AwsConfig.FIPSEnabled, in.config.RegionName) + cwConfig := awscommon.EnrichAWSConfigWithEndpoint(in.config.AwsConfig.Endpoint, logsServiceName, in.config.RegionName, in.awsConfig) svc := cloudwatchlogs.New(cwConfig) var logGroupNames []string diff --git a/x-pack/filebeat/input/awss3/config.go b/x-pack/filebeat/input/awss3/config.go index d25b99a69bd..cd26b57d0b1 100644 --- a/x-pack/filebeat/input/awss3/config.go +++ b/x-pack/filebeat/input/awss3/config.go @@ -25,7 +25,6 @@ type config struct { SQSWaitTime time.Duration `config:"sqs.wait_time"` // The max duration for which the SQS ReceiveMessage call waits for a message to arrive in the queue before returning. 
SQSMaxReceiveCount int `config:"sqs.max_receive_count"` // The max number of times a message should be received (retried) before deleting it. SQSScript *scriptConfig `config:"sqs.notification_parsing_script"` - FIPSEnabled bool `config:"fips_enabled"` MaxNumberOfMessages int `config:"max_number_of_messages"` QueueURL string `config:"queue_url"` BucketARN string `config:"bucket_arn"` @@ -48,7 +47,6 @@ func defaultConfig() config { BucketListPrefix: "", SQSWaitTime: 20 * time.Second, SQSMaxReceiveCount: 5, - FIPSEnabled: false, MaxNumberOfMessages: 5, PathStyle: false, } @@ -99,7 +97,7 @@ func (c *config) Validate() error { c.APITimeout, c.SQSWaitTime) } - if c.FIPSEnabled && c.NonAWSBucketName != "" { + if c.AWSConfig.FIPSEnabled && c.NonAWSBucketName != "" { return errors.New("fips_enabled cannot be used with a non-AWS S3 bucket.") } if c.PathStyle && c.NonAWSBucketName == "" { diff --git a/x-pack/filebeat/input/awss3/config_test.go b/x-pack/filebeat/input/awss3/config_test.go index 189ce447ff8..e465d5b1b01 100644 --- a/x-pack/filebeat/input/awss3/config_test.go +++ b/x-pack/filebeat/input/awss3/config_test.go @@ -38,7 +38,6 @@ func TestConfig(t *testing.T) { SQSWaitTime: 20 * time.Second, BucketListInterval: 120 * time.Second, BucketListPrefix: "", - FIPSEnabled: false, PathStyle: false, MaxNumberOfMessages: 5, ReaderConfig: readerConfig{ diff --git a/x-pack/filebeat/input/awss3/input.go b/x-pack/filebeat/input/awss3/input.go index bf3f8cf28b2..8d673cabbac 100644 --- a/x-pack/filebeat/input/awss3/input.go +++ b/x-pack/filebeat/input/awss3/input.go @@ -155,13 +155,11 @@ func (in *s3Input) Run(inputContext v2.Context, pipeline beat.Pipeline) error { } func (in *s3Input) createSQSReceiver(ctx v2.Context, client beat.Client) (*sqsReader, error) { - s3ServiceName := "s3" - if in.config.FIPSEnabled { - s3ServiceName = "s3-fips" - } + s3ServiceName := awscommon.CreateServiceName("s3", in.config.AWSConfig.FIPSEnabled, in.awsConfig.Region) + sqsServiceName := 
awscommon.CreateServiceName("sqs", in.config.AWSConfig.FIPSEnabled, in.awsConfig.Region) sqsAPI := &awsSQSAPI{ - client: sqs.New(awscommon.EnrichAWSConfigWithEndpoint(in.config.AWSConfig.Endpoint, "sqs", in.awsConfig.Region, in.awsConfig)), + client: sqs.New(awscommon.EnrichAWSConfigWithEndpoint(in.config.AWSConfig.Endpoint, sqsServiceName, in.awsConfig.Region, in.awsConfig)), queueURL: in.config.QueueURL, apiTimeout: in.config.APITimeout, visibilityTimeout: in.config.VisibilityTimeout, @@ -198,10 +196,7 @@ func (in *s3Input) createSQSReceiver(ctx v2.Context, client beat.Client) (*sqsRe } func (in *s3Input) createS3Lister(ctx v2.Context, cancelCtx context.Context, client beat.Client, persistentStore *statestore.Store, states *states) (*s3Poller, error) { - s3ServiceName := "s3" - if in.config.FIPSEnabled { - s3ServiceName = "s3-fips" - } + s3ServiceName := awscommon.CreateServiceName("s3", in.config.AWSConfig.FIPSEnabled, in.awsConfig.Region) var bucketName string var bucketID string if in.config.NonAWSBucketName != "" { diff --git a/x-pack/filebeat/module/aws/_meta/config.yml b/x-pack/filebeat/module/aws/_meta/config.yml index 1b139779b84..35d92a11bfe 100644 --- a/x-pack/filebeat/module/aws/_meta/config.yml +++ b/x-pack/filebeat/module/aws/_meta/config.yml @@ -63,6 +63,12 @@ # The maximum number of messages to return from SQS. Valid values: 1 to 10. #var.max_number_of_messages: 5 + # URL to proxy AWS API calls + #var.proxy_url: http://proxy:3128 + + # Configures the SSL settings, ie. set trusted CAs, ignore certificate verification.... + #var.ssl: + cloudwatch: enabled: false @@ -115,6 +121,12 @@ # The maximum number of messages to return from SQS. Valid values: 1 to 10. #var.max_number_of_messages: 5 + # URL to proxy AWS API calls + #var.proxy_url: http://proxy:3128 + + # Configures the SSL settings, ie. set trusted CAs, ignore certificate verification.... + #var.ssl: + ec2: enabled: false @@ -167,6 +179,12 @@ # The maximum number of messages to return from SQS. 
Valid values: 1 to 10. #var.max_number_of_messages: 5 + # URL to proxy AWS API calls + #var.proxy_url: http://proxy:3128 + + # Configures the SSL settings, ie. set trusted CAs, ignore certificate verification.... + #var.ssl: + elb: enabled: false @@ -219,6 +237,12 @@ # The maximum number of messages to return from SQS. Valid values: 1 to 10. #var.max_number_of_messages: 5 + # URL to proxy AWS API calls + #var.proxy_url: http://proxy:3128 + + # Configures the SSL settings, ie. set trusted CAs, ignore certificate verification.... + #var.ssl: + s3access: enabled: false @@ -271,6 +295,12 @@ # The maximum number of messages to return from SQS. Valid values: 1 to 10. #var.max_number_of_messages: 5 + # URL to proxy AWS API calls + #var.proxy_url: http://proxy:3128 + + # Configures the SSL settings, ie. set trusted CAs, ignore certificate verification.... + #var.ssl: + vpcflow: enabled: false @@ -322,3 +352,9 @@ # The maximum number of messages to return from SQS. Valid values: 1 to 10. #var.max_number_of_messages: 5 + + # URL to proxy AWS API calls + #var.proxy_url: http://proxy:3128 + + # Configures the SSL settings, ie. set trusted CAs, ignore certificate verification.... 
+ #var.ssl: diff --git a/x-pack/filebeat/module/aws/cloudtrail/config/aws-s3.yml b/x-pack/filebeat/module/aws/cloudtrail/config/aws-s3.yml index c95abb1cdc2..48f38574e71 100644 --- a/x-pack/filebeat/module/aws/cloudtrail/config/aws-s3.yml +++ b/x-pack/filebeat/module/aws/cloudtrail/config/aws-s3.yml @@ -81,6 +81,10 @@ max_number_of_messages: {{ .max_number_of_messages }} proxy_url: {{ .proxy_url }} {{ end }} +{{ if .ssl }} +ssl: {{ .ssl | tojson }} +{{ end }} + tags: {{.tags | tojson}} publisher_pipeline.disable_host: {{ inList .tags "forwarded" }} diff --git a/x-pack/filebeat/module/aws/cloudtrail/manifest.yml b/x-pack/filebeat/module/aws/cloudtrail/manifest.yml index c0715d7647c..1eb9d949b01 100644 --- a/x-pack/filebeat/module/aws/cloudtrail/manifest.yml +++ b/x-pack/filebeat/module/aws/cloudtrail/manifest.yml @@ -28,6 +28,7 @@ var: - name: fips_enabled - name: proxy_url - name: max_number_of_messages + - name: ssl ingest_pipeline: ingest/pipeline.yml input: config/{{.input}}.yml diff --git a/x-pack/filebeat/module/aws/cloudwatch/config/aws-s3.yml b/x-pack/filebeat/module/aws/cloudwatch/config/aws-s3.yml index 5b1fb24f561..d972e73ee31 100644 --- a/x-pack/filebeat/module/aws/cloudwatch/config/aws-s3.yml +++ b/x-pack/filebeat/module/aws/cloudwatch/config/aws-s3.yml @@ -66,6 +66,10 @@ max_number_of_messages: {{ .max_number_of_messages }} proxy_url: {{ .proxy_url }} {{ end }} +{{ if .ssl }} +ssl: {{ .ssl | tojson }} +{{ end }} + tags: {{.tags | tojson}} publisher_pipeline.disable_host: {{ inList .tags "forwarded" }} diff --git a/x-pack/filebeat/module/aws/cloudwatch/manifest.yml b/x-pack/filebeat/module/aws/cloudwatch/manifest.yml index 0223142a6a9..7ffff3514a0 100644 --- a/x-pack/filebeat/module/aws/cloudwatch/manifest.yml +++ b/x-pack/filebeat/module/aws/cloudwatch/manifest.yml @@ -22,6 +22,7 @@ var: - name: fips_enabled - name: proxy_url - name: max_number_of_messages + - name: ssl ingest_pipeline: ingest/pipeline.yml input: config/{{.input}}.yml diff --git 
a/x-pack/filebeat/module/aws/ec2/config/aws-s3.yml b/x-pack/filebeat/module/aws/ec2/config/aws-s3.yml index 5b1fb24f561..d972e73ee31 100644 --- a/x-pack/filebeat/module/aws/ec2/config/aws-s3.yml +++ b/x-pack/filebeat/module/aws/ec2/config/aws-s3.yml @@ -66,6 +66,10 @@ max_number_of_messages: {{ .max_number_of_messages }} proxy_url: {{ .proxy_url }} {{ end }} +{{ if .ssl }} +ssl: {{ .ssl | tojson }} +{{ end }} + tags: {{.tags | tojson}} publisher_pipeline.disable_host: {{ inList .tags "forwarded" }} diff --git a/x-pack/filebeat/module/aws/ec2/manifest.yml b/x-pack/filebeat/module/aws/ec2/manifest.yml index 0223142a6a9..7ffff3514a0 100644 --- a/x-pack/filebeat/module/aws/ec2/manifest.yml +++ b/x-pack/filebeat/module/aws/ec2/manifest.yml @@ -22,6 +22,7 @@ var: - name: fips_enabled - name: proxy_url - name: max_number_of_messages + - name: ssl ingest_pipeline: ingest/pipeline.yml input: config/{{.input}}.yml diff --git a/x-pack/filebeat/module/aws/elb/config/aws-s3.yml b/x-pack/filebeat/module/aws/elb/config/aws-s3.yml index 5b1fb24f561..d972e73ee31 100644 --- a/x-pack/filebeat/module/aws/elb/config/aws-s3.yml +++ b/x-pack/filebeat/module/aws/elb/config/aws-s3.yml @@ -66,6 +66,10 @@ max_number_of_messages: {{ .max_number_of_messages }} proxy_url: {{ .proxy_url }} {{ end }} +{{ if .ssl }} +ssl: {{ .ssl | tojson }} +{{ end }} + tags: {{.tags | tojson}} publisher_pipeline.disable_host: {{ inList .tags "forwarded" }} diff --git a/x-pack/filebeat/module/aws/elb/manifest.yml b/x-pack/filebeat/module/aws/elb/manifest.yml index da22ed1b1cc..5f0b2d16e3d 100644 --- a/x-pack/filebeat/module/aws/elb/manifest.yml +++ b/x-pack/filebeat/module/aws/elb/manifest.yml @@ -22,6 +22,7 @@ var: - name: fips_enabled - name: proxy_url - name: max_number_of_messages + - name: ssl ingest_pipeline: ingest/pipeline.yml input: config/{{.input}}.yml diff --git a/x-pack/filebeat/module/aws/s3access/config/aws-s3.yml b/x-pack/filebeat/module/aws/s3access/config/aws-s3.yml index 
5b1fb24f561..d972e73ee31 100644 --- a/x-pack/filebeat/module/aws/s3access/config/aws-s3.yml +++ b/x-pack/filebeat/module/aws/s3access/config/aws-s3.yml @@ -66,6 +66,10 @@ max_number_of_messages: {{ .max_number_of_messages }} proxy_url: {{ .proxy_url }} {{ end }} +{{ if .ssl }} +ssl: {{ .ssl | tojson }} +{{ end }} + tags: {{.tags | tojson}} publisher_pipeline.disable_host: {{ inList .tags "forwarded" }} diff --git a/x-pack/filebeat/module/aws/s3access/manifest.yml b/x-pack/filebeat/module/aws/s3access/manifest.yml index 0223142a6a9..7ffff3514a0 100644 --- a/x-pack/filebeat/module/aws/s3access/manifest.yml +++ b/x-pack/filebeat/module/aws/s3access/manifest.yml @@ -22,6 +22,7 @@ var: - name: fips_enabled - name: proxy_url - name: max_number_of_messages + - name: ssl ingest_pipeline: ingest/pipeline.yml input: config/{{.input}}.yml diff --git a/x-pack/filebeat/module/aws/vpcflow/config/input.yml b/x-pack/filebeat/module/aws/vpcflow/config/input.yml index 51b649b1e2e..6ac1ceccf66 100644 --- a/x-pack/filebeat/module/aws/vpcflow/config/input.yml +++ b/x-pack/filebeat/module/aws/vpcflow/config/input.yml @@ -68,6 +68,10 @@ max_number_of_messages: {{ .max_number_of_messages }} proxy_url: {{ .proxy_url }} {{ end }} +{{ if .ssl }} +ssl: {{ .ssl | tojson }} +{{ end }} + {{ else if eq .input "file" }} type: log diff --git a/x-pack/filebeat/module/aws/vpcflow/manifest.yml b/x-pack/filebeat/module/aws/vpcflow/manifest.yml index 8871cf1cffb..be8642f06cb 100644 --- a/x-pack/filebeat/module/aws/vpcflow/manifest.yml +++ b/x-pack/filebeat/module/aws/vpcflow/manifest.yml @@ -22,6 +22,7 @@ var: - name: fips_enabled - name: proxy_url - name: max_number_of_messages + - name: ssl ingest_pipeline: ingest/pipeline.yml input: config/input.yml diff --git a/x-pack/filebeat/module/awsfargate/_meta/config.yml b/x-pack/filebeat/module/awsfargate/_meta/config.yml index 2318b322e9a..8d1b03f49dc 100644 --- a/x-pack/filebeat/module/awsfargate/_meta/config.yml +++ 
b/x-pack/filebeat/module/awsfargate/_meta/config.yml @@ -52,3 +52,9 @@ # Time used to sleep between AWS FilterLogEvents API calls inside the same collection period # Default api_sleep is 200 ms #var.api_sleep: 200ms + + # URL to proxy AWS API calls + #var.proxy_url: http://proxy:3128 + + # Configures the SSL settings, ie. set trusted CAs, ignore certificate verification.... + #var.ssl: diff --git a/x-pack/filebeat/module/awsfargate/log/config/aws-cloudwatch.yml b/x-pack/filebeat/module/awsfargate/log/config/aws-cloudwatch.yml index f7f3199028c..958228e74da 100644 --- a/x-pack/filebeat/module/awsfargate/log/config/aws-cloudwatch.yml +++ b/x-pack/filebeat/module/awsfargate/log/config/aws-cloudwatch.yml @@ -56,6 +56,14 @@ session_token: {{ .session_token }} role_arn: {{ .role_arn }} {{ end }} +{{ if .proxy_url }} +proxy_url: {{ .proxy_url }} +{{ end }} + +{{ if .ssl }} +ssl: {{ .ssl | tojson }} +{{ end }} + processors: - add_fields: target: '' diff --git a/x-pack/filebeat/module/awsfargate/log/manifest.yml b/x-pack/filebeat/module/awsfargate/log/manifest.yml index ca5fb61d056..47fbc7697d1 100644 --- a/x-pack/filebeat/module/awsfargate/log/manifest.yml +++ b/x-pack/filebeat/module/awsfargate/log/manifest.yml @@ -20,6 +20,8 @@ var: - name: scan_frequency - name: api_timeout - name: api_sleep + - name: proxy_url + - name: ssl ingest_pipeline: ingest/pipeline.yml input: config/{{.input}}.yml diff --git a/x-pack/filebeat/modules.d/aws.yml.disabled b/x-pack/filebeat/modules.d/aws.yml.disabled index 3d34116d225..1cde529a6d3 100644 --- a/x-pack/filebeat/modules.d/aws.yml.disabled +++ b/x-pack/filebeat/modules.d/aws.yml.disabled @@ -66,6 +66,12 @@ # The maximum number of messages to return from SQS. Valid values: 1 to 10. #var.max_number_of_messages: 5 + # URL to proxy AWS API calls + #var.proxy_url: http://proxy:3128 + + # Configures the SSL settings, ie. set trusted CAs, ignore certificate verification.... 
+ #var.ssl: + cloudwatch: enabled: false @@ -118,6 +124,12 @@ # The maximum number of messages to return from SQS. Valid values: 1 to 10. #var.max_number_of_messages: 5 + # URL to proxy AWS API calls + #var.proxy_url: http://proxy:3128 + + # Configures the SSL settings, ie. set trusted CAs, ignore certificate verification.... + #var.ssl: + ec2: enabled: false @@ -170,6 +182,12 @@ # The maximum number of messages to return from SQS. Valid values: 1 to 10. #var.max_number_of_messages: 5 + # URL to proxy AWS API calls + #var.proxy_url: http://proxy:3128 + + # Configures the SSL settings, ie. set trusted CAs, ignore certificate verification.... + #var.ssl: + elb: enabled: false @@ -222,6 +240,12 @@ # The maximum number of messages to return from SQS. Valid values: 1 to 10. #var.max_number_of_messages: 5 + # URL to proxy AWS API calls + #var.proxy_url: http://proxy:3128 + + # Configures the SSL settings, ie. set trusted CAs, ignore certificate verification.... + #var.ssl: + s3access: enabled: false @@ -274,6 +298,12 @@ # The maximum number of messages to return from SQS. Valid values: 1 to 10. #var.max_number_of_messages: 5 + # URL to proxy AWS API calls + #var.proxy_url: http://proxy:3128 + + # Configures the SSL settings, ie. set trusted CAs, ignore certificate verification.... + #var.ssl: + vpcflow: enabled: false @@ -325,3 +355,9 @@ # The maximum number of messages to return from SQS. Valid values: 1 to 10. #var.max_number_of_messages: 5 + + # URL to proxy AWS API calls + #var.proxy_url: http://proxy:3128 + + # Configures the SSL settings, ie. set trusted CAs, ignore certificate verification.... 
+ #var.ssl: diff --git a/x-pack/filebeat/modules.d/awsfargate.yml.disabled b/x-pack/filebeat/modules.d/awsfargate.yml.disabled index 225892f7fbe..57a5e419135 100644 --- a/x-pack/filebeat/modules.d/awsfargate.yml.disabled +++ b/x-pack/filebeat/modules.d/awsfargate.yml.disabled @@ -55,3 +55,9 @@ # Time used to sleep between AWS FilterLogEvents API calls inside the same collection period # Default api_sleep is 200 ms #var.api_sleep: 200ms + + # URL to proxy AWS API calls + #var.proxy_url: http://proxy:3128 + + # Configures the SSL settings, ie. set trusted CAs, ignore certificate verification.... + #var.ssl: diff --git a/x-pack/libbeat/autodiscover/providers/aws/ec2/provider.go b/x-pack/libbeat/autodiscover/providers/aws/ec2/provider.go index 19d8c8a9784..3cab8c3ca28 100644 --- a/x-pack/libbeat/autodiscover/providers/aws/ec2/provider.go +++ b/x-pack/libbeat/autodiscover/providers/aws/ec2/provider.go @@ -64,8 +64,9 @@ func AutodiscoverBuilder( if config.Regions == nil { // set default region to make initial aws api call awsCfg.Region = "us-west-1" + ec2ServiceName := awscommon.CreateServiceName("ec2", config.AWSConfig.FIPSEnabled, awsCfg.Region) svcEC2 := ec2.New(awscommon.EnrichAWSConfigWithEndpoint( - config.AWSConfig.Endpoint, "ec2", awsCfg.Region, awsCfg)) + config.AWSConfig.Endpoint, ec2ServiceName, awsCfg.Region, awsCfg)) completeRegionsList, err := awsauto.GetRegions(svcEC2) if err != nil { @@ -81,8 +82,9 @@ func AutodiscoverBuilder( logp.Error(errors.Wrap(err, "error loading AWS config for aws_ec2 autodiscover provider")) } awsCfg.Region = region + ec2ServiceName := awscommon.CreateServiceName("ec2", config.AWSConfig.FIPSEnabled, region) clients = append(clients, ec2.New(awscommon.EnrichAWSConfigWithEndpoint( - config.AWSConfig.Endpoint, "ec2", region, awsCfg))) + config.AWSConfig.Endpoint, ec2ServiceName, region, awsCfg))) } return internalBuilder(uuid, bus, config, newAPIFetcher(clients), keystore) diff --git 
a/x-pack/libbeat/autodiscover/providers/aws/elb/provider.go b/x-pack/libbeat/autodiscover/providers/aws/elb/provider.go index 39313f36871..8f39ea10ce4 100644 --- a/x-pack/libbeat/autodiscover/providers/aws/elb/provider.go +++ b/x-pack/libbeat/autodiscover/providers/aws/elb/provider.go @@ -63,8 +63,9 @@ func AutodiscoverBuilder( // Construct MetricSet with a full regions list if there is no region specified. if config.Regions == nil { + ec2ServiceName := awscommon.CreateServiceName("ec2", config.AWSConfig.FIPSEnabled, awsCfg.Region) svcEC2 := ec2.New(awscommon.EnrichAWSConfigWithEndpoint( - config.AWSConfig.Endpoint, "ec2", awsCfg.Region, awsCfg)) + config.AWSConfig.Endpoint, ec2ServiceName, awsCfg.Region, awsCfg)) completeRegionsList, err := awsauto.GetRegions(svcEC2) if err != nil { @@ -86,8 +87,9 @@ func AutodiscoverBuilder( logp.Err("error loading AWS config for aws_elb autodiscover provider: %s", err) } awsCfg.Region = region + elbServiceName := awscommon.CreateServiceName("elasticloadbalancing", config.AWSConfig.FIPSEnabled, region) clients = append(clients, elasticloadbalancingv2.New(awscommon.EnrichAWSConfigWithEndpoint( - config.AWSConfig.Endpoint, "elasticloadbalancing", region, awsCfg))) + config.AWSConfig.Endpoint, elbServiceName, region, awsCfg))) } return internalBuilder(uuid, bus, config, newAPIFetcher(clients), keystore) diff --git a/x-pack/libbeat/common/aws/credentials.go b/x-pack/libbeat/common/aws/credentials.go index 08bcbe56875..9b6d80c9528 100644 --- a/x-pack/libbeat/common/aws/credentials.go +++ b/x-pack/libbeat/common/aws/credentials.go @@ -5,8 +5,10 @@ package aws import ( + "crypto/tls" "net/http" "net/url" + "strings" awssdk "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/aws/defaults" @@ -16,36 +18,51 @@ import ( "github.com/pkg/errors" "github.com/elastic/beats/v7/libbeat/common/transport/httpcommon" + "github.com/elastic/beats/v7/libbeat/common/transport/tlscommon" 
"github.com/elastic/beats/v7/libbeat/logp" ) +// OptionalGovCloudFIPS is a list of services on AWS GovCloud that is not FIPS by default. +// These services follow the standard -fips..amazonaws.com format. +var OptionalGovCloudFIPS = map[string]bool{ + "s3": true, +} + // ConfigAWS is a structure defined for AWS credentials type ConfigAWS struct { - AccessKeyID string `config:"access_key_id"` - SecretAccessKey string `config:"secret_access_key"` - SessionToken string `config:"session_token"` - ProfileName string `config:"credential_profile_name"` - SharedCredentialFile string `config:"shared_credential_file"` - Endpoint string `config:"endpoint"` - RoleArn string `config:"role_arn"` - ProxyUrl string `config:"proxy_url"` + AccessKeyID string `config:"access_key_id"` + SecretAccessKey string `config:"secret_access_key"` + SessionToken string `config:"session_token"` + ProfileName string `config:"credential_profile_name"` + SharedCredentialFile string `config:"shared_credential_file"` + Endpoint string `config:"endpoint"` + RoleArn string `config:"role_arn"` + ProxyUrl string `config:"proxy_url"` + FIPSEnabled bool `config:"fips_enabled"` + TLS *tlscommon.Config `config:"ssl" yaml:"ssl,omitempty" json:"ssl,omitempty"` } // InitializeAWSConfig function creates the awssdk.Config object from the provided config func InitializeAWSConfig(config ConfigAWS) (awssdk.Config, error) { AWSConfig, _ := GetAWSCredentials(config) + var proxy func(*http.Request) (*url.URL, error) if config.ProxyUrl != "" { proxyUrl, err := httpcommon.NewProxyURIFromString(config.ProxyUrl) if err != nil { return AWSConfig, err } - - httpClient := &http.Client{ - Transport: &http.Transport{ - Proxy: http.ProxyURL(proxyUrl.URI()), - }, - } - AWSConfig.HTTPClient = httpClient + proxy = http.ProxyURL(proxyUrl.URI()) + } + var tlsConfig *tls.Config + if config.TLS != nil { + TLSConfig, _ := tlscommon.LoadTLSConfig(config.TLS) + tlsConfig = TLSConfig.ToConfig() + } + AWSConfig.HTTPClient = 
&http.Client{ + Transport: &http.Transport{ + Proxy: proxy, + TLSClientConfig: tlsConfig, + }, } return AWSConfig, nil } @@ -143,17 +160,30 @@ func getRoleArn(config ConfigAWS, awsConfig awssdk.Config) awssdk.Config { // EnrichAWSConfigWithEndpoint function enabled endpoint resolver for AWS // service clients when endpoint is given in config. func EnrichAWSConfigWithEndpoint(endpoint string, serviceName string, regionName string, awsConfig awssdk.Config) awssdk.Config { + var eurl string if endpoint != "" { parsedEndpoint, _ := url.Parse(endpoint) if parsedEndpoint.Scheme != "" { awsConfig.EndpointResolver = awssdk.ResolveWithEndpointURL(endpoint) } else { if regionName == "" { - awsConfig.EndpointResolver = awssdk.ResolveWithEndpointURL("https://" + serviceName + "." + endpoint) + eurl = "https://" + serviceName + "." + endpoint } else { - awsConfig.EndpointResolver = awssdk.ResolveWithEndpointURL("https://" + serviceName + "." + regionName + "." + endpoint) + eurl = "https://" + serviceName + "." + regionName + "." 
+ endpoint } + awsConfig.EndpointResolver = awssdk.ResolveWithEndpointURL(eurl) } } return awsConfig } + +//Create AWS service name based on Region and FIPS +func CreateServiceName(serviceName string, fipsEnabled bool, region string) string { + if fipsEnabled { + _, found := OptionalGovCloudFIPS[serviceName] + if !strings.HasPrefix(region, "us-gov-") || found { + return serviceName + "-fips" + } + } + return serviceName +} diff --git a/x-pack/libbeat/common/aws/credentials_test.go b/x-pack/libbeat/common/aws/credentials_test.go index dde6a0665e2..e3c21875385 100644 --- a/x-pack/libbeat/common/aws/credentials_test.go +++ b/x-pack/libbeat/common/aws/credentials_test.go @@ -6,12 +6,36 @@ package aws import ( "context" + "net/http" "testing" awssdk "github.com/aws/aws-sdk-go-v2/aws" "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/v7/libbeat/common/transport/tlscommon" ) +func TestInitializeAWSConfig(t *testing.T) { + inputConfig := ConfigAWS{ + AccessKeyID: "123", + SecretAccessKey: "abc", + TLS: &tlscommon.Config{ + VerificationMode: 1, + }, + ProxyUrl: "http://proxy:3128", + } + awsConfig, err := InitializeAWSConfig(inputConfig) + assert.NoError(t, err) + + retrievedAWSConfig, err := awsConfig.Credentials.Retrieve(context.Background()) + assert.NoError(t, err) + + assert.Equal(t, inputConfig.AccessKeyID, retrievedAWSConfig.AccessKeyID) + assert.Equal(t, inputConfig.SecretAccessKey, retrievedAWSConfig.SecretAccessKey) + assert.Equal(t, true, awsConfig.HTTPClient.(*http.Client).Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify) + assert.NotNil(t, awsConfig.HTTPClient.(*http.Client).Transport.(*http.Transport).Proxy) +} + func TestGetAWSCredentials(t *testing.T) { inputConfig := ConfigAWS{ AccessKeyID: "123", @@ -86,3 +110,55 @@ func TestEnrichAWSConfigWithEndpoint(t *testing.T) { }) } } + +func TestCreateServiceName(t *testing.T) { + cases := []struct { + title string + serviceName string + fips_enabled bool + 
region string + expectedServiceName string + }{ + { + "S3 - non-fips - us-east-1", + "s3", + false, + "us-east-1", + "s3", + }, + { + "S3 - non-fips - us-gov-east-1", + "s3", + false, + "us-gov-east-1", + "s3", + }, + { + "S3 - fips - us-gov-east-1", + "s3", + true, + "us-gov-east-1", + "s3-fips", + }, + { + "EC2 - fips - us-gov-east-1", + "ec2", + true, + "us-gov-east-1", + "ec2", + }, + { + "EC2 - fips - us-east-1", + "ec2", + true, + "us-east-1", + "ec2-fips", + }, + } + for _, c := range cases { + t.Run(c.title, func(t *testing.T) { + serviceName := CreateServiceName(c.serviceName, c.fips_enabled, c.region) + assert.Equal(t, c.expectedServiceName, serviceName) + }) + } +} diff --git a/x-pack/libbeat/docs/aws-credentials-config.asciidoc b/x-pack/libbeat/docs/aws-credentials-config.asciidoc index d7223720ad4..63a0a0cb639 100644 --- a/x-pack/libbeat/docs/aws-credentials-config.asciidoc +++ b/x-pack/libbeat/docs/aws-credentials-config.asciidoc @@ -18,6 +18,8 @@ services do not include a region. In `aws` module, `endpoint` config is to set the `endpoint-code` part, such as `amazonaws.com`, `amazonaws.com.cn`, `c2s.ic.gov`, `sc2s.sgov.gov`. * *proxy_url*: URL of the proxy to use to connect to AWS web services. The syntax is `http(s)://:` +* *fips_enabled*: Enabling this option changes the service names from `s3` to `s3-fips` for connecting to the correct service endpoint. For example: `s3-fips.us-gov-east-1.amazonaws.com`. All services used by Beats are FIPS compatible except for `tagging` but only certain regions are FIPS compatible. See https://aws.amazon.com/compliance/fips/ or the appropriate service page, https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html, for a full list of FIPS endpoints and regions. +* *ssl*: This specifies SSL/TLS configuration. If the ssl section is missing, the host's CAs are used for HTTPS connections. See <> for more information. 
[float] ==== Supported Formats diff --git a/x-pack/metricbeat/module/aws/_meta/docs.asciidoc b/x-pack/metricbeat/module/aws/_meta/docs.asciidoc index c4ceae59eee..9a9a58887f7 100644 --- a/x-pack/metricbeat/module/aws/_meta/docs.asciidoc +++ b/x-pack/metricbeat/module/aws/_meta/docs.asciidoc @@ -58,6 +58,24 @@ If endpoint is specified, `regions` config becomes required. For example: - ec2 ---- +* *fips_enabled* + +Enforces the use of FIPS service endpoints. See <> for more information. + +[source,yaml] +---- +- module: aws + period: 5m + fips_enabled: true + regions: + - us-east-1 + - us-east-2 + - us-west-1 + - us-west-2 + metricsets: + - ec2 +---- + The aws module comes with a predefined dashboard. For example: image::./images/metricbeat-aws-overview.png[] diff --git a/x-pack/metricbeat/module/aws/aws.go b/x-pack/metricbeat/module/aws/aws.go index 0e04ad6f4fa..e11b5a1126a 100644 --- a/x-pack/metricbeat/module/aws/aws.go +++ b/x-pack/metricbeat/module/aws/aws.go @@ -7,6 +7,7 @@ package aws import ( "context" "fmt" + "strings" "time" awssdk "github.com/aws/aws-sdk-go-v2/aws" @@ -105,9 +106,12 @@ func NewMetricSet(base mb.BaseMetricSet) (*MetricSet, error) { awsConfig.Region = config.Regions[0] } + stsServiceName := awscommon.CreateServiceName("sts", config.AWSConfig.FIPSEnabled, awsConfig.Region) + iamServiceName := awscommon.CreateServiceName("iam", config.AWSConfig.FIPSEnabled, awsConfig.Region) + // Get IAM account id svcSts := sts.New(awscommon.EnrichAWSConfigWithEndpoint( - config.AWSConfig.Endpoint, "sts", "", awsConfig)) + config.AWSConfig.Endpoint, stsServiceName, "", awsConfig)) reqIdentity := svcSts.GetCallerIdentityRequest(&sts.GetCallerIdentityInput{}) outputIdentity, err := reqIdentity.Send(context.TODO()) if err != nil { @@ -116,16 +120,20 @@ func NewMetricSet(base mb.BaseMetricSet) (*MetricSet, error) { metricSet.AccountID = *outputIdentity.Account base.Logger().Debug("AWS Credentials belong to account ID: ", metricSet.AccountID) } - + 
iamRegion := "" + if strings.HasPrefix(awsConfig.Region, "us-gov-") { + iamRegion = "us-gov" + } // Get account name/alias svcIam := iam.New(awscommon.EnrichAWSConfigWithEndpoint( - config.AWSConfig.Endpoint, "iam", "", awsConfig)) + config.AWSConfig.Endpoint, iamServiceName, iamRegion, awsConfig)) metricSet.AccountName = getAccountName(svcIam, base, metricSet) // Construct MetricSet with a full regions list if config.Regions == nil { + ec2ServiceName := awscommon.CreateServiceName("ec2", config.AWSConfig.FIPSEnabled, awsConfig.Region) svcEC2 := ec2.New(awscommon.EnrichAWSConfigWithEndpoint( - config.AWSConfig.Endpoint, "ec2", "", awsConfig)) + config.AWSConfig.Endpoint, ec2ServiceName, "", awsConfig)) completeRegionsList, err := getRegions(svcEC2) if err != nil { return nil, err diff --git a/x-pack/metricbeat/module/aws/billing/billing.go b/x-pack/metricbeat/module/aws/billing/billing.go index c1c66ac0529..b7da00495a7 100644 --- a/x-pack/metricbeat/module/aws/billing/billing.go +++ b/x-pack/metricbeat/module/aws/billing/billing.go @@ -114,6 +114,12 @@ func (c CostExplorerConfig) Validate() error { // format. It publishes the event which is then forwarded to the output. In case // of an error set the Error field of mb.Event or simply call report.Error(). 
func (m *MetricSet) Fetch(report mb.ReporterV2) error { + var config aws.Config + err := m.Module().UnpackConfig(&config) + if err != nil { + return nil + } + monitoringServiceName := awscommon.CreateServiceName("monitoring", config.AWSConfig.FIPSEnabled, regionName) // Get startDate and endDate startDate, endDate := getStartDateEndDate(m.Period) @@ -123,11 +129,11 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error { // get cost metrics from cost explorer awsConfig := m.MetricSet.AwsConfig.Copy() svcCostExplorer := costexplorer.New(awscommon.EnrichAWSConfigWithEndpoint( - m.Endpoint, "monitoring", "", awsConfig)) + m.Endpoint, monitoringServiceName, "", awsConfig)) awsConfig.Region = regionName svcCloudwatch := cloudwatch.New(awscommon.EnrichAWSConfigWithEndpoint( - m.Endpoint, "monitoring", regionName, awsConfig)) + m.Endpoint, monitoringServiceName, regionName, awsConfig)) timePeriod := costexplorer.DateInterval{ Start: awssdk.String(startDate), @@ -212,10 +218,17 @@ func (m *MetricSet) getCostGroupBy(svcCostExplorer costexploreriface.ClientAPI, // get linked account IDs and names accounts := map[string]string{} + var config aws.Config + err := m.Module().UnpackConfig(&config) + if err != nil { + return nil + } if ok, _ := aws.StringInSlice("LINKED_ACCOUNT", groupByDimKeys); ok { awsConfig := m.MetricSet.AwsConfig.Copy() + organizationsServiceName := awscommon.CreateServiceName("organizations", config.AWSConfig.FIPSEnabled, regionName) + svcOrg := organizations.New(awscommon.EnrichAWSConfigWithEndpoint( - m.Endpoint, "organizations", regionName, awsConfig)) + m.Endpoint, organizationsServiceName, regionName, awsConfig)) accounts = m.getAccountName(svcOrg) } diff --git a/x-pack/metricbeat/module/aws/cloudwatch/cloudwatch.go b/x-pack/metricbeat/module/aws/cloudwatch/cloudwatch.go index 1e67fceac70..9c2f24d87ac 100644 --- a/x-pack/metricbeat/module/aws/cloudwatch/cloudwatch.go +++ b/x-pack/metricbeat/module/aws/cloudwatch/cloudwatch.go @@ -140,18 +140,25 @@ 
func (m *MetricSet) Fetch(report mb.ReporterV2) error { m.logger.Debugf("listMetricDetailTotal = %s", listMetricDetailTotal) m.logger.Debugf("namespaceDetailTotal = %s", namespaceDetailTotal) + var config aws.Config + err = m.Module().UnpackConfig(&config) + if err != nil { + return err + } + // Create events based on listMetricDetailTotal from configuration if len(listMetricDetailTotal.metricsWithStats) != 0 { for _, regionName := range m.MetricSet.RegionsList { m.logger.Debugf("Collecting metrics from AWS region %s", regionName) awsConfig := m.MetricSet.AwsConfig.Copy() awsConfig.Region = regionName + monitoringServiceName := awscommon.CreateServiceName("monitoring", config.AWSConfig.FIPSEnabled, regionName) svcCloudwatch := cloudwatch.New(awscommon.EnrichAWSConfigWithEndpoint( - m.Endpoint, "monitoring", regionName, awsConfig)) + m.Endpoint, monitoringServiceName, regionName, awsConfig)) svcResourceAPI := resourcegroupstaggingapi.New(awscommon.EnrichAWSConfigWithEndpoint( - m.Endpoint, "tagging", regionName, awsConfig)) + m.Endpoint, "tagging", regionName, awsConfig)) //Does not support FIPS eventsWithIdentifier, err := m.createEvents(svcCloudwatch, svcResourceAPI, listMetricDetailTotal.metricsWithStats, listMetricDetailTotal.resourceTypeFilters, regionName, startTime, endTime) if err != nil { @@ -173,11 +180,12 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error { awsConfig := m.MetricSet.AwsConfig.Copy() awsConfig.Region = regionName + monitoringServiceName := awscommon.CreateServiceName("monitoring", config.AWSConfig.FIPSEnabled, regionName) svcCloudwatch := cloudwatch.New(awscommon.EnrichAWSConfigWithEndpoint( - m.Endpoint, "monitoring", regionName, awsConfig)) + m.Endpoint, monitoringServiceName, regionName, awsConfig)) svcResourceAPI := resourcegroupstaggingapi.New(awscommon.EnrichAWSConfigWithEndpoint( - m.Endpoint, "tagging", regionName, awsConfig)) + m.Endpoint, "tagging", regionName, awsConfig)) //Does not support FIPS for namespace, 
namespaceDetails := range namespaceDetailTotal { m.logger.Debugf("Collected metrics from namespace %s", namespace) @@ -204,7 +212,7 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error { m.logger.Debugf("Collected number of metrics = %d", len(eventsWithIdentifier)) - err = reportEvents(addMetadata(namespace, m.Endpoint, regionName, awsConfig, eventsWithIdentifier), report) + err = reportEvents(addMetadata(namespace, m.Endpoint, regionName, awsConfig, config.AWSConfig.FIPSEnabled, eventsWithIdentifier), report) if err != nil { return errors.Wrap(err, "reportEvents failed") } diff --git a/x-pack/metricbeat/module/aws/cloudwatch/metadata.go b/x-pack/metricbeat/module/aws/cloudwatch/metadata.go index 7c0a82aafdf..be4b637cca7 100644 --- a/x-pack/metricbeat/module/aws/cloudwatch/metadata.go +++ b/x-pack/metricbeat/module/aws/cloudwatch/metadata.go @@ -21,14 +21,14 @@ const ( ) // addMetadata adds metadata to the given events map based on namespace -func addMetadata(namespace string, endpoint string, regionName string, awsConfig awssdk.Config, events map[string]mb.Event) map[string]mb.Event { +func addMetadata(namespace string, endpoint string, regionName string, awsConfig awssdk.Config, fips_enabled bool, events map[string]mb.Event) map[string]mb.Event { switch namespace { case namespaceEC2: - return ec2.AddMetadata(endpoint, regionName, awsConfig, events) + return ec2.AddMetadata(endpoint, regionName, awsConfig, fips_enabled, events) case namespaceRDS: - return rds.AddMetadata(endpoint, regionName, awsConfig, events) + return rds.AddMetadata(endpoint, regionName, awsConfig, fips_enabled, events) case namespaceSQS: - return sqs.AddMetadata(endpoint, regionName, awsConfig, events) + return sqs.AddMetadata(endpoint, regionName, awsConfig, fips_enabled, events) default: return events } diff --git a/x-pack/metricbeat/module/aws/cloudwatch/metadata/ec2/ec2.go b/x-pack/metricbeat/module/aws/cloudwatch/metadata/ec2/ec2.go index 8637afd8979..0a60351e907 100644 --- 
a/x-pack/metricbeat/module/aws/cloudwatch/metadata/ec2/ec2.go +++ b/x-pack/metricbeat/module/aws/cloudwatch/metadata/ec2/ec2.go @@ -22,9 +22,10 @@ import ( const metadataPrefix = "aws.ec2.instance." // AddMetadata adds metadata for EC2 instances from a specific region -func AddMetadata(endpoint string, regionName string, awsConfig awssdk.Config, events map[string]mb.Event) map[string]mb.Event { +func AddMetadata(endpoint string, regionName string, awsConfig awssdk.Config, fips_enabled bool, events map[string]mb.Event) map[string]mb.Event { + ec2ServiceName := awscommon.CreateServiceName("ec2", fips_enabled, regionName) svcEC2 := ec2.New(awscommon.EnrichAWSConfigWithEndpoint( - endpoint, "ec2", regionName, awsConfig)) + endpoint, ec2ServiceName, regionName, awsConfig)) instancesOutputs, err := getInstancesPerRegion(svcEC2) if err != nil { diff --git a/x-pack/metricbeat/module/aws/cloudwatch/metadata/rds/rds.go b/x-pack/metricbeat/module/aws/cloudwatch/metadata/rds/rds.go index e732749d016..3e64d3079cb 100644 --- a/x-pack/metricbeat/module/aws/cloudwatch/metadata/rds/rds.go +++ b/x-pack/metricbeat/module/aws/cloudwatch/metadata/rds/rds.go @@ -21,9 +21,10 @@ import ( const metadataPrefix = "aws.rds.db_instance." 
// AddMetadata adds metadata for RDS instances from a specific region -func AddMetadata(endpoint string, regionName string, awsConfig awssdk.Config, events map[string]mb.Event) map[string]mb.Event { +func AddMetadata(endpoint string, regionName string, awsConfig awssdk.Config, fips_enabled bool, events map[string]mb.Event) map[string]mb.Event { + rdsServiceName := awscommon.CreateServiceName("rds", fips_enabled, regionName) svc := rds.New(awscommon.EnrichAWSConfigWithEndpoint( - endpoint, "rds", regionName, awsConfig)) + endpoint, rdsServiceName, regionName, awsConfig)) // Get DBInstance IDs per region dbDetailsMap, err := getDBInstancesPerRegion(svc) diff --git a/x-pack/metricbeat/module/aws/cloudwatch/metadata/sqs/sqs.go b/x-pack/metricbeat/module/aws/cloudwatch/metadata/sqs/sqs.go index 674731f4d4b..812b7d3a5c7 100644 --- a/x-pack/metricbeat/module/aws/cloudwatch/metadata/sqs/sqs.go +++ b/x-pack/metricbeat/module/aws/cloudwatch/metadata/sqs/sqs.go @@ -22,9 +22,10 @@ import ( const metadataPrefix = "aws.sqs.queue" // AddMetadata adds metadata for SQS queues from a specific region -func AddMetadata(endpoint string, regionName string, awsConfig awssdk.Config, events map[string]mb.Event) map[string]mb.Event { +func AddMetadata(endpoint string, regionName string, awsConfig awssdk.Config, fips_enabled bool, events map[string]mb.Event) map[string]mb.Event { + sqsServiceName := awscommon.CreateServiceName("sqs", fips_enabled, regionName) svc := sqs.New(awscommon.EnrichAWSConfigWithEndpoint( - endpoint, "sqs", regionName, awsConfig)) + endpoint, sqsServiceName, regionName, awsConfig)) // Get queueUrls for each region queueURLs, err := getQueueUrls(svc) From 2d875e257f5be7e975c83979f45278d5ff160cb5 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Tue, 14 Dec 2021 08:42:08 +0000 Subject: [PATCH 04/57] [mergify]: forwardport into the master branch (#29390) --- .mergify.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.mergify.yml b/.mergify.yml 
index c76feb76e98..ade2822ada9 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -6,7 +6,7 @@ pull_request_rules: - name: forward-port patches to master branch conditions: - merged - - label=backport-v8.1.0 + - label=forwardport-master actions: backport: assignees: From 9b893e88cfe109e64638d65c58fd75c2ff695402 Mon Sep 17 00:00:00 2001 From: Chris Mark Date: Tue, 14 Dec 2021 15:41:56 +0200 Subject: [PATCH 05/57] Add option to skip older k8s events (#29396) --- CHANGELOG.next.asciidoc | 1 + metricbeat/docs/modules/kubernetes.asciidoc | 24 +++++++++++++++++-- metricbeat/metricbeat.reference.yml | 24 +++++++++++++++++-- .../kubernetes/_meta/config.reference.yml | 24 +++++++++++++++++-- metricbeat/module/kubernetes/_meta/config.yml | 19 ++++++++++++++- metricbeat/module/kubernetes/event/config.go | 2 ++ metricbeat/module/kubernetes/event/event.go | 6 +++-- metricbeat/modules.d/kubernetes.yml.disabled | 19 ++++++++++++++- x-pack/metricbeat/metricbeat.reference.yml | 24 +++++++++++++++++-- 9 files changed, 131 insertions(+), 12 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index ba9d9007541..02d272b963c 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -282,6 +282,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Remove required for region/zone and make stackdriver a metricset in googlecloud. {issue}16785[16785] {pull}18398[18398] - Add memory metrics into compute googlecloud. {pull}18802[18802] - Preliminary AIX support {pull}27954[27954] +- Add option to skip older k8s events {pull}29396[29396] - Add `add_resource_metadata` configuration to Kubernetes module. 
{pull}29133[29133] *Packetbeat* diff --git a/metricbeat/docs/modules/kubernetes.asciidoc b/metricbeat/docs/modules/kubernetes.asciidoc index 52f0b07a75f..dd603e9f056 100644 --- a/metricbeat/docs/modules/kubernetes.asciidoc +++ b/metricbeat/docs/modules/kubernetes.asciidoc @@ -281,6 +281,28 @@ metricbeat.modules: # qps: 5 # burst: 10 +# Kubernetes Events +- module: kubernetes + enabled: true + metricsets: + - event + period: 10s + # Skip events older than Metricbeat's statup time is enabled by default. + # Setting to false the skip_older setting will stop filtering older events. + # This setting is also useful went Event's timestamps are not populated properly. + #skip_older: false + # If kube_config is not set, KUBECONFIG environment variable will be checked + # and if not present it will fall back to InCluster + #kube_config: ~/.kube/config + # Set the namespace to watch for events + #namespace: staging + # Set the sync period of the watchers + #sync_period: 10m + # Kubernetes client QPS and burst can be configured additionally + #kube_client_options: + # qps: 5 + # burst: 10 + # Kubernetes API server # (when running metricbeat as a deployment) - module: kubernetes @@ -293,8 +315,6 @@ metricbeat.modules: - /var/run/secrets/kubernetes.io/serviceaccount/ca.crt period: 30s - - # Kubernetes proxy server # (when running metricbeat locally at hosts or as a daemonset + host network) - module: kubernetes diff --git a/metricbeat/metricbeat.reference.yml b/metricbeat/metricbeat.reference.yml index 46adc5cfdb4..d0ef9b6f6d3 100644 --- a/metricbeat/metricbeat.reference.yml +++ b/metricbeat/metricbeat.reference.yml @@ -559,6 +559,28 @@ metricbeat.modules: # qps: 5 # burst: 10 +# Kubernetes Events +- module: kubernetes + enabled: true + metricsets: + - event + period: 10s + # Skip events older than Metricbeat's statup time is enabled by default. + # Setting to false the skip_older setting will stop filtering older events. 
+ # This setting is also useful went Event's timestamps are not populated properly. + #skip_older: false + # If kube_config is not set, KUBECONFIG environment variable will be checked + # and if not present it will fall back to InCluster + #kube_config: ~/.kube/config + # Set the namespace to watch for events + #namespace: staging + # Set the sync period of the watchers + #sync_period: 10m + # Kubernetes client QPS and burst can be configured additionally + #kube_client_options: + # qps: 5 + # burst: 10 + # Kubernetes API server # (when running metricbeat as a deployment) - module: kubernetes @@ -571,8 +593,6 @@ metricbeat.modules: - /var/run/secrets/kubernetes.io/serviceaccount/ca.crt period: 30s - - # Kubernetes proxy server # (when running metricbeat locally at hosts or as a daemonset + host network) - module: kubernetes diff --git a/metricbeat/module/kubernetes/_meta/config.reference.yml b/metricbeat/module/kubernetes/_meta/config.reference.yml index 2a9f4601f86..3a6e22af69b 100644 --- a/metricbeat/module/kubernetes/_meta/config.reference.yml +++ b/metricbeat/module/kubernetes/_meta/config.reference.yml @@ -82,6 +82,28 @@ # qps: 5 # burst: 10 +# Kubernetes Events +- module: kubernetes + enabled: true + metricsets: + - event + period: 10s + # Skip events older than Metricbeat's statup time is enabled by default. + # Setting to false the skip_older setting will stop filtering older events. + # This setting is also useful went Event's timestamps are not populated properly. 
+ #skip_older: false + # If kube_config is not set, KUBECONFIG environment variable will be checked + # and if not present it will fall back to InCluster + #kube_config: ~/.kube/config + # Set the namespace to watch for events + #namespace: staging + # Set the sync period of the watchers + #sync_period: 10m + # Kubernetes client QPS and burst can be configured additionally + #kube_client_options: + # qps: 5 + # burst: 10 + # Kubernetes API server # (when running metricbeat as a deployment) - module: kubernetes @@ -94,8 +116,6 @@ - /var/run/secrets/kubernetes.io/serviceaccount/ca.crt period: 30s - - # Kubernetes proxy server # (when running metricbeat locally at hosts or as a daemonset + host network) - module: kubernetes diff --git a/metricbeat/module/kubernetes/_meta/config.yml b/metricbeat/module/kubernetes/_meta/config.yml index 94b0a00427f..9e8eaee745c 100644 --- a/metricbeat/module/kubernetes/_meta/config.yml +++ b/metricbeat/module/kubernetes/_meta/config.yml @@ -58,7 +58,24 @@ # hosts: ["kube-state-metrics:8080"] # add_metadata: true -# Kubernetes events +# Kubernetes Events #- module: kubernetes +# enabled: true # metricsets: # - event +# period: 10s +# # Skip events older than Metricbeat's statup time is enabled by default. +# # Setting to false the skip_older setting will stop filtering older events. +# # This setting is also useful went Event's timestamps are not populated properly. 
+# skip_older: false +# # If kube_config is not set, KUBECONFIG environment variable will be checked +# # and if not present it will fall back to InCluster +# kube_config: ~/.kube/config +# # Set the namespace to watch for events +# namespace: staging +# # Set the sync period of the watchers +# sync_period: 10m +# # Kubernetes client QPS and burst can be configured additionally +# kube_client_options: +# qps: 5 +# burst: 10 diff --git a/metricbeat/module/kubernetes/event/config.go b/metricbeat/module/kubernetes/event/config.go index 07e9e7da64e..be9b0c21a15 100644 --- a/metricbeat/module/kubernetes/event/config.go +++ b/metricbeat/module/kubernetes/event/config.go @@ -30,6 +30,7 @@ type kubeEventsConfig struct { SyncPeriod time.Duration `config:"sync_period"` LabelsDedot bool `config:"labels.dedot"` AnnotationsDedot bool `config:"annotations.dedot"` + SkipOlder bool `config:"skip_older"` } type Enabled struct { @@ -41,5 +42,6 @@ func defaultKubernetesEventsConfig() kubeEventsConfig { SyncPeriod: 10 * time.Minute, LabelsDedot: true, AnnotationsDedot: true, + SkipOlder: true, } } diff --git a/metricbeat/module/kubernetes/event/event.go b/metricbeat/module/kubernetes/event/event.go index a1504b6ff05..723e1331e61 100644 --- a/metricbeat/module/kubernetes/event/event.go +++ b/metricbeat/module/kubernetes/event/event.go @@ -41,6 +41,7 @@ type MetricSet struct { watcher kubernetes.Watcher watchOptions kubernetes.WatchOptions dedotConfig dedotConfig + skipOlder bool } // dedotConfig defines LabelsDedot and AnnotationsDedot. 
@@ -87,6 +88,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { dedotConfig: dedotConfig, watcher: watcher, watchOptions: watchOptions, + skipOlder: config.SkipOlder, }, nil } @@ -104,10 +106,10 @@ func (m *MetricSet) Run(reporter mb.PushReporter) { DeleteFunc: nil, } m.watcher.AddEventHandler(kubernetes.FilteringResourceEventHandler{ - // skip events happened before watch FilterFunc: func(obj interface{}) bool { eve := obj.(*kubernetes.Event) - if kubernetes.Time(&eve.LastTimestamp).Before(now) { + // if skipOlder, skip events happened before watch + if m.skipOlder && kubernetes.Time(&eve.LastTimestamp).Before(now) { return false } return true diff --git a/metricbeat/modules.d/kubernetes.yml.disabled b/metricbeat/modules.d/kubernetes.yml.disabled index 144d13fb301..fcfeec14875 100644 --- a/metricbeat/modules.d/kubernetes.yml.disabled +++ b/metricbeat/modules.d/kubernetes.yml.disabled @@ -61,7 +61,24 @@ # hosts: ["kube-state-metrics:8080"] # add_metadata: true -# Kubernetes events +# Kubernetes Events #- module: kubernetes +# enabled: true # metricsets: # - event +# period: 10s +# # Skip events older than Metricbeat's statup time is enabled by default. +# # Setting to false the skip_older setting will stop filtering older events. +# # This setting is also useful went Event's timestamps are not populated properly. 
+# skip_older: false +# # If kube_config is not set, KUBECONFIG environment variable will be checked +# # and if not present it will fall back to InCluster +# kube_config: ~/.kube/config +# # Set the namespace to watch for events +# namespace: staging +# # Set the sync period of the watchers +# sync_period: 10m +# # Kubernetes client QPS and burst can be configured additionally +# kube_client_options: +# qps: 5 +# burst: 10 diff --git a/x-pack/metricbeat/metricbeat.reference.yml b/x-pack/metricbeat/metricbeat.reference.yml index c89b9288c47..e09b1c7989b 100644 --- a/x-pack/metricbeat/metricbeat.reference.yml +++ b/x-pack/metricbeat/metricbeat.reference.yml @@ -931,6 +931,28 @@ metricbeat.modules: # qps: 5 # burst: 10 +# Kubernetes Events +- module: kubernetes + enabled: true + metricsets: + - event + period: 10s + # Skip events older than Metricbeat's statup time is enabled by default. + # Setting to false the skip_older setting will stop filtering older events. + # This setting is also useful went Event's timestamps are not populated properly. 
+ #skip_older: false + # If kube_config is not set, KUBECONFIG environment variable will be checked + # and if not present it will fall back to InCluster + #kube_config: ~/.kube/config + # Set the namespace to watch for events + #namespace: staging + # Set the sync period of the watchers + #sync_period: 10m + # Kubernetes client QPS and burst can be configured additionally + #kube_client_options: + # qps: 5 + # burst: 10 + # Kubernetes API server # (when running metricbeat as a deployment) - module: kubernetes @@ -943,8 +965,6 @@ metricbeat.modules: - /var/run/secrets/kubernetes.io/serviceaccount/ca.crt period: 30s - - # Kubernetes proxy server # (when running metricbeat locally at hosts or as a daemonset + host network) - module: kubernetes From fcb7b81faaece55703c443a32e26b94a1d1f6164 Mon Sep 17 00:00:00 2001 From: Michel Laterman <82832767+michel-laterman@users.noreply.github.com> Date: Tue, 14 Dec 2021 11:28:23 -0800 Subject: [PATCH 06/57] elastic-agent elasticsearch CA fingerprint support (#29128) * initial commit * Update Fingerprint method * Switch to using newly added CATrustedFingerprint attribute * rename flag * Fix broken flag * Add CHANGELOG * Add container support --- x-pack/elastic-agent/CHANGELOG.next.asciidoc | 1 + .../elastic-agent/pkg/agent/cmd/container.go | 4 +++ x-pack/elastic-agent/pkg/agent/cmd/enroll.go | 8 ++++++ .../elastic-agent/pkg/agent/cmd/enroll_cmd.go | 17 +++++++++--- .../pkg/agent/cmd/setup_config.go | 26 ++++++++++--------- 5 files changed, 41 insertions(+), 15 deletions(-) diff --git a/x-pack/elastic-agent/CHANGELOG.next.asciidoc b/x-pack/elastic-agent/CHANGELOG.next.asciidoc index b75a405d4bb..e12cade2afb 100644 --- a/x-pack/elastic-agent/CHANGELOG.next.asciidoc +++ b/x-pack/elastic-agent/CHANGELOG.next.asciidoc @@ -151,4 +151,5 @@ - Add diagnostics collect command to gather beat metadata, config, policy, and logs and bundle it into an archive. {pull}28461[28461] - Add `KIBANA_FLEET_SERVICE_TOKEN` to Elastic Agent container. 
{pull}28096[28096] - Allow pprof endpoints for elastic-agent or beats if enabled. {pull}28983[28983] {pull}29155[29155] +- Add --fleet-server-es-ca-trusted-fingerprint flag to allow agent/fleet-server to work with elasticsearch clusters using self signed certs. {pull}29128[29128] - Discover changes in Kubernetes nodes metadata as soon as they happen. {pull}23139[23139] diff --git a/x-pack/elastic-agent/pkg/agent/cmd/container.go b/x-pack/elastic-agent/pkg/agent/cmd/container.go index e0f9f3dcfb7..d72b0128430 100644 --- a/x-pack/elastic-agent/pkg/agent/cmd/container.go +++ b/x-pack/elastic-agent/pkg/agent/cmd/container.go @@ -96,6 +96,7 @@ The following actions are possible and grouped based on the actions. FLEET_SERVER_ELASTICSEARCH_USERNAME - elasticsearch username for Fleet Server [$ELASTICSEARCH_USERNAME] FLEET_SERVER_ELASTICSEARCH_PASSWORD - elasticsearch password for Fleet Server [$ELASTICSEARCH_PASSWORD] FLEET_SERVER_ELASTICSEARCH_CA - path to certificate authority to use with communicate with elasticsearch [$ELASTICSEARCH_CA] + FLEET_SERVER_ELASTICSEARCH_CA_TRUSTED_FINGERPRINT - The sha-256 fingerprint value of the certificate authority to trust FLEET_SERVER_ELASTICSEARCH_INSECURE - disables cert validation for communication with Elasticsearch FLEET_SERVER_SERVICE_TOKEN - service token to use for communication with elasticsearch FLEET_SERVER_POLICY_ID - policy ID for Fleet Server to use for itself ("Default Fleet Server policy" used when undefined) @@ -359,6 +360,9 @@ func buildEnrollArgs(cfg setupConfig, token string, policyID string) ([]string, if cfg.FleetServer.Elasticsearch.CA != "" { args = append(args, "--fleet-server-es-ca", cfg.FleetServer.Elasticsearch.CA) } + if cfg.FleetServer.Elasticsearch.CATrustedFingerprint != "" { + args = append(args, "--fleet-server-es-ca-trusted-fingerprint", cfg.FleetServer.Elasticsearch.CATrustedFingerprint) + } if cfg.FleetServer.Host != "" { args = append(args, "--fleet-server-host", cfg.FleetServer.Host) } diff --git 
a/x-pack/elastic-agent/pkg/agent/cmd/enroll.go b/x-pack/elastic-agent/pkg/agent/cmd/enroll.go index 126161fa4c4..6a66b4fdfc1 100644 --- a/x-pack/elastic-agent/pkg/agent/cmd/enroll.go +++ b/x-pack/elastic-agent/pkg/agent/cmd/enroll.go @@ -53,6 +53,7 @@ func addEnrollFlags(cmd *cobra.Command) { cmd.Flags().StringP("enrollment-token", "t", "", "Enrollment token to use to enroll Agent into Fleet") cmd.Flags().StringP("fleet-server-es", "", "", "Start and run a Fleet Server along side this Elastic Agent connecting to the provided elasticsearch") cmd.Flags().StringP("fleet-server-es-ca", "", "", "Path to certificate authority to use with communicate with elasticsearch") + cmd.Flags().StringP("fleet-server-es-ca-trusted-fingerprint", "", "", "Elasticsearch certificate authority's SHA256 fingerprint") cmd.Flags().BoolP("fleet-server-es-insecure", "", false, "Disables validation of certificates") cmd.Flags().StringP("fleet-server-service-token", "", "", "Service token to use for communication with elasticsearch") cmd.Flags().StringP("fleet-server-policy", "", "", "Start and run a Fleet Server on this specific policy") @@ -103,6 +104,7 @@ func buildEnrollmentFlags(cmd *cobra.Command, url string, token string) []string } fServer, _ := cmd.Flags().GetString("fleet-server-es") fElasticSearchCA, _ := cmd.Flags().GetString("fleet-server-es-ca") + fElasticSearchCASHA256, _ := cmd.Flags().GetString("fleet-server-es-ca-trusted-fingerprint") fElasticSearchInsecure, _ := cmd.Flags().GetBool("fleet-server-es-insecure") fServiceToken, _ := cmd.Flags().GetString("fleet-server-service-token") fPolicy, _ := cmd.Flags().GetString("fleet-server-policy") @@ -140,6 +142,10 @@ func buildEnrollmentFlags(cmd *cobra.Command, url string, token string) []string args = append(args, "--fleet-server-es-ca") args = append(args, fElasticSearchCA) } + if fElasticSearchCASHA256 != "" { + args = append(args, "--fleet-server-es-ca-trusted-fingerprint") + args = append(args, fElasticSearchCASHA256) + } if 
fServiceToken != "" { args = append(args, "--fleet-server-service-token") args = append(args, fServiceToken) @@ -285,6 +291,7 @@ func enroll(streams *cli.IOStreams, cmd *cobra.Command, args []string) error { enrollmentToken, _ := cmd.Flags().GetString("enrollment-token") fServer, _ := cmd.Flags().GetString("fleet-server-es") fElasticSearchCA, _ := cmd.Flags().GetString("fleet-server-es-ca") + fElasticSearchCASHA256, _ := cmd.Flags().GetString("fleet-server-es-ca-trusted-fingerprint") fElasticSearchInsecure, _ := cmd.Flags().GetBool("fleet-server-es-insecure") fHeaders, _ := cmd.Flags().GetStringSlice("header") fServiceToken, _ := cmd.Flags().GetString("fleet-server-service-token") @@ -326,6 +333,7 @@ func enroll(streams *cli.IOStreams, cmd *cobra.Command, args []string) error { FleetServer: enrollCmdFleetServerOption{ ConnStr: fServer, ElasticsearchCA: fElasticSearchCA, + ElasticsearchCASHA256: fElasticSearchCASHA256, ElasticsearchInsecure: fElasticSearchInsecure, ServiceToken: fServiceToken, PolicyID: fPolicy, diff --git a/x-pack/elastic-agent/pkg/agent/cmd/enroll_cmd.go b/x-pack/elastic-agent/pkg/agent/cmd/enroll_cmd.go index 25365c9afe9..0023d83fde0 100644 --- a/x-pack/elastic-agent/pkg/agent/cmd/enroll_cmd.go +++ b/x-pack/elastic-agent/pkg/agent/cmd/enroll_cmd.go @@ -77,6 +77,7 @@ type enrollCmd struct { type enrollCmdFleetServerOption struct { ConnStr string ElasticsearchCA string + ElasticsearchCASHA256 string ElasticsearchInsecure bool ServiceToken string PolicyID string @@ -110,6 +111,7 @@ type enrollCmdOption struct { FleetServer enrollCmdFleetServerOption `yaml:"-"` } +// remoteConfig returns the configuration used to connect the agent to a fleet process. 
func (e *enrollCmdOption) remoteConfig() (remote.Config, error) { cfg, err := remote.NewConfigFromURL(e.URL) if err != nil { @@ -311,7 +313,7 @@ func (c *enrollCmd) fleetServerBootstrap(ctx context.Context, persistentConfig m c.options.FleetServer.ConnStr, c.options.FleetServer.ServiceToken, c.options.FleetServer.PolicyID, c.options.FleetServer.Host, c.options.FleetServer.Port, c.options.FleetServer.InternalPort, - c.options.FleetServer.Cert, c.options.FleetServer.CertKey, c.options.FleetServer.ElasticsearchCA, + c.options.FleetServer.Cert, c.options.FleetServer.CertKey, c.options.FleetServer.ElasticsearchCA, c.options.FleetServer.ElasticsearchCASHA256, c.options.FleetServer.Headers, c.options.ProxyURL, c.options.ProxyDisabled, @@ -517,7 +519,7 @@ func (c *enrollCmd) enroll(ctx context.Context, persistentConfig map[string]inte c.options.FleetServer.ConnStr, c.options.FleetServer.ServiceToken, c.options.FleetServer.PolicyID, c.options.FleetServer.Host, c.options.FleetServer.Port, c.options.FleetServer.InternalPort, - c.options.FleetServer.Cert, c.options.FleetServer.CertKey, c.options.FleetServer.ElasticsearchCA, + c.options.FleetServer.Cert, c.options.FleetServer.CertKey, c.options.FleetServer.ElasticsearchCA, c.options.FleetServer.ElasticsearchCASHA256, c.options.FleetServer.Headers, c.options.ProxyURL, c.options.ProxyDisabled, c.options.ProxyHeaders, c.options.FleetServer.ElasticsearchInsecure, @@ -853,7 +855,7 @@ func storeAgentInfo(s saver, reader io.Reader) error { func createFleetServerBootstrapConfig( connStr, serviceToken, policyID, host string, port uint16, internalPort uint16, - cert, key, esCA string, + cert, key, esCA, esCASHA256 string, headers map[string]string, proxyURL string, proxyDisabled bool, @@ -875,6 +877,15 @@ func createFleetServerBootstrapConfig( es.TLS.CAs = []string{esCA} } } + if esCASHA256 != "" { + if es.TLS == nil { + es.TLS = &tlscommon.Config{ + CATrustedFingerprint: esCASHA256, + } + } else { + es.TLS.CATrustedFingerprint = 
esCASHA256 + } + } if host == "" { host = defaultFleetServerHost } diff --git a/x-pack/elastic-agent/pkg/agent/cmd/setup_config.go b/x-pack/elastic-agent/pkg/agent/cmd/setup_config.go index 6b7e1edf8b3..b33c0f8fa8e 100644 --- a/x-pack/elastic-agent/pkg/agent/cmd/setup_config.go +++ b/x-pack/elastic-agent/pkg/agent/cmd/setup_config.go @@ -40,12 +40,13 @@ type fleetServerConfig struct { } type elasticsearchConfig struct { - CA string `config:"ca"` - Host string `config:"host"` - Username string `config:"username"` - Password string `config:"password"` - ServiceToken string `config:"service_token"` - Insecure bool `config:"insecure"` + CA string `config:"ca"` + CATrustedFingerprint string `config:"ca_trusted_fingerprint"` + Host string `config:"host"` + Username string `config:"username"` + Password string `config:"password"` + ServiceToken string `config:"service_token"` + Insecure bool `config:"insecure"` } type kibanaConfig struct { @@ -91,12 +92,13 @@ func defaultAccessConfig() (setupConfig, error) { Cert: envWithDefault("", "FLEET_SERVER_CERT"), CertKey: envWithDefault("", "FLEET_SERVER_CERT_KEY"), Elasticsearch: elasticsearchConfig{ - Host: envWithDefault("http://elasticsearch:9200", "FLEET_SERVER_ELASTICSEARCH_HOST", "ELASTICSEARCH_HOST"), - Username: envWithDefault("elastic", "FLEET_SERVER_ELASTICSEARCH_USERNAME", "ELASTICSEARCH_USERNAME"), - Password: envWithDefault("changeme", "FLEET_SERVER_ELASTICSEARCH_PASSWORD", "ELASTICSEARCH_PASSWORD"), - ServiceToken: envWithDefault("", "FLEET_SERVER_SERVICE_TOKEN"), - CA: envWithDefault("", "FLEET_SERVER_ELASTICSEARCH_CA", "ELASTICSEARCH_CA"), - Insecure: envBool("FLEET_SERVER_ELASTICSEARCH_INSECURE"), + Host: envWithDefault("http://elasticsearch:9200", "FLEET_SERVER_ELASTICSEARCH_HOST", "ELASTICSEARCH_HOST"), + Username: envWithDefault("elastic", "FLEET_SERVER_ELASTICSEARCH_USERNAME", "ELASTICSEARCH_USERNAME"), + Password: envWithDefault("changeme", "FLEET_SERVER_ELASTICSEARCH_PASSWORD", "ELASTICSEARCH_PASSWORD"), + 
ServiceToken: envWithDefault("", "FLEET_SERVER_SERVICE_TOKEN"), + CA: envWithDefault("", "FLEET_SERVER_ELASTICSEARCH_CA", "ELASTICSEARCH_CA"), + CATrustedFingerprint: envWithDefault("", "FLEET_SERVER_ELASTICSEARCH_CA_TRUSTED_FINGERPRINT"), + Insecure: envBool("FLEET_SERVER_ELASTICSEARCH_INSECURE"), }, Enable: envBool("FLEET_SERVER_ENABLE"), Host: envWithDefault("", "FLEET_SERVER_HOST"), From f5e0ec4445a7f9ba2860899e06ddaca16a96ceb0 Mon Sep 17 00:00:00 2001 From: Michel Laterman <82832767+michel-laterman@users.noreply.github.com> Date: Tue, 14 Dec 2021 12:00:58 -0800 Subject: [PATCH 07/57] elastic-agent diagnostics pprof (#28798) * Allow -httpprof to bind to sockets/pipes * Enable pprof debug endpoint on socket for agent and beats Force the elastic-agent and all beats that it starts to run the http/pprof listener on a local socket. * Add new Pprof command to control.proto * Add pprof option to diagnostics collect * Fix linting issues * Add diagonstics pprof command allow pprof to collect from agent * Revert debug socket changes * Cleanup timeout handling Change pprof timeouts from 2*pprofDur to 30s+pprofDur. Remove timeouts from the socket requester client as cancellations for long running requests will be handled by the passed ctx. * Fix linting issue add timeout flag Fix linting issues with new command. Add a timeout flag for when pprof info is gathered. Flag will let users specify the command timeout value. This value whould be greater then the pprof-duration as it needs to gather and process pprof data. * Add more command help text. 
* Add CHANGELOG * move spec collection for routes to fn * add monitoringCfg reference to control server * elastic-agent server only processes pprof requests when enabled * Fix error message fix commands only on elastic-agent * Add pprof fleet.yml, fix nil reference * Change pprof setting name to monitoring.pprof.enabled Chagne the setting in elastic agent from agent.monioring.pprof to agent.monitoring.pprof.enabled so that policy updates (such as the one that occurs when the agent is starting in fleet mode) do not use the default false value if the user has injected the ssetting into fleet.yml --- x-pack/elastic-agent/CHANGELOG.next.asciidoc | 2 + .../_meta/config/common.p2.yml.tmpl | 2 +- .../_meta/config/common.reference.p2.yml.tmpl | 2 +- .../config/elastic-agent.docker.yml.tmpl | 2 +- x-pack/elastic-agent/control.proto | 42 ++ x-pack/elastic-agent/elastic-agent.docker.yml | 2 +- .../elastic-agent/elastic-agent.reference.yml | 2 +- x-pack/elastic-agent/elastic-agent.yml | 2 +- .../handlers/handler_action_policy_change.go | 7 +- .../pkg/agent/cmd/diagnostics.go | 200 ++++++- x-pack/elastic-agent/pkg/agent/cmd/run.go | 3 +- .../pkg/agent/control/client/client.go | 36 ++ .../pkg/agent/control/proto/control.pb.go | 513 +++++++++++++++--- .../pkg/agent/control/server/server.go | 352 +++++++++--- .../core/monitoring/beats/beats_monitor.go | 6 +- .../pkg/core/monitoring/config/config.go | 10 +- 16 files changed, 1031 insertions(+), 152 deletions(-) diff --git a/x-pack/elastic-agent/CHANGELOG.next.asciidoc b/x-pack/elastic-agent/CHANGELOG.next.asciidoc index e12cade2afb..374c63acdb0 100644 --- a/x-pack/elastic-agent/CHANGELOG.next.asciidoc +++ b/x-pack/elastic-agent/CHANGELOG.next.asciidoc @@ -150,6 +150,8 @@ - Add diagnostics command to gather beat metadata. {pull}28265[28265] - Add diagnostics collect command to gather beat metadata, config, policy, and logs and bundle it into an archive. 
{pull}28461[28461] - Add `KIBANA_FLEET_SERVICE_TOKEN` to Elastic Agent container. {pull}28096[28096] +- Enable pprof endpoints for beats processes. Allow pprof endpoints for elastic-agent if enabled. {pull}28983[28983] +- Add `--pprof` flag to `elastic-agent diagnostics` and an `elastic-agent pprof` command to allow operators to gather pprof data from the agent and beats running under it. {pull}28798[28798] - Allow pprof endpoints for elastic-agent or beats if enabled. {pull}28983[28983] {pull}29155[29155] - Add --fleet-server-es-ca-trusted-fingerprint flag to allow agent/fleet-server to work with elasticsearch clusters using self signed certs. {pull}29128[29128] - Discover changes in Kubernetes nodes metadata as soon as they happen. {pull}23139[23139] diff --git a/x-pack/elastic-agent/_meta/config/common.p2.yml.tmpl b/x-pack/elastic-agent/_meta/config/common.p2.yml.tmpl index de16df8ea7f..e8f4c31e8e1 100644 --- a/x-pack/elastic-agent/_meta/config/common.p2.yml.tmpl +++ b/x-pack/elastic-agent/_meta/config/common.p2.yml.tmpl @@ -35,7 +35,7 @@ inputs: # metrics: true # # exposes /debug/pprof/ endpoints # # recommended that these endpoints are only enabled if the monitoring endpoint is set to localhost -# pprof: false +# pprof.enabled: false # # exposes agent metrics using http, by default sockets and named pipes are used # http: # # enables http endpoint diff --git a/x-pack/elastic-agent/_meta/config/common.reference.p2.yml.tmpl b/x-pack/elastic-agent/_meta/config/common.reference.p2.yml.tmpl index 43e48464630..8a3ef077357 100644 --- a/x-pack/elastic-agent/_meta/config/common.reference.p2.yml.tmpl +++ b/x-pack/elastic-agent/_meta/config/common.reference.p2.yml.tmpl @@ -109,7 +109,7 @@ inputs: # metrics: false # # exposes /debug/pprof/ endpoints # # recommended that these endpoints are only enabled if the monitoring endpoint is set to localhost -# pprof: false +# pprof.enabled: false # # exposes agent metrics using http, by default sockets and named pipes are used # 
http: # # enables http endpoint diff --git a/x-pack/elastic-agent/_meta/config/elastic-agent.docker.yml.tmpl b/x-pack/elastic-agent/_meta/config/elastic-agent.docker.yml.tmpl index 69a80678db8..17201aa6dce 100644 --- a/x-pack/elastic-agent/_meta/config/elastic-agent.docker.yml.tmpl +++ b/x-pack/elastic-agent/_meta/config/elastic-agent.docker.yml.tmpl @@ -109,7 +109,7 @@ inputs: # metrics: false # # exposes /debug/pprof/ endpoints # # recommended that these endpoints are only enabled if the monitoring endpoint is set to localhost -# pprof: false +# pprof.enabled: false # # exposes agent metrics using http, by default sockets and named pipes are used # http: # # enables http endpoint diff --git a/x-pack/elastic-agent/control.proto b/x-pack/elastic-agent/control.proto index 26b6552c395..53168f872ba 100644 --- a/x-pack/elastic-agent/control.proto +++ b/x-pack/elastic-agent/control.proto @@ -29,6 +29,19 @@ enum ActionStatus { FAILURE = 1; } +// pprof endpoint that can be requested. +enum PprofOption { + ALLOCS = 0; + BLOCK = 1; + CMDLINE = 2; + GOROUTINE = 3; + HEAP = 4; + MUTEX = 5; + PROFILE = 6; + THREADCREATE = 7; + TRACE = 8; +} + // Empty message. message Empty { } @@ -128,6 +141,32 @@ message ProcMetaResponse { repeated ProcMeta procs = 1; } +// PprofRequest is a request for pprof data from and http/pprof endpoint. +message PprofRequest { + // The profiles that are requested + repeated PprofOption pprofType = 1; + // A string representing a time.Duration to apply to trace, and profile options. + string traceDuration = 2; + // The application that will be profiled, if empty all applications are profiled. + string appName = 3; + // The route key to match for profiling, if empty all are profiled. + string routeKey = 4; +} + +// PprofResult is the result of a pprof request for a given application/route key. 
+message PprofResult { + string appName = 1; + string routeKey = 2; + PprofOption pprofType = 3; + bytes result = 4; + string error = 5; +} + +// PprofResponse is a wrapper to return all pprof responses. +message PprofResponse { + repeated PprofResult results = 1; +} + service ElasticAgentControl { // Fetches the currently running version of the Elastic Agent. rpc Version(Empty) returns (VersionResponse); @@ -143,4 +182,7 @@ service ElasticAgentControl { // Gather all running process metadata. rpc ProcMeta(Empty) returns (ProcMetaResponse); + + // Gather requested pprof data from specified applications. + rpc Pprof(PprofRequest) returns (PprofResponse); } diff --git a/x-pack/elastic-agent/elastic-agent.docker.yml b/x-pack/elastic-agent/elastic-agent.docker.yml index 9bf7307aacf..b7d5ff2017e 100644 --- a/x-pack/elastic-agent/elastic-agent.docker.yml +++ b/x-pack/elastic-agent/elastic-agent.docker.yml @@ -109,7 +109,7 @@ inputs: # metrics: false # # exposes /debug/pprof/ endpoints # # recommended that these endpoints are only enabled if the monitoring endpoint is set to localhost -# pprof: false +# pprof.enabled: false # # exposes agent metrics using http, by default sockets and named pipes are used # http: # # enables http endpoint diff --git a/x-pack/elastic-agent/elastic-agent.reference.yml b/x-pack/elastic-agent/elastic-agent.reference.yml index 67922a1d89c..da04df95ea8 100644 --- a/x-pack/elastic-agent/elastic-agent.reference.yml +++ b/x-pack/elastic-agent/elastic-agent.reference.yml @@ -115,7 +115,7 @@ inputs: # metrics: false # # exposes /debug/pprof/ endpoints # # recommended that these endpoints are only enabled if the monitoring endpoint is set to localhost -# pprof: false +# pprof.enabled: false # # exposes agent metrics using http, by default sockets and named pipes are used # http: # # enables http endpoint diff --git a/x-pack/elastic-agent/elastic-agent.yml b/x-pack/elastic-agent/elastic-agent.yml index d40b6518e8d..802df992ba7 100644 --- 
a/x-pack/elastic-agent/elastic-agent.yml +++ b/x-pack/elastic-agent/elastic-agent.yml @@ -41,7 +41,7 @@ inputs: # metrics: true # # exposes /debug/pprof/ endpoints # # recommended that these endpoints are only enabled if the monitoring endpoint is set to localhost -# pprof: false +# pprof.enabled: false # # exposes agent metrics using http, by default sockets and named pipes are used # http: # # enables http endpoint diff --git a/x-pack/elastic-agent/pkg/agent/application/pipeline/actions/handlers/handler_action_policy_change.go b/x-pack/elastic-agent/pkg/agent/application/pipeline/actions/handlers/handler_action_policy_change.go index f14ec7aea81..e00ccfc844b 100644 --- a/x-pack/elastic-agent/pkg/agent/application/pipeline/actions/handlers/handler_action_policy_change.go +++ b/x-pack/elastic-agent/pkg/agent/application/pipeline/actions/handlers/handler_action_policy_change.go @@ -197,9 +197,10 @@ func fleetToReader(agentInfo *info.AgentInfo, cfg *configuration.Configuration) configToStore := map[string]interface{}{ "fleet": cfg.Fleet, "agent": map[string]interface{}{ - "id": agentInfo.AgentID(), - "logging.level": cfg.Settings.LoggingConfig.Level, - "monitoring.http": cfg.Settings.MonitoringConfig.HTTP, + "id": agentInfo.AgentID(), + "logging.level": cfg.Settings.LoggingConfig.Level, + "monitoring.http": cfg.Settings.MonitoringConfig.HTTP, + "monitoring.pprof": cfg.Settings.MonitoringConfig.Pprof, }, } diff --git a/x-pack/elastic-agent/pkg/agent/cmd/diagnostics.go b/x-pack/elastic-agent/pkg/agent/cmd/diagnostics.go index e90e4ab13c1..b32edf6df2d 100644 --- a/x-pack/elastic-agent/pkg/agent/cmd/diagnostics.go +++ b/x-pack/elastic-agent/pkg/agent/cmd/diagnostics.go @@ -25,6 +25,7 @@ import ( "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/paths" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/configuration" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/control/client" + 
"github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/control/proto" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/errors" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/cli" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/config/operations" @@ -63,6 +64,7 @@ func newDiagnosticsCommand(s []string, streams *cli.IOStreams) *cobra.Command { cmd.Flags().String("output", "human", "Output the diagnostics information in either human, json, or yaml (default: human)") cmd.AddCommand(newDiagnosticsCollectCommandWithArgs(s, streams)) + cmd.AddCommand(newDiagnosticsPprofCommandWithArgs(s, streams)) return cmd } @@ -72,7 +74,7 @@ func newDiagnosticsCollectCommandWithArgs(_ []string, streams *cli.IOStreams) *c Use: "collect", Short: "Collect diagnostics information from the elastic-agent and write it to a zip archive.", Long: "Collect diagnostics information from the elastic-agent and write it to a zip archive.\nNote that any credentials will appear in plain text.", - Args: cobra.MaximumNArgs(1), + Args: cobra.MaximumNArgs(3), RunE: func(c *cobra.Command, args []string) error { file, _ := c.Flags().GetString("file") @@ -89,12 +91,58 @@ func newDiagnosticsCollectCommandWithArgs(_ []string, streams *cli.IOStreams) *c return fmt.Errorf("unsupported output: %s", output) } - return diagnosticsCollectCmd(streams, file, output) + pprof, _ := c.Flags().GetBool("pprof") + d, _ := c.Flags().GetDuration("pprof-duration") + // get the command timeout value only if one is set explicitly. + // otherwise a value of 30s + pprof-duration will be used. 
+ var timeout time.Duration + if c.Flags().Changed("timeout") { + timeout, _ = c.Flags().GetDuration("timeout") + } + + return diagnosticsCollectCmd(streams, file, output, pprof, d, timeout) }, } cmd.Flags().StringP("file", "f", "", "name of the output diagnostics zip archive") cmd.Flags().String("output", "yaml", "Output the collected information in either json, or yaml (default: yaml)") // replace output flag with different options + cmd.Flags().Bool("pprof", false, "Collect all pprof data from all running applications.") + cmd.Flags().Duration("pprof-duration", time.Second*30, "The duration to collect trace and profiling data from the debug/pprof endpoints. (default: 30s)") + cmd.Flags().Duration("timeout", time.Second*30, "The timeout for the diagnostics collect command, will be either 30s or 30s+pprof-duration by default. Should be longer then pprof-duration when pprof is enabled as the command needs time to process/archive the response.") + + return cmd +} + +func newDiagnosticsPprofCommandWithArgs(_ []string, streams *cli.IOStreams) *cobra.Command { + cmd := &cobra.Command{ + Use: "pprof", + Short: "Collect pprof information from a running process.", + Long: "Collect pprof information from the elastic-agent or one of its processes and write to stdout or a file.\nBy default it will gather a 30s profile of the elastic-agent and output on stdout.", + Args: cobra.MaximumNArgs(5), + RunE: func(c *cobra.Command, args []string) error { + file, _ := c.Flags().GetString("file") + pprofType, _ := c.Flags().GetString("pprof-type") + d, _ := c.Flags().GetDuration("pprof-duration") + // get the command timeout value only if one is set explicitly. + // otherwise a value of 30s + pprof-duration will be used. 
+ var timeout time.Duration + if c.Flags().Changed("timeout") { + timeout, _ = c.Flags().GetDuration("timeout") + } + + pprofApp, _ := c.Flags().GetString("pprof-application") + pprofRK, _ := c.Flags().GetString("pprof-route-key") + + return diagnosticsPprofCmd(streams, d, timeout, file, pprofType, pprofApp, pprofRK) + }, + } + + cmd.Flags().StringP("file", "f", "", "name of the output file, stdout if unspecified.") + cmd.Flags().String("pprof-type", "profile", "Collect all pprof data from all running applications. Select one of [allocs, block, cmdline, goroutine, heap, mutex, profile, threadcreate, trace]") + cmd.Flags().Duration("pprof-duration", time.Second*30, "The duration to collect trace and profiling data from the debug/pprof endpoints. (default: 30s)") + cmd.Flags().Duration("timeout", time.Second*60, "The timeout for the pprof collect command, defaults to 30s+pprof-duration by default. Should be longer then pprof-duration as the command needs time to process the response.") + cmd.Flags().String("pprof-application", "elastic-agent", "Application name to collect pprof data from.") + cmd.Flags().String("pprof-route-key", "default", "Route key to collect pprof data from.") return cmd } @@ -127,14 +175,22 @@ func diagnosticCmd(streams *cli.IOStreams, cmd *cobra.Command, args []string) er return outputFunc(streams.Out, diag) } -func diagnosticsCollectCmd(streams *cli.IOStreams, fileName, outputFormat string) error { +func diagnosticsCollectCmd(streams *cli.IOStreams, fileName, outputFormat string, pprof bool, pprofDur, cmdTimeout time.Duration) error { err := tryContainerLoadPaths() if err != nil { return err } ctx := handleSignal(context.Background()) - innerCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + // set command timeout to 30s or 30s+pprofDur if no timeout is specified + if cmdTimeout == time.Duration(0) { + cmdTimeout = time.Second * 30 + if pprof { + cmdTimeout += pprofDur + } + + } + innerCtx, cancel := context.WithTimeout(ctx, cmdTimeout) 
defer cancel() diag, err := getDiagnostics(innerCtx) @@ -151,7 +207,15 @@ func diagnosticsCollectCmd(streams *cli.IOStreams, fileName, outputFormat string return fmt.Errorf("unable to gather config data: %w", err) } - err = createZip(fileName, outputFormat, diag, cfg) + var pprofData map[string][]client.ProcPProf = nil + if pprof { + pprofData, err = getAllPprof(innerCtx, pprofDur) + if err != nil { + return fmt.Errorf("unable to gather pprof data: %w", err) + } + } + + err = createZip(fileName, outputFormat, diag, cfg, pprofData) if err != nil { return fmt.Errorf("unable to create archive %q: %w", fileName, err) } @@ -160,6 +224,68 @@ func diagnosticsCollectCmd(streams *cli.IOStreams, fileName, outputFormat string return nil } +func diagnosticsPprofCmd(streams *cli.IOStreams, dur, cmdTimeout time.Duration, outFile, pType, appName, rk string) error { + pt, ok := proto.PprofOption_value[strings.ToUpper(pType)] + if !ok { + return fmt.Errorf("unknown pprof-type %q, select one of [allocs, block, cmdline, goroutine, heap, mutex, profile, threadcreate, trace]", pType) + } + + // the elastic-agent application does not have a route key + if appName == "elastic-agent" { + rk = "" + } + + ctx := handleSignal(context.Background()) + // set cmdTimeout to 30s+dur if not set. 
+	if cmdTimeout == time.Duration(0) {
+		cmdTimeout = time.Second*30 + dur
+	}
+	innerCtx, cancel := context.WithTimeout(ctx, cmdTimeout)
+	defer cancel()
+
+	daemon := client.New()
+	err := daemon.Connect(ctx)
+	if err != nil {
+		return err
+	}
+
+	pprofData, err := daemon.Pprof(innerCtx, dur, []proto.PprofOption{proto.PprofOption(pt)}, appName, rk)
+	if err != nil {
+		return err
+	}
+
+	// validate response
+	pArr, ok := pprofData[proto.PprofOption_name[pt]]
+	if !ok {
+		return fmt.Errorf("route key %q not found in response data (map length: %d)", rk, len(pprofData))
+	}
+	if len(pArr) != 1 {
+		return fmt.Errorf("pprof type length 1 expected, received %d", len(pArr))
+	}
+	res := pArr[0]
+
+	if res.Error != "" {
+		return fmt.Errorf("%s", res.Error)
+	}
+
+	// handle result
+	if outFile != "" {
+		f, err := os.Create(outFile)
+		if err != nil {
+			return err
+		}
+		defer f.Close()
+		_, err = f.Write(res.Result)
+		if err != nil {
+			return err
+		}
+		fmt.Fprintf(streams.Out, "pprof data written to %s\n", outFile)
+		return nil
+	}
+	_, err = streams.Out.Write(res.Result)
+	return err
+}
+
 func getDiagnostics(ctx context.Context) (DiagnosticsInfo, error) {
 	daemon := client.New()
 	diag := DiagnosticsInfo{}
@@ -242,7 +368,7 @@ func gatherConfig() (AgentConfig, error) {
 //
 // The passed DiagnosticsInfo and AgentConfig data is written in the specified output format.
 // Any local log files are collected and copied into the archive.
-func createZip(fileName, outputFormat string, diag DiagnosticsInfo, cfg AgentConfig) error { +func createZip(fileName, outputFormat string, diag DiagnosticsInfo, cfg AgentConfig, pprof map[string][]client.ProcPProf) error { f, err := os.Create(fileName) if err != nil { return err @@ -298,6 +424,13 @@ func createZip(fileName, outputFormat string, diag DiagnosticsInfo, cfg AgentCon return closeHandlers(err, zw, f) } + if pprof != nil { + err := zipProfs(zw, pprof) + if err != nil { + return closeHandlers(err, zw, f) + } + } + return closeHandlers(nil, zw, f) } @@ -371,3 +504,58 @@ func closeHandlers(err error, closers ...io.Closer) error { } return mErr.ErrorOrNil() } + +func getAllPprof(ctx context.Context, d time.Duration) (map[string][]client.ProcPProf, error) { + daemon := client.New() + err := daemon.Connect(ctx) + if err != nil { + return nil, err + } + pprofTypes := []proto.PprofOption{ + proto.PprofOption_ALLOCS, + proto.PprofOption_BLOCK, + proto.PprofOption_CMDLINE, + proto.PprofOption_GOROUTINE, + proto.PprofOption_HEAP, + proto.PprofOption_MUTEX, + proto.PprofOption_PROFILE, + proto.PprofOption_THREADCREATE, + proto.PprofOption_TRACE, + } + return daemon.Pprof(ctx, d, pprofTypes, "", "") +} + +func zipProfs(zw *zip.Writer, pprof map[string][]client.ProcPProf) error { + zf, err := zw.Create("pprof/") + if err != nil { + return err + } + for pType, profs := range pprof { + zf, err = zw.Create("pprof/" + pType + "/") + if err != nil { + return err + } + for _, p := range profs { + if p.Error != "" { + zf, err = zw.Create("pprof/" + pType + "/" + p.Name + "_" + p.RouteKey + "_error.txt") + if err != nil { + return err + } + _, err = zf.Write([]byte(p.Error)) + if err != nil { + return err + } + continue + } + zf, err = zw.Create("pprof/" + pType + "/" + p.Name + "_" + p.RouteKey + ".pprof") + if err != nil { + return err + } + _, err = zf.Write(p.Result) + if err != nil { + return err + } + } + } + return nil +} diff --git 
a/x-pack/elastic-agent/pkg/agent/cmd/run.go b/x-pack/elastic-agent/pkg/agent/cmd/run.go index ba37febd674..7f10f9faa31 100644 --- a/x-pack/elastic-agent/pkg/agent/cmd/run.go +++ b/x-pack/elastic-agent/pkg/agent/cmd/run.go @@ -148,6 +148,7 @@ func run(streams *cli.IOStreams, override cfgOverrider) error { } control.SetRouteFn(app.Routes) + control.SetMonitoringCfg(cfg.Settings.MonitoringConfig) serverStopFn, err := setupMetrics(agentInfo, logger, cfg.Settings.DownloadConfig.OS(), cfg.Settings.MonitoringConfig, app) if err != nil { @@ -313,7 +314,7 @@ func setupMetrics(agentInfo *info.AgentInfo, logger *logger.Logger, operatingSys } s.Start() - if cfg.Pprof { + if cfg.Pprof != nil && cfg.Pprof.Enabled { s.AttachPprof() } diff --git a/x-pack/elastic-agent/pkg/agent/control/client/client.go b/x-pack/elastic-agent/pkg/agent/control/client/client.go index f7332b9896e..900718bc6a6 100644 --- a/x-pack/elastic-agent/pkg/agent/control/client/client.go +++ b/x-pack/elastic-agent/pkg/agent/control/client/client.go @@ -72,6 +72,14 @@ type ProcMeta struct { Error string } +// ProcPProf returns pprof data for a process. +type ProcPProf struct { + Name string + RouteKey string + Result []byte + Error string +} + // AgentStatus is the current status of the Elastic Agent. type AgentStatus struct { Status Status @@ -95,6 +103,8 @@ type Client interface { Upgrade(ctx context.Context, version string, sourceURI string) (string, error) // ProcMeta gathers running process meta-data. ProcMeta(ctx context.Context) ([]ProcMeta, error) + // Pprof gathers data from the /debug/pprof/ endpoints specified. + Pprof(ctx context.Context, d time.Duration, pprofTypes []proto.PprofOption, appName, routeKey string) (map[string][]ProcPProf, error) } // client manages the state and communication to the Elastic Agent. 
@@ -247,3 +257,29 @@ func (c *client) ProcMeta(ctx context.Context) ([]ProcMeta, error) { } return procMeta, nil } + +// Pprof gathers /debug/pprof data and returns a map of pprof-type: ProcPProf data +func (c *client) Pprof(ctx context.Context, d time.Duration, pprofTypes []proto.PprofOption, appName, routeKey string) (map[string][]ProcPProf, error) { + resp, err := c.client.Pprof(ctx, &proto.PprofRequest{ + PprofType: pprofTypes, + TraceDuration: d.String(), + AppName: appName, + RouteKey: routeKey, + }) + if err != nil { + return nil, err + } + res := map[string][]ProcPProf{} + for _, pType := range pprofTypes { + res[pType.String()] = make([]ProcPProf, 0) + } + for _, r := range resp.Results { + res[r.PprofType.String()] = append(res[r.PprofType.String()], ProcPProf{ + Name: r.AppName, + RouteKey: r.RouteKey, + Result: r.Result, + Error: r.Error, + }) + } + return res, nil +} diff --git a/x-pack/elastic-agent/pkg/agent/control/proto/control.pb.go b/x-pack/elastic-agent/pkg/agent/control/proto/control.pb.go index 2cdec52cf65..70c66acd4ab 100644 --- a/x-pack/elastic-agent/pkg/agent/control/proto/control.pb.go +++ b/x-pack/elastic-agent/pkg/agent/control/proto/control.pb.go @@ -143,6 +143,74 @@ func (ActionStatus) EnumDescriptor() ([]byte, []int) { return file_control_proto_rawDescGZIP(), []int{1} } +// pprof endpoint that can be requested. +type PprofOption int32 + +const ( + PprofOption_ALLOCS PprofOption = 0 + PprofOption_BLOCK PprofOption = 1 + PprofOption_CMDLINE PprofOption = 2 + PprofOption_GOROUTINE PprofOption = 3 + PprofOption_HEAP PprofOption = 4 + PprofOption_MUTEX PprofOption = 5 + PprofOption_PROFILE PprofOption = 6 + PprofOption_THREADCREATE PprofOption = 7 + PprofOption_TRACE PprofOption = 8 +) + +// Enum value maps for PprofOption. 
+var ( + PprofOption_name = map[int32]string{ + 0: "ALLOCS", + 1: "BLOCK", + 2: "CMDLINE", + 3: "GOROUTINE", + 4: "HEAP", + 5: "MUTEX", + 6: "PROFILE", + 7: "THREADCREATE", + 8: "TRACE", + } + PprofOption_value = map[string]int32{ + "ALLOCS": 0, + "BLOCK": 1, + "CMDLINE": 2, + "GOROUTINE": 3, + "HEAP": 4, + "MUTEX": 5, + "PROFILE": 6, + "THREADCREATE": 7, + "TRACE": 8, + } +) + +func (x PprofOption) Enum() *PprofOption { + p := new(PprofOption) + *p = x + return p +} + +func (x PprofOption) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (PprofOption) Descriptor() protoreflect.EnumDescriptor { + return file_control_proto_enumTypes[2].Descriptor() +} + +func (PprofOption) Type() protoreflect.EnumType { + return &file_control_proto_enumTypes[2] +} + +func (x PprofOption) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use PprofOption.Descriptor instead. +func (PprofOption) EnumDescriptor() ([]byte, []int) { + return file_control_proto_rawDescGZIP(), []int{2} +} + // Empty message. type Empty struct { state protoimpl.MessageState @@ -807,6 +875,210 @@ func (x *ProcMetaResponse) GetProcs() []*ProcMeta { return nil } +// PprofRequest is a request for pprof data from and http/pprof endpoint. +type PprofRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The profiles that are requested + PprofType []PprofOption `protobuf:"varint,1,rep,packed,name=pprofType,proto3,enum=proto.PprofOption" json:"pprofType,omitempty"` + // A string representing a time.Duration to apply to trace, and profile options. + TraceDuration string `protobuf:"bytes,2,opt,name=traceDuration,proto3" json:"traceDuration,omitempty"` + // The application that will be profiled, if empty all applications are profiled. 
+ AppName string `protobuf:"bytes,3,opt,name=appName,proto3" json:"appName,omitempty"` + // The route key to match for profiling, if empty all are profiled. + RouteKey string `protobuf:"bytes,4,opt,name=routeKey,proto3" json:"routeKey,omitempty"` +} + +func (x *PprofRequest) Reset() { + *x = PprofRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_control_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PprofRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PprofRequest) ProtoMessage() {} + +func (x *PprofRequest) ProtoReflect() protoreflect.Message { + mi := &file_control_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PprofRequest.ProtoReflect.Descriptor instead. +func (*PprofRequest) Descriptor() ([]byte, []int) { + return file_control_proto_rawDescGZIP(), []int{9} +} + +func (x *PprofRequest) GetPprofType() []PprofOption { + if x != nil { + return x.PprofType + } + return nil +} + +func (x *PprofRequest) GetTraceDuration() string { + if x != nil { + return x.TraceDuration + } + return "" +} + +func (x *PprofRequest) GetAppName() string { + if x != nil { + return x.AppName + } + return "" +} + +func (x *PprofRequest) GetRouteKey() string { + if x != nil { + return x.RouteKey + } + return "" +} + +// PprofResult is the result of a pprof request for a given application/route key. 
+type PprofResult struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AppName string `protobuf:"bytes,1,opt,name=appName,proto3" json:"appName,omitempty"` + RouteKey string `protobuf:"bytes,2,opt,name=routeKey,proto3" json:"routeKey,omitempty"` + PprofType PprofOption `protobuf:"varint,3,opt,name=pprofType,proto3,enum=proto.PprofOption" json:"pprofType,omitempty"` + Result []byte `protobuf:"bytes,4,opt,name=result,proto3" json:"result,omitempty"` + Error string `protobuf:"bytes,5,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *PprofResult) Reset() { + *x = PprofResult{} + if protoimpl.UnsafeEnabled { + mi := &file_control_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PprofResult) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PprofResult) ProtoMessage() {} + +func (x *PprofResult) ProtoReflect() protoreflect.Message { + mi := &file_control_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PprofResult.ProtoReflect.Descriptor instead. 
+func (*PprofResult) Descriptor() ([]byte, []int) { + return file_control_proto_rawDescGZIP(), []int{10} +} + +func (x *PprofResult) GetAppName() string { + if x != nil { + return x.AppName + } + return "" +} + +func (x *PprofResult) GetRouteKey() string { + if x != nil { + return x.RouteKey + } + return "" +} + +func (x *PprofResult) GetPprofType() PprofOption { + if x != nil { + return x.PprofType + } + return PprofOption_ALLOCS +} + +func (x *PprofResult) GetResult() []byte { + if x != nil { + return x.Result + } + return nil +} + +func (x *PprofResult) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +// PprofResponse is a wrapper to return all pprof responses. +type PprofResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Results []*PprofResult `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` +} + +func (x *PprofResponse) Reset() { + *x = PprofResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_control_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PprofResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PprofResponse) ProtoMessage() {} + +func (x *PprofResponse) ProtoReflect() protoreflect.Message { + mi := &file_control_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PprofResponse.ProtoReflect.Descriptor instead. 
+func (*PprofResponse) Descriptor() ([]byte, []int) { + return file_control_proto_rawDescGZIP(), []int{11} +} + +func (x *PprofResponse) GetResults() []*PprofResult { + if x != nil { + return x.Results + } + return nil +} + var File_control_proto protoreflect.FileDescriptor var file_control_proto_rawDesc = []byte{ @@ -886,37 +1158,73 @@ var file_control_proto_rawDesc = []byte{ 0x10, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x70, 0x72, 0x6f, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, - 0x61, 0x52, 0x05, 0x70, 0x72, 0x6f, 0x63, 0x73, 0x2a, 0x79, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x41, 0x52, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x00, - 0x12, 0x0f, 0x0a, 0x0b, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x55, 0x52, 0x49, 0x4e, 0x47, 0x10, - 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, 0x02, 0x12, 0x0c, - 0x0a, 0x08, 0x44, 0x45, 0x47, 0x52, 0x41, 0x44, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, - 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x4f, 0x50, - 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x50, 0x47, 0x52, 0x41, 0x44, - 0x49, 0x4e, 0x47, 0x10, 0x06, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x4f, 0x4c, 0x4c, 0x42, 0x41, 0x43, - 0x4b, 0x10, 0x07, 0x2a, 0x28, 0x0a, 0x0c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x00, - 0x12, 0x0b, 0x0a, 0x07, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x01, 0x32, 0x93, 0x02, - 0x0a, 0x13, 0x45, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x43, 0x6f, - 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x2f, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x0c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 
0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x16, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x12, 0x0c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x15, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x07, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x12, 0x0c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x16, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, - 0x65, 0x12, 0x15, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x31, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x0c, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x17, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x42, 0x22, 0x5a, 0x1d, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, - 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3b, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x61, 0x52, 0x05, 0x70, 0x72, 0x6f, 0x63, 0x73, 0x22, 0x9c, 0x01, 0x0a, 0x0c, 0x50, 0x70, 0x72, + 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x30, 0x0a, 0x09, 0x70, 0x70, 0x72, + 0x6f, 0x66, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x12, 0x2e, 0x70, + 
0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x09, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x54, 0x79, 0x70, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, + 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x22, 0xa3, 0x01, 0x0a, 0x0b, 0x50, 0x70, 0x72, 0x6f, + 0x66, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x30, 0x0a, + 0x09, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x54, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x12, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x16, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x3d, 0x0a, + 0x0d, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, + 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x12, 0x2e, 0x70, 0x72, 0x6f, 0x74, 
0x6f, 0x2e, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x2a, 0x79, 0x0a, 0x06, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x41, 0x52, 0x54, 0x49, + 0x4e, 0x47, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x55, 0x52, + 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x59, + 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x45, 0x47, 0x52, 0x41, 0x44, 0x45, 0x44, 0x10, 0x03, + 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, + 0x53, 0x54, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x50, + 0x47, 0x52, 0x41, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x06, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x4f, 0x4c, + 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x07, 0x2a, 0x28, 0x0a, 0x0c, 0x41, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, + 0x53, 0x53, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, + 0x01, 0x2a, 0x7f, 0x0a, 0x0b, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x4c, 0x4c, 0x4f, 0x43, 0x53, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, + 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4d, 0x44, 0x4c, 0x49, + 0x4e, 0x45, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x47, 0x4f, 0x52, 0x4f, 0x55, 0x54, 0x49, 0x4e, + 0x45, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x45, 0x41, 0x50, 0x10, 0x04, 0x12, 0x09, 0x0a, + 0x05, 0x4d, 0x55, 0x54, 0x45, 0x58, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x52, 0x4f, 0x46, + 0x49, 0x4c, 0x45, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x48, 0x52, 0x45, 0x41, 0x44, 0x43, + 0x52, 0x45, 0x41, 0x54, 0x45, 0x10, 0x07, 0x12, 0x09, 0x0a, 0x05, 0x54, 0x52, 0x41, 0x43, 0x45, + 0x10, 0x08, 0x32, 0xc7, 0x02, 0x0a, 0x13, 0x45, 0x6c, 0x61, 0x73, 0x74, 
0x69, 0x63, 0x41, 0x67, + 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x2f, 0x0a, 0x07, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x1a, 0x16, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x06, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x1a, 0x15, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x07, 0x52, 0x65, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x0c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x1a, 0x16, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x55, + 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x12, 0x15, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, + 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, + 0x61, 0x12, 0x0c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, + 0x17, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x61, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x05, 0x50, 0x70, 0x72, 0x6f, + 0x66, 0x12, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, + 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x22, 0x5a, 0x1d, + 0x70, 0x6b, 
0x67, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0xf8, 0x01, 0x01, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -931,43 +1239,52 @@ func file_control_proto_rawDescGZIP() []byte { return file_control_proto_rawDescData } -var file_control_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_control_proto_msgTypes = make([]protoimpl.MessageInfo, 9) +var file_control_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_control_proto_msgTypes = make([]protoimpl.MessageInfo, 12) var file_control_proto_goTypes = []interface{}{ (Status)(0), // 0: proto.Status (ActionStatus)(0), // 1: proto.ActionStatus - (*Empty)(nil), // 2: proto.Empty - (*VersionResponse)(nil), // 3: proto.VersionResponse - (*RestartResponse)(nil), // 4: proto.RestartResponse - (*UpgradeRequest)(nil), // 5: proto.UpgradeRequest - (*UpgradeResponse)(nil), // 6: proto.UpgradeResponse - (*ApplicationStatus)(nil), // 7: proto.ApplicationStatus - (*ProcMeta)(nil), // 8: proto.ProcMeta - (*StatusResponse)(nil), // 9: proto.StatusResponse - (*ProcMetaResponse)(nil), // 10: proto.ProcMetaResponse + (PprofOption)(0), // 2: proto.PprofOption + (*Empty)(nil), // 3: proto.Empty + (*VersionResponse)(nil), // 4: proto.VersionResponse + (*RestartResponse)(nil), // 5: proto.RestartResponse + (*UpgradeRequest)(nil), // 6: proto.UpgradeRequest + (*UpgradeResponse)(nil), // 7: proto.UpgradeResponse + (*ApplicationStatus)(nil), // 8: proto.ApplicationStatus + (*ProcMeta)(nil), // 9: proto.ProcMeta + (*StatusResponse)(nil), // 10: proto.StatusResponse + (*ProcMetaResponse)(nil), // 11: proto.ProcMetaResponse + (*PprofRequest)(nil), // 12: proto.PprofRequest + (*PprofResult)(nil), // 13: proto.PprofResult + (*PprofResponse)(nil), // 14: proto.PprofResponse } var file_control_proto_depIdxs = []int32{ 1, // 0: proto.RestartResponse.status:type_name -> proto.ActionStatus 1, // 1: 
proto.UpgradeResponse.status:type_name -> proto.ActionStatus 0, // 2: proto.ApplicationStatus.status:type_name -> proto.Status 0, // 3: proto.StatusResponse.status:type_name -> proto.Status - 7, // 4: proto.StatusResponse.applications:type_name -> proto.ApplicationStatus - 8, // 5: proto.ProcMetaResponse.procs:type_name -> proto.ProcMeta - 2, // 6: proto.ElasticAgentControl.Version:input_type -> proto.Empty - 2, // 7: proto.ElasticAgentControl.Status:input_type -> proto.Empty - 2, // 8: proto.ElasticAgentControl.Restart:input_type -> proto.Empty - 5, // 9: proto.ElasticAgentControl.Upgrade:input_type -> proto.UpgradeRequest - 2, // 10: proto.ElasticAgentControl.ProcMeta:input_type -> proto.Empty - 3, // 11: proto.ElasticAgentControl.Version:output_type -> proto.VersionResponse - 9, // 12: proto.ElasticAgentControl.Status:output_type -> proto.StatusResponse - 4, // 13: proto.ElasticAgentControl.Restart:output_type -> proto.RestartResponse - 6, // 14: proto.ElasticAgentControl.Upgrade:output_type -> proto.UpgradeResponse - 10, // 15: proto.ElasticAgentControl.ProcMeta:output_type -> proto.ProcMetaResponse - 11, // [11:16] is the sub-list for method output_type - 6, // [6:11] is the sub-list for method input_type - 6, // [6:6] is the sub-list for extension type_name - 6, // [6:6] is the sub-list for extension extendee - 0, // [0:6] is the sub-list for field type_name + 8, // 4: proto.StatusResponse.applications:type_name -> proto.ApplicationStatus + 9, // 5: proto.ProcMetaResponse.procs:type_name -> proto.ProcMeta + 2, // 6: proto.PprofRequest.pprofType:type_name -> proto.PprofOption + 2, // 7: proto.PprofResult.pprofType:type_name -> proto.PprofOption + 13, // 8: proto.PprofResponse.results:type_name -> proto.PprofResult + 3, // 9: proto.ElasticAgentControl.Version:input_type -> proto.Empty + 3, // 10: proto.ElasticAgentControl.Status:input_type -> proto.Empty + 3, // 11: proto.ElasticAgentControl.Restart:input_type -> proto.Empty + 6, // 12: 
proto.ElasticAgentControl.Upgrade:input_type -> proto.UpgradeRequest + 3, // 13: proto.ElasticAgentControl.ProcMeta:input_type -> proto.Empty + 12, // 14: proto.ElasticAgentControl.Pprof:input_type -> proto.PprofRequest + 4, // 15: proto.ElasticAgentControl.Version:output_type -> proto.VersionResponse + 10, // 16: proto.ElasticAgentControl.Status:output_type -> proto.StatusResponse + 5, // 17: proto.ElasticAgentControl.Restart:output_type -> proto.RestartResponse + 7, // 18: proto.ElasticAgentControl.Upgrade:output_type -> proto.UpgradeResponse + 11, // 19: proto.ElasticAgentControl.ProcMeta:output_type -> proto.ProcMetaResponse + 14, // 20: proto.ElasticAgentControl.Pprof:output_type -> proto.PprofResponse + 15, // [15:21] is the sub-list for method output_type + 9, // [9:15] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name } func init() { file_control_proto_init() } @@ -1084,14 +1401,50 @@ func file_control_proto_init() { return nil } } + file_control_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PprofRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_control_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PprofResult); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_control_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PprofResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: 
file_control_proto_rawDesc, - NumEnums: 2, - NumMessages: 9, + NumEnums: 3, + NumMessages: 12, NumExtensions: 0, NumServices: 1, }, @@ -1128,6 +1481,8 @@ type ElasticAgentControlClient interface { Upgrade(ctx context.Context, in *UpgradeRequest, opts ...grpc.CallOption) (*UpgradeResponse, error) // Gather all running process metadata. ProcMeta(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ProcMetaResponse, error) + // Gather requested pprof data from specified applications. + Pprof(ctx context.Context, in *PprofRequest, opts ...grpc.CallOption) (*PprofResponse, error) } type elasticAgentControlClient struct { @@ -1183,6 +1538,15 @@ func (c *elasticAgentControlClient) ProcMeta(ctx context.Context, in *Empty, opt return out, nil } +func (c *elasticAgentControlClient) Pprof(ctx context.Context, in *PprofRequest, opts ...grpc.CallOption) (*PprofResponse, error) { + out := new(PprofResponse) + err := c.cc.Invoke(ctx, "/proto.ElasticAgentControl/Pprof", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // ElasticAgentControlServer is the server API for ElasticAgentControl service. type ElasticAgentControlServer interface { // Fetches the currently running version of the Elastic Agent. @@ -1195,6 +1559,8 @@ type ElasticAgentControlServer interface { Upgrade(context.Context, *UpgradeRequest) (*UpgradeResponse, error) // Gather all running process metadata. ProcMeta(context.Context, *Empty) (*ProcMetaResponse, error) + // Gather requested pprof data from specified applications. + Pprof(context.Context, *PprofRequest) (*PprofResponse, error) } // UnimplementedElasticAgentControlServer can be embedded to have forward compatible implementations. 
@@ -1216,6 +1582,9 @@ func (*UnimplementedElasticAgentControlServer) Upgrade(context.Context, *Upgrade func (*UnimplementedElasticAgentControlServer) ProcMeta(context.Context, *Empty) (*ProcMetaResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ProcMeta not implemented") } +func (*UnimplementedElasticAgentControlServer) Pprof(context.Context, *PprofRequest) (*PprofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Pprof not implemented") +} func RegisterElasticAgentControlServer(s *grpc.Server, srv ElasticAgentControlServer) { s.RegisterService(&_ElasticAgentControl_serviceDesc, srv) @@ -1311,6 +1680,24 @@ func _ElasticAgentControl_ProcMeta_Handler(srv interface{}, ctx context.Context, return interceptor(ctx, in, info, handler) } +func _ElasticAgentControl_Pprof_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PprofRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ElasticAgentControlServer).Pprof(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.ElasticAgentControl/Pprof", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ElasticAgentControlServer).Pprof(ctx, req.(*PprofRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _ElasticAgentControl_serviceDesc = grpc.ServiceDesc{ ServiceName: "proto.ElasticAgentControl", HandlerType: (*ElasticAgentControlServer)(nil), @@ -1335,6 +1722,10 @@ var _ElasticAgentControl_serviceDesc = grpc.ServiceDesc{ MethodName: "ProcMeta", Handler: _ElasticAgentControl_ProcMeta_Handler, }, + { + MethodName: "Pprof", + Handler: _ElasticAgentControl_Pprof_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "control.proto", diff --git a/x-pack/elastic-agent/pkg/agent/control/server/server.go 
b/x-pack/elastic-agent/pkg/agent/control/server/server.go index 072b212a771..12ed4650eed 100644 --- a/x-pack/elastic-agent/pkg/agent/control/server/server.go +++ b/x-pack/elastic-agent/pkg/agent/control/server/server.go @@ -7,6 +7,8 @@ package server import ( "context" "encoding/json" + "fmt" + "io" "net" "net/http" "runtime" @@ -23,7 +25,9 @@ import ( "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/errors" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/program" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/logger" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/monitoring/beats" monitoring "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/monitoring/beats" + monitoringCfg "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/monitoring/config" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/socket" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/status" "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/fleetapi" @@ -33,20 +37,27 @@ import ( // Server is the daemon side of the control protocol. type Server struct { - logger *logger.Logger - rex reexec.ExecManager - statusCtrl status.Controller - up *upgrade.Upgrader - routeFn func() *sorted.Set - listener net.Listener - server *grpc.Server - lock sync.RWMutex + logger *logger.Logger + rex reexec.ExecManager + statusCtrl status.Controller + up *upgrade.Upgrader + routeFn func() *sorted.Set + monitoringCfg *monitoringCfg.MonitoringConfig + listener net.Listener + server *grpc.Server + lock sync.RWMutex } type specer interface { Specs() map[string]program.Spec } +type specInfo struct { + spec program.Spec + app string + rk string +} + // New creates a new control protocol server. 
 func New(log *logger.Logger, rex reexec.ExecManager, statusCtrl status.Controller, up *upgrade.Upgrader) *Server {
 	return &Server{
@@ -71,6 +82,14 @@ func (s *Server) SetRouteFn(routesFetchFn func() *sorted.Set) {
 	s.routeFn = routesFetchFn
 }
 
+// SetMonitoringCfg sets a reference to the monitoring config used by the running agent.
+// The controller references this config to find out if pprof is enabled for the agent or not.
+func (s *Server) SetMonitoringCfg(cfg *monitoringCfg.MonitoringConfig) {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+	s.monitoringCfg = cfg
+}
+
 // Start starts the GRPC endpoint and accepts new connections.
 func (s *Server) Start() error {
 	if s.server != nil {
@@ -194,84 +213,273 @@ func (s *Server) ProcMeta(ctx context.Context, _ *proto.Empty) (*proto.ProcMetaR
 		Procs: []*proto.ProcMeta{},
 	}
 
+	// gather spec data for all rk/apps running
+	specs := s.getSpecInfo("", "")
+	for _, si := range specs {
+		endpoint := monitoring.MonitoringEndpoint(si.spec, runtime.GOOS, si.rk)
+		client := newSocketRequester(si.app, si.rk, endpoint)
+
+		procMeta := client.procMeta(ctx)
+		resp.Procs = append(resp.Procs, procMeta)
+	}
+
+	return resp, nil
+}
+
+// Pprof returns /debug/pprof data for the requested application/route key or all running applications.
+func (s *Server) Pprof(ctx context.Context, req *proto.PprofRequest) (*proto.PprofResponse, error) {
+	if s.monitoringCfg == nil || s.monitoringCfg.Pprof == nil || !s.monitoringCfg.Pprof.Enabled {
+		return nil, fmt.Errorf("agent.monitoring.pprof disabled")
+	}
+
+	if s.routeFn == nil {
+		return nil, errors.New("route function is nil")
+	}
+
+	dur, err := time.ParseDuration(req.TraceDuration)
+	if err != nil {
+		return nil, fmt.Errorf("unable to parse trace duration: %w", err)
+	}
+
+	resp := &proto.PprofResponse{
+		Results: []*proto.PprofResult{},
+	}
+
+	var wg sync.WaitGroup
+	ch := make(chan *proto.PprofResult, 1)
+
+	// retrieve elastic-agent pprof data if requested or application is unspecified.
+	if req.AppName == "" || req.AppName == "elastic-agent" {
+		endpoint := beats.AgentMonitoringEndpoint(runtime.GOOS, s.monitoringCfg.HTTP)
+		c := newSocketRequester("elastic-agent", "", endpoint)
+		for _, opt := range req.PprofType {
+			wg.Add(1)
+			go func(opt proto.PprofOption) {
+				res := c.getPprof(ctx, opt, dur)
+				ch <- res
+				wg.Done()
+			}(opt)
+		}
+	}
+
+	// get requested rk/appname spec or all specs
+	var specs []specInfo
+	if req.AppName != "elastic-agent" {
+		specs = s.getSpecInfo(req.RouteKey, req.AppName)
+	}
+	for _, si := range specs {
+		endpoint := monitoring.MonitoringEndpoint(si.spec, runtime.GOOS, si.rk)
+		c := newSocketRequester(si.app, si.rk, endpoint)
+		// Launch a concurrent goroutine to gather all pprof endpoints from a socket.
+		for _, opt := range req.PprofType {
+			wg.Add(1)
+			go func(opt proto.PprofOption) {
+				res := c.getPprof(ctx, opt, dur)
+				ch <- res
+				wg.Done()
+			}(opt)
+		}
+	}
+
+	// wait for the waitgroup to be done and close the channel
+	go func() {
+		wg.Wait()
+		close(ch)
+	}()
+
+	// gather all results from channel until closed.
+	for res := range ch {
+		resp.Results = append(resp.Results, res)
+	}
+	return resp, nil
+}
+
+// getSpecInfo will return the specs for the program associated with the specified route key/app name, or all programs if no key(s) are specified.
+// if matchRK or matchApp are empty all results will be returned.
+func (s *Server) getSpecInfo(matchRK, matchApp string) []specInfo { routes := s.routeFn() + + // find specInfo for a specified rk/app + if matchRK != "" && matchApp != "" { + programs, ok := routes.Get(matchRK) + if !ok { + s.logger.With("route_key", matchRK).Debug("No matching route key found.") + return []specInfo{} + } + sp, ok := programs.(specer) + if !ok { + s.logger.With("route_key", matchRK, "route", programs).Warn("Unable to cast route as specer.") + return []specInfo{} + } + specs := sp.Specs() + + spec, ok := specs[matchApp] + if !ok { + s.logger.With("route_key", matchRK, "application_name", matchApp).Debug("No matching route key/application name found.") + return []specInfo{} + } + return []specInfo{specInfo{spec: spec, app: matchApp, rk: matchRK}} + } + + // gather specInfo for all rk/app values + res := make([]specInfo, 0) for _, rk := range routes.Keys() { programs, ok := routes.Get(rk) if !ok { + // we do not expect to ever hit this code path + // if this log message occurs then the agent is unable to access one of the keys that is returned by the route function + // might be a race condition if someone tries to update the policy to remove an output? 
s.logger.With("route_key", rk).Warn("Unable to retrieve route.") continue } - sp, ok := programs.(specer) if !ok { - s.logger.With("route_key", rk, "route", programs).Warn("Unable to cast route as specer.") + s.logger.With("route_key", matchRK, "route", programs).Warn("Unable to cast route as specer.") continue } - specs := sp.Specs() + for n, spec := range sp.Specs() { + res = append(res, specInfo{ + rk: rk, + app: n, + spec: spec, + }) + } + } + return res +} + +// socketRequester is a struct to gather (diagnostics) data from a socket opened by elastic-agent or one if it's processes +type socketRequester struct { + c http.Client + endpoint string + appName string + routeKey string +} - for n, spec := range specs { - procMeta := &proto.ProcMeta{ - Name: n, - RouteKey: rk, - } - - client := http.Client{ - Timeout: time.Second * 5, - } - endpoint := monitoring.MonitoringEndpoint(spec, runtime.GOOS, rk) - if strings.HasPrefix(endpoint, "unix://") { - client.Transport = &http.Transport{ - Proxy: nil, - DialContext: socket.DialContext(strings.TrimPrefix(endpoint, "unix://")), - } - endpoint = "unix" - } else if strings.HasPrefix(endpoint, "npipe://") { - client.Transport = &http.Transport{ - Proxy: nil, - DialContext: socket.DialContext(strings.TrimPrefix(endpoint, "npipe:///")), - } - endpoint = "npipe" - } - - res, err := client.Get("http://" + endpoint + "/") - if err != nil { - procMeta.Error = err.Error() - resp.Procs = append(resp.Procs, procMeta) - continue - } - if res.StatusCode != 200 { - procMeta.Error = "response status is: " + res.Status - resp.Procs = append(resp.Procs, procMeta) - continue - } - - bi := &BeatInfo{} - dec := json.NewDecoder(res.Body) - if err := dec.Decode(bi); err != nil { - res.Body.Close() - procMeta.Error = err.Error() - resp.Procs = append(resp.Procs, procMeta) - continue - } - res.Body.Close() - - procMeta.Process = bi.Beat - procMeta.Hostname = bi.Hostname - procMeta.Id = bi.ID - procMeta.EphemeralId = bi.EphemeralID - 
procMeta.Version = bi.Version - procMeta.BuildCommit = bi.Commit - procMeta.BuildTime = bi.Time - procMeta.Username = bi.Username - procMeta.UserId = bi.UserID - procMeta.UserGid = bi.GroupID - procMeta.Architecture = bi.BinaryArch - procMeta.ElasticLicensed = bi.ElasticLicensed - - resp.Procs = append(resp.Procs, procMeta) +func newSocketRequester(appName, routeKey, endpoint string) *socketRequester { + c := http.Client{} + if strings.HasPrefix(endpoint, "unix://") { + c.Transport = &http.Transport{ + Proxy: nil, + DialContext: socket.DialContext(strings.TrimPrefix(endpoint, "unix://")), + } + endpoint = "unix" + } else if strings.HasPrefix(endpoint, "npipe://") { + c.Transport = &http.Transport{ + Proxy: nil, + DialContext: socket.DialContext(strings.TrimPrefix(endpoint, "npipe:///")), } + endpoint = "npipe" } - return resp, nil + return &socketRequester{ + c: c, + appName: appName, + routeKey: routeKey, + endpoint: endpoint, + } +} + +// getPath creates a get request for the specified path. +// Will return an error if that status code is not 200. +func (r *socketRequester) getPath(ctx context.Context, path string) (*http.Response, error) { + req, err := http.NewRequest("GET", "http://"+r.endpoint+path, nil) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + res, err := r.c.Do(req) + if err != nil { + return nil, err + } + if res.StatusCode != 200 { + res.Body.Close() + return nil, fmt.Errorf("response status is %d", res.StatusCode) + } + return res, nil + +} + +// procMeta will return process metadata by querying the "/" path. 
+func (r *socketRequester) procMeta(ctx context.Context) *proto.ProcMeta {
+	pm := &proto.ProcMeta{
+		Name:     r.appName,
+		RouteKey: r.routeKey,
+	}
+
+	res, err := r.getPath(ctx, "/")
+	if err != nil {
+		pm.Error = err.Error()
+		return pm
+	}
+	defer res.Body.Close()
+
+	bi := &BeatInfo{}
+	dec := json.NewDecoder(res.Body)
+	if err := dec.Decode(bi); err != nil {
+		pm.Error = err.Error()
+		return pm
+	}
+
+	pm.Process = bi.Beat
+	pm.Hostname = bi.Hostname
+	pm.Id = bi.ID
+	pm.EphemeralId = bi.EphemeralID
+	pm.Version = bi.Version
+	pm.BuildCommit = bi.Commit
+	pm.BuildTime = bi.Time
+	pm.Username = bi.Username
+	pm.UserId = bi.UserID
+	pm.UserGid = bi.GroupID
+	pm.Architecture = bi.BinaryArch
+	pm.ElasticLicensed = bi.ElasticLicensed
+
+	return pm
+}
+
+var pprofEndpoints = map[proto.PprofOption]string{
+	proto.PprofOption_ALLOCS:       "/debug/pprof/allocs",
+	proto.PprofOption_BLOCK:        "/debug/pprof/block",
+	proto.PprofOption_CMDLINE:      "/debug/pprof/cmdline",
+	proto.PprofOption_GOROUTINE:    "/debug/pprof/goroutine",
+	proto.PprofOption_HEAP:         "/debug/pprof/heap",
+	proto.PprofOption_MUTEX:        "/debug/pprof/mutex",
+	proto.PprofOption_PROFILE:      "/debug/pprof/profile",
+	proto.PprofOption_THREADCREATE: "/debug/pprof/threadcreate",
+	proto.PprofOption_TRACE:        "/debug/pprof/trace",
+}
+
+// getPprof will gather pprof data specified by the option.
+func (r *socketRequester) getPprof(ctx context.Context, opt proto.PprofOption, dur time.Duration) *proto.PprofResult { + res := &proto.PprofResult{ + AppName: r.appName, + RouteKey: r.routeKey, + PprofType: opt, + } + + path, ok := pprofEndpoints[opt] + if !ok { + res.Error = "unknown path for option" + return res + } + + if opt == proto.PprofOption_PROFILE || opt == proto.PprofOption_TRACE { + path += fmt.Sprintf("?seconds=%0.f", dur.Seconds()) + } + + resp, err := r.getPath(ctx, path) + if err != nil { + res.Error = err.Error() + return res + } + defer resp.Body.Close() + + p, err := io.ReadAll(resp.Body) + if err != nil { + res.Error = err.Error() + return res + } + res.Result = p + return res } type upgradeRequest struct { diff --git a/x-pack/elastic-agent/pkg/core/monitoring/beats/beats_monitor.go b/x-pack/elastic-agent/pkg/core/monitoring/beats/beats_monitor.go index cb0519f9806..939aa89c99d 100644 --- a/x-pack/elastic-agent/pkg/core/monitoring/beats/beats_monitor.go +++ b/x-pack/elastic-agent/pkg/core/monitoring/beats/beats_monitor.go @@ -35,6 +35,7 @@ type Monitor struct { func NewMonitor(downloadConfig *artifact.Config, monitoringCfg *monitoringConfig.MonitoringConfig, logMetrics bool) *Monitor { if monitoringCfg == nil { monitoringCfg = monitoringConfig.DefaultConfig() + monitoringCfg.Pprof = &monitoringConfig.PprofConfig{Enabled: false} } monitoringCfg.LogMetrics = logMetrics @@ -55,6 +56,9 @@ func (b *Monitor) Reload(rawConfig *config.Config) error { if cfg == nil || cfg.Settings == nil || cfg.Settings.MonitoringConfig == nil { b.config = monitoringConfig.DefaultConfig() } else { + if cfg.Settings.MonitoringConfig.Pprof == nil { + cfg.Settings.MonitoringConfig.Pprof = b.config.Pprof + } b.config = cfg.Settings.MonitoringConfig logMetrics := true if cfg.Settings.LoggingConfig != nil { @@ -123,7 +127,7 @@ func (b *Monitor) EnrichArgs(spec program.Spec, pipelineID string, args []string "-E", "http.enabled=true", "-E", "http.host="+endpoint, ) - if 
b.config.Pprof { + if b.config.Pprof != nil && b.config.Pprof.Enabled { appendix = append(appendix, "-E", "http.pprof.enabled=true", ) diff --git a/x-pack/elastic-agent/pkg/core/monitoring/config/config.go b/x-pack/elastic-agent/pkg/core/monitoring/config/config.go index 10f220fcc5a..3004561bd86 100644 --- a/x-pack/elastic-agent/pkg/core/monitoring/config/config.go +++ b/x-pack/elastic-agent/pkg/core/monitoring/config/config.go @@ -15,7 +15,7 @@ type MonitoringConfig struct { LogMetrics bool `yaml:"-" config:"-"` HTTP *MonitoringHTTPConfig `yaml:"http" config:"http"` Namespace string `yaml:"namespace" config:"namespace"` - Pprof bool `yaml:"pprof" config:"pprof"` + Pprof *PprofConfig `yaml:"pprof" config:"pprof"` } // MonitoringHTTPConfig is a config defining HTTP endpoint published by agent @@ -27,6 +27,13 @@ type MonitoringHTTPConfig struct { Port int `yaml:"port" config:"port" validate:"min=0,max=65535,nonzero"` } +// PprofConfig is a struct for the pprof enablement flag. +// It is a nil struct by default to allow the agent to use the a value that the user has injected into fleet.yml as the source of truth that is passed to beats +// TODO get this value from Kibana? +type PprofConfig struct { + Enabled bool `yaml:"enabled" config:"enabled"` +} + // DefaultConfig creates a config with pre-set default values. func DefaultConfig() *MonitoringConfig { return &MonitoringConfig{ @@ -39,6 +46,5 @@ func DefaultConfig() *MonitoringConfig { Port: defaultPort, }, Namespace: defaultNamespace, - Pprof: false, } } From 3ab22929a54976ff0f8700a61f1821d5843624f5 Mon Sep 17 00:00:00 2001 From: Shahzad Date: Wed, 15 Dec 2021 00:08:45 +0100 Subject: [PATCH 08/57] [Heartbeat] Add run_once mode in the scheduler (#29282) This PR fixes data stream mode while running heartbeat in run_once mode , before this if you would run heartbeat in run_once mode, it would still push documents to normal heartbeat indexes. This PR also fixes data like tags not being pushed as part of documents. 
--- heartbeat/README.md | 4 +- heartbeat/beater/heartbeat.go | 85 ++------------ heartbeat/monitors/factory.go | 10 +- heartbeat/monitors/factory_test.go | 9 +- heartbeat/monitors/monitor.go | 50 ++++++-- heartbeat/monitors/monitor_test.go | 12 +- heartbeat/monitors/task.go | 11 +- heartbeat/monitors/task_test.go | 6 +- heartbeat/scheduler/schedjob_test.go | 6 +- heartbeat/scheduler/scheduler.go | 107 ++++++++---------- heartbeat/scheduler/scheduler_test.go | 88 +++++++------- .../publisher/pipeline}/sync_client.go | 28 +++-- .../publisher/pipeline}/sync_client_test.go | 23 +++- .../function/beater/functionbeat.go | 7 +- .../function/provider/provider.go | 5 +- .../function/provider/provider_test.go | 10 +- .../function/provider/registry_test.go | 7 +- .../provider/aws/aws/api_gateway_proxy.go | 6 +- .../provider/aws/aws/cloudwatch_kinesis.go | 6 +- .../provider/aws/aws/cloudwatch_logs.go | 6 +- .../functionbeat/provider/aws/aws/kinesis.go | 6 +- x-pack/functionbeat/provider/aws/aws/sqs.go | 6 +- .../provider/local/local/local.go | 4 +- 23 files changed, 256 insertions(+), 246 deletions(-) rename {x-pack/functionbeat/function/core => libbeat/publisher/pipeline}/sync_client.go (75%) rename {x-pack/functionbeat/function/core => libbeat/publisher/pipeline}/sync_client_test.go (73%) diff --git a/heartbeat/README.md b/heartbeat/README.md index c7eb09fa52f..dad0c6d2bc1 100644 --- a/heartbeat/README.md +++ b/heartbeat/README.md @@ -1,8 +1,8 @@ -# Heartbeat (Experimental) +# Heartbeat Welcome to Heartbeat. -This is a new EXPERIMENTAL beat for testing service availability using PING based on ICMP, TCP or higher level protocols. +This is a beat for testing service availability using PING based on ICMP, TCP or higher level protocols. 
Ensure that this folder is at the following location: `${GOPATH}/src/github.com/elastic/beats` diff --git a/heartbeat/beater/heartbeat.go b/heartbeat/beater/heartbeat.go index c019e37ea72..0e499ce6f85 100644 --- a/heartbeat/beater/heartbeat.go +++ b/heartbeat/beater/heartbeat.go @@ -20,7 +20,6 @@ package beater import ( "errors" "fmt" - "sync" "syscall" "time" @@ -28,7 +27,6 @@ import ( "github.com/elastic/beats/v7/heartbeat/hbregistry" "github.com/elastic/beats/v7/heartbeat/monitors" "github.com/elastic/beats/v7/heartbeat/monitors/plugin" - "github.com/elastic/beats/v7/heartbeat/monitors/stdfields" "github.com/elastic/beats/v7/heartbeat/scheduler" "github.com/elastic/beats/v7/libbeat/autodiscover" "github.com/elastic/beats/v7/libbeat/beat" @@ -37,7 +35,6 @@ import ( "github.com/elastic/beats/v7/libbeat/common/reload" "github.com/elastic/beats/v7/libbeat/logp" "github.com/elastic/beats/v7/libbeat/management" - "github.com/elastic/beats/v7/x-pack/functionbeat/function/core" _ "github.com/elastic/beats/v7/heartbeat/security" _ "github.com/elastic/beats/v7/libbeat/processors/script" @@ -71,14 +68,14 @@ func New(b *beat.Beat, rawConfig *common.Config) (beat.Beater, error) { } jobConfig := parsedConfig.Jobs - scheduler := scheduler.NewWithLocation(limit, hbregistry.SchedulerRegistry, location, jobConfig) + scheduler := scheduler.Create(limit, hbregistry.SchedulerRegistry, location, jobConfig, parsedConfig.RunOnce) bt := &Heartbeat{ done: make(chan struct{}), config: parsedConfig, scheduler: scheduler, // dynamicFactory is the factory used for dynamic configs, e.g. 
autodiscover / reload - dynamicFactory: monitors.NewFactory(b.Info, scheduler, plugin.GlobalPluginsReg), + dynamicFactory: monitors.NewFactory(b.Info, scheduler.Add, plugin.GlobalPluginsReg, parsedConfig.RunOnce), } return bt, nil } @@ -89,20 +86,21 @@ func (bt *Heartbeat) Run(b *beat.Beat) error { groups, _ := syscall.Getgroups() logp.Info("Effective user/group ids: %d/%d, with groups: %v", syscall.Geteuid(), syscall.Getegid(), groups) - if bt.config.RunOnce { - err := bt.runRunOnce(b) - if err != nil { - return err - } - return nil - } - + // It is important this appear before we check for run once mode + // In run once mode we depend on these monitors being loaded, but not other more + // dynamic types. stopStaticMonitors, err := bt.RunStaticMonitors(b) if err != nil { return err } defer stopStaticMonitors() + if bt.config.RunOnce { + bt.scheduler.WaitForRunOnce() + logp.Info("Ending run_once run") + return nil + } + if b.Manager.Enabled() { bt.RunCentralMgmtMonitors(b) } @@ -127,9 +125,6 @@ func (bt *Heartbeat) Run(b *beat.Beat) error { defer bt.autodiscover.Stop() } - if err := bt.scheduler.Start(); err != nil { - return err - } defer bt.scheduler.Stop() <-bt.done @@ -138,64 +133,6 @@ func (bt *Heartbeat) Run(b *beat.Beat) error { return nil } -// runRunOnce runs the given config then exits immediately after any queued events have been sent to ES -func (bt *Heartbeat) runRunOnce(b *beat.Beat) error { - logp.Info("Starting run_once run. 
This is an experimental feature and may be changed or removed in the future!") - - publishClient, err := core.NewSyncClient(logp.NewLogger("run_once mode"), b.Publisher, beat.ClientConfig{}) - if err != nil { - return fmt.Errorf("could not create sync client: %w", err) - } - defer publishClient.Close() - - wg := &sync.WaitGroup{} - for _, cfg := range bt.config.Monitors { - err := runRunOnceSingleConfig(cfg, publishClient, wg) - if err != nil { - logp.Warn("error running run_once config: %s", err) - } - } - - wg.Wait() - publishClient.Wait() - - logp.Info("Ending run_once run") - - return nil -} - -func runRunOnceSingleConfig(cfg *common.Config, publishClient *core.SyncClient, wg *sync.WaitGroup) (err error) { - sf, err := stdfields.ConfigToStdMonitorFields(cfg) - if err != nil { - return fmt.Errorf("could not get stdmon fields: %w", err) - } - pluginFactory, exists := plugin.GlobalPluginsReg.Get(sf.Type) - if !exists { - return fmt.Errorf("no plugin for type: %s", sf.Type) - } - plugin, err := pluginFactory.Make(sf.Type, cfg) - if err != nil { - return err - } - - results := plugin.RunWrapped(sf) - - wg.Add(1) - go func() { - defer wg.Done() - defer plugin.Close() - for { - event := <-results - if event == nil { - break - } - publishClient.Publish(*event) - } - }() - - return nil -} - // RunStaticMonitors runs the `heartbeat.monitors` portion of the yaml config if present. func (bt *Heartbeat) RunStaticMonitors(b *beat.Beat) (stop func(), err error) { var runners []cfgfile.Runner diff --git a/heartbeat/monitors/factory.go b/heartbeat/monitors/factory.go index 7f660cbb087..11a3b2d9ecd 100644 --- a/heartbeat/monitors/factory.go +++ b/heartbeat/monitors/factory.go @@ -40,11 +40,12 @@ import ( // suitable for config reloading. 
type RunnerFactory struct { info beat.Info - sched *scheduler.Scheduler + addTask scheduler.AddTask byId map[string]*Monitor mtx *sync.Mutex pluginsReg *plugin.PluginsReg logger *logp.Logger + runOnce bool } type publishSettings struct { @@ -67,14 +68,15 @@ type publishSettings struct { } // NewFactory takes a scheduler and creates a RunnerFactory that can create cfgfile.Runner(Monitor) objects. -func NewFactory(info beat.Info, sched *scheduler.Scheduler, pluginsReg *plugin.PluginsReg) *RunnerFactory { +func NewFactory(info beat.Info, addTask scheduler.AddTask, pluginsReg *plugin.PluginsReg, runOnce bool) *RunnerFactory { return &RunnerFactory{ info: info, - sched: sched, + addTask: addTask, byId: map[string]*Monitor{}, mtx: &sync.Mutex{}, pluginsReg: pluginsReg, logger: logp.NewLogger("monitor-factory"), + runOnce: runOnce, } } @@ -116,7 +118,7 @@ func (f *RunnerFactory) Create(p beat.Pipeline, c *common.Config) (cfgfile.Runne } }() } - monitor, err := newMonitor(c, f.pluginsReg, p, f.sched, safeStop) + monitor, err := newMonitor(c, f.pluginsReg, p, f.addTask, safeStop, f.runOnce) if err != nil { return nil, err } diff --git a/heartbeat/monitors/factory_test.go b/heartbeat/monitors/factory_test.go index 4849529cec4..c395050aaa1 100644 --- a/heartbeat/monitors/factory_test.go +++ b/heartbeat/monitors/factory_test.go @@ -20,6 +20,7 @@ package monitors import ( "regexp" "testing" + "time" "github.com/stretchr/testify/require" @@ -153,12 +154,10 @@ func TestDuplicateMonitorIDs(t *testing.T) { reg, built, closed := mockPluginsReg() pipelineConnector := &MockPipelineConnector{} - sched := scheduler.New(1, monitoring.NewRegistry()) - err := sched.Start() - require.NoError(t, err) + sched := scheduler.Create(1, monitoring.NewRegistry(), time.Local, nil, false) defer sched.Stop() - f := NewFactory(binfo, sched, reg) + f := NewFactory(binfo, sched.Add, reg, false) makeTestMon := func() (*Monitor, error) { mIface, err := f.Create(pipelineConnector, serverMonConf) if 
mIface == nil { @@ -169,7 +168,7 @@ func TestDuplicateMonitorIDs(t *testing.T) { } // Ensure that an error is returned on a bad config - _, m0Err := newMonitor(badConf, reg, pipelineConnector, sched, nil) + _, m0Err := newMonitor(badConf, reg, pipelineConnector, sched.Add, nil, false) require.Error(t, m0Err) // Would fail if the previous newMonitor didn't free the monitor.id diff --git a/heartbeat/monitors/monitor.go b/heartbeat/monitors/monitor.go index 9cdbb8ecfd6..669579e31aa 100644 --- a/heartbeat/monitors/monitor.go +++ b/heartbeat/monitors/monitor.go @@ -32,6 +32,7 @@ import ( "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/libbeat/publisher/pipeline" ) // ErrMonitorDisabled is returned when the monitor plugin is marked as disabled. @@ -43,13 +44,19 @@ const ( MON_STOPPED ) +type WrappedClient struct { + Publish func(event beat.Event) + Close func() error + wait func() +} + // Monitor represents a configured recurring monitoring configuredJob loaded from a config file. Starting it // will cause it to run with the given scheduler until Stop() is called. type Monitor struct { stdFields stdfields.StdMonitorFields pluginName string config *common.Config - scheduler *scheduler.Scheduler + addTask scheduler.AddTask configuredJobs []*configuredJob enabled bool state int @@ -65,6 +72,8 @@ type Monitor struct { // stats is the countersRecorder used to record lifecycle events // for global metrics + telemetry stats plugin.RegistryRecorder + + runOnce bool } // String prints a description of the monitor in a threadsafe way. 
It is important that this use threadsafe @@ -74,7 +83,7 @@ func (m *Monitor) String() string { } func checkMonitorConfig(config *common.Config, registrar *plugin.PluginsReg) error { - _, err := newMonitor(config, registrar, nil, nil, nil) + _, err := newMonitor(config, registrar, nil, nil, nil, false) return err } @@ -85,10 +94,11 @@ func newMonitor( config *common.Config, registrar *plugin.PluginsReg, pipelineConnector beat.PipelineConnector, - scheduler *scheduler.Scheduler, + taskAdder scheduler.AddTask, onStop func(*Monitor), + runOnce bool, ) (*Monitor, error) { - m, err := newMonitorUnsafe(config, registrar, pipelineConnector, scheduler, onStop) + m, err := newMonitorUnsafe(config, registrar, pipelineConnector, taskAdder, onStop, runOnce) if m != nil && err != nil { m.Stop() } @@ -101,8 +111,9 @@ func newMonitorUnsafe( config *common.Config, registrar *plugin.PluginsReg, pipelineConnector beat.PipelineConnector, - scheduler *scheduler.Scheduler, + addTask scheduler.AddTask, onStop func(*Monitor), + runOnce bool, ) (*Monitor, error) { // Extract just the Id, Type, and Enabled fields from the config // We'll parse things more precisely later once we know what exact type of @@ -124,13 +135,14 @@ func newMonitorUnsafe( m := &Monitor{ stdFields: standardFields, pluginName: pluginFactory.Name, - scheduler: scheduler, + addTask: addTask, configuredJobs: []*configuredJob{}, pipelineConnector: pipelineConnector, internalsMtx: sync.Mutex{}, config: config, stats: pluginFactory.Stats, state: MON_INIT, + runOnce: runOnce, } if m.stdFields.ID == "" { @@ -211,7 +223,31 @@ func (m *Monitor) Start() { defer m.internalsMtx.Unlock() for _, t := range m.configuredJobs { - t.Start() + if m.runOnce { + client, err := pipeline.NewSyncClient(logp.NewLogger("monitor_task"), t.monitor.pipelineConnector, beat.ClientConfig{}) + if err != nil { + logp.Err("could not start monitor: %v", err) + continue + } + t.Start(&WrappedClient{ + Publish: func(event beat.Event) { + 
client.Publish(event) + }, + Close: client.Close, + wait: client.Wait, + }) + } else { + client, err := m.pipelineConnector.Connect() + if err != nil { + logp.Err("could not start monitor: %v", err) + continue + } + t.Start(&WrappedClient{ + Publish: client.Publish, + Close: client.Close, + wait: func() {}, + }) + } } m.stats.StartMonitor(int64(m.endpoints)) diff --git a/heartbeat/monitors/monitor_test.go b/heartbeat/monitors/monitor_test.go index 9a0962ef8b2..bbcd5b9b74c 100644 --- a/heartbeat/monitors/monitor_test.go +++ b/heartbeat/monitors/monitor_test.go @@ -34,12 +34,10 @@ func TestMonitor(t *testing.T) { reg, built, closed := mockPluginsReg() pipelineConnector := &MockPipelineConnector{} - sched := scheduler.New(1, monitoring.NewRegistry()) - err := sched.Start() - require.NoError(t, err) + sched := scheduler.Create(1, monitoring.NewRegistry(), time.Local, nil, false) defer sched.Stop() - mon, err := newMonitor(serverMonConf, reg, pipelineConnector, sched, nil) + mon, err := newMonitor(serverMonConf, reg, pipelineConnector, sched.Add, nil, false) require.NoError(t, err) mon.Start() @@ -83,12 +81,10 @@ func TestCheckInvalidConfig(t *testing.T) { reg, built, closed := mockPluginsReg() pipelineConnector := &MockPipelineConnector{} - sched := scheduler.New(1, monitoring.NewRegistry()) - err := sched.Start() - require.NoError(t, err) + sched := scheduler.Create(1, monitoring.NewRegistry(), time.Local, nil, false) defer sched.Stop() - m, err := newMonitor(serverMonConf, reg, pipelineConnector, sched, nil) + m, err := newMonitor(serverMonConf, reg, pipelineConnector, sched.Add, nil, false) require.Error(t, err) // This could change if we decide the contract for newMonitor should always return a monitor require.Nil(t, m, "For this test to work we need a nil value for the monitor.") diff --git a/heartbeat/monitors/task.go b/heartbeat/monitors/task.go index d9794da16d1..11b013a9871 100644 --- a/heartbeat/monitors/task.go +++ b/heartbeat/monitors/task.go @@ -37,7 +37,7 
@@ type configuredJob struct { config jobConfig monitor *Monitor cancelFn context.CancelFunc - client beat.Client + client *WrappedClient } func newConfiguredJob(job jobs.Job, config jobConfig, monitor *Monitor) (*configuredJob, error) { @@ -74,17 +74,18 @@ func (t *configuredJob) makeSchedulerTaskFunc() scheduler.TaskFunc { } // Start schedules this configuredJob for execution. -func (t *configuredJob) Start() { +func (t *configuredJob) Start(client *WrappedClient) { var err error - t.client, err = t.monitor.pipelineConnector.Connect() + t.client = client + if err != nil { logp.Err("could not start monitor: %v", err) return } tf := t.makeSchedulerTaskFunc() - t.cancelFn, err = t.monitor.scheduler.Add(t.config.Schedule, t.monitor.stdFields.ID, tf, t.config.Type) + t.cancelFn, err = t.monitor.addTask(t.config.Schedule, t.monitor.stdFields.ID, tf, t.config.Type, client.wait) if err != nil { logp.Err("could not start monitor: %v", err) } @@ -100,7 +101,7 @@ func (t *configuredJob) Stop() { } } -func runPublishJob(job jobs.Job, client beat.Client) []scheduler.TaskFunc { +func runPublishJob(job jobs.Job, client *WrappedClient) []scheduler.TaskFunc { event := &beat.Event{ Fields: common.MapStr{}, } diff --git a/heartbeat/monitors/task_test.go b/heartbeat/monitors/task_test.go index dc0aa7ab23c..64e86b35b88 100644 --- a/heartbeat/monitors/task_test.go +++ b/heartbeat/monitors/task_test.go @@ -96,7 +96,11 @@ func Test_runPublishJob(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { client := &MockBeatClient{} - queue := runPublishJob(tc.job, client) + queue := runPublishJob(tc.job, &WrappedClient{ + Publish: client.Publish, + Close: client.Close, + wait: func() {}, + }) for { if len(queue) == 0 { break diff --git a/heartbeat/scheduler/schedjob_test.go b/heartbeat/scheduler/schedjob_test.go index 48f4bf5a18b..4a6f5e892af 100644 --- a/heartbeat/scheduler/schedjob_test.go +++ b/heartbeat/scheduler/schedjob_test.go @@ -63,7 +63,7 @@ func 
TestSchedJobRun(t *testing.T) { for _, testCase := range testCases { t.Run(testCase.name, func(t *testing.T) { limit := int64(100) - s := NewWithLocation(limit, monitoring.NewRegistry(), tarawaTime(), nil) + s := Create(limit, monitoring.NewRegistry(), tarawaTime(), nil, false) if testCase.overLimit { s.limitSem.Acquire(context.Background(), limit) @@ -98,9 +98,9 @@ func TestSchedJobRun(t *testing.T) { // testRecursiveForkingJob tests that a schedJob that splits into multiple parallel pieces executes without error func TestRecursiveForkingJob(t *testing.T) { - s := NewWithLocation(1000, monitoring.NewRegistry(), tarawaTime(), map[string]config.JobLimit{ + s := Create(1000, monitoring.NewRegistry(), tarawaTime(), map[string]config.JobLimit{ "atype": {Limit: 1}, - }) + }, false) ran := batomic.NewInt(0) var terminalTf TaskFunc = func(ctx context.Context) []TaskFunc { diff --git a/heartbeat/scheduler/scheduler.go b/heartbeat/scheduler/scheduler.go index 3b6d34fa579..8de2d5760d6 100644 --- a/heartbeat/scheduler/scheduler.go +++ b/heartbeat/scheduler/scheduler.go @@ -22,23 +22,17 @@ import ( "errors" "fmt" "math" + "sync" "time" "golang.org/x/sync/semaphore" "github.com/elastic/beats/v7/heartbeat/config" "github.com/elastic/beats/v7/heartbeat/scheduler/timerqueue" - "github.com/elastic/beats/v7/libbeat/common/atomic" "github.com/elastic/beats/v7/libbeat/logp" "github.com/elastic/beats/v7/libbeat/monitoring" ) -const ( - statePreRunning int = iota + 1 - stateRunning - stateStopped -) - var debugf = logp.MakeDebug("scheduler") // ErrInvalidTransition is returned from start/stop when making an invalid state transition, say from preRunning to stopped @@ -48,20 +42,20 @@ var ErrInvalidTransition = fmt.Errorf("invalid state transition") type Scheduler struct { limit int64 limitSem *semaphore.Weighted - state atomic.Int location *time.Location timerQueue *timerqueue.TimerQueue ctx context.Context cancelCtx context.CancelFunc stats 
schedulerStats jobLimitSem map[string]*semaphore.Weighted + runOnce bool + runOnceWg *sync.WaitGroup } type schedulerStats struct { activeJobs *monitoring.Uint // gauge showing number of active jobs activeTasks *monitoring.Uint // gauge showing number of active tasks waitingTasks *monitoring.Uint // number of tasks waiting to run, but constrained by scheduler limit - jobsPerSecond *monitoring.Uint // rate of job processing computed over the past hour jobsMissedDeadline *monitoring.Uint // counter for number of jobs that missed start deadline } @@ -88,13 +82,8 @@ func getJobLimitSem(jobLimitByType map[string]config.JobLimit) map[string]*semap return jobLimitSem } -// New creates a new Scheduler -func New(limit int64, registry *monitoring.Registry) *Scheduler { - return NewWithLocation(limit, registry, time.Local, nil) -} - // NewWithLocation creates a new Scheduler using the given runAt zone. -func NewWithLocation(limit int64, registry *monitoring.Registry, location *time.Location, jobLimitByType map[string]config.JobLimit) *Scheduler { +func Create(limit int64, registry *monitoring.Registry, location *time.Location, jobLimitByType map[string]config.JobLimit, runOnce bool) *Scheduler { ctx, cancelCtx := context.WithCancel(context.Background()) if limit < 1 { @@ -109,12 +98,13 @@ func NewWithLocation(limit int64, registry *monitoring.Registry, location *time. sched := &Scheduler{ limit: limit, location: location, - state: atomic.MakeInt(statePreRunning), ctx: ctx, cancelCtx: cancelCtx, limitSem: semaphore.NewWeighted(limit), jobLimitSem: getJobLimitSem(jobLimitByType), timerQueue: timerqueue.NewTimerQueue(ctx), + runOnce: runOnce, + runOnceWg: &sync.WaitGroup{}, stats: schedulerStats{ activeJobs: activeJobsGauge, @@ -124,24 +114,10 @@ func NewWithLocation(limit int64, registry *monitoring.Registry, location *time. }, } - return sched -} - -// Start the scheduler. Starting a stopped scheduler returns an error. 
-func (s *Scheduler) Start() error { - if s.state.Load() == stateStopped { - return ErrInvalidTransition - } - if !s.state.CAS(statePreRunning, stateRunning) { - return nil // we already running, just exit - } - - s.timerQueue.Start() + sched.timerQueue.Start() + go sched.missedDeadlineReporter() - // Missed deadline reporter - go s.missedDeadlineReporter() - - return nil + return sched } func (s *Scheduler) missedDeadlineReporter() { @@ -160,7 +136,7 @@ func (s *Scheduler) missedDeadlineReporter() { missingNow := s.stats.jobsMissedDeadline.Get() missedDelta := missingNow - missedAtLastCheck if missedDelta > 0 { - logp.Warn("%d tasks have missed their schedule deadlines in the last %s.", missedDelta, interval) + logp.Warn("%d tasks have missed their schedule deadlines by more than 1 second in the last %s.", missedDelta, interval) } missedAtLastCheck = missingNow } @@ -168,25 +144,28 @@ func (s *Scheduler) missedDeadlineReporter() { } // Stop all executing tasks in the scheduler. Cannot be restarted after Stop. -func (s *Scheduler) Stop() error { - if s.state.CAS(stateRunning, stateStopped) { - s.cancelCtx() - return nil - } else if s.state.Load() == stateStopped { - return nil - } +func (s *Scheduler) Stop() { + s.cancelCtx() +} - return ErrInvalidTransition +// Wait until all tasks are done if run in runOnce mode. Will block forever +// if this scheduler does not have the runOnce option set. +// Adding new tasks after this method is invoked is not supported. +func (s *Scheduler) WaitForRunOnce() { + s.runOnceWg.Wait() + s.Stop() } // ErrAlreadyStopped is returned when an Add operation is attempted after the scheduler // has already stopped. var ErrAlreadyStopped = errors.New("attempted to add job to already stopped scheduler") +type AddTask func(sched Schedule, id string, entrypoint TaskFunc, jobType string, waitForPublish func()) (removeFn context.CancelFunc, err error) + // Add adds the given TaskFunc to the current scheduler. 
Will return an error if the scheduler // is done. -func (s *Scheduler) Add(sched Schedule, id string, entrypoint TaskFunc, jobType string) (removeFn context.CancelFunc, err error) { - if s.state.Load() == stateStopped { +func (s *Scheduler) Add(sched Schedule, id string, entrypoint TaskFunc, jobType string, waitForPublish func()) (removeFn context.CancelFunc, err error) { + if errors.Is(s.ctx.Err(), context.Canceled) { return nil, ErrAlreadyStopped } @@ -207,20 +186,31 @@ func (s *Scheduler) Add(sched Schedule, id string, entrypoint TaskFunc, jobType } s.stats.activeJobs.Inc() debugf("Job '%s' started", id) - lastRanAt := newSchedJob(jobCtx, s, id, jobType, entrypoint).run() + sj := newSchedJob(jobCtx, s, id, jobType, entrypoint) + + lastRanAt := sj.run() s.stats.activeJobs.Dec() - s.runOnce(sched.Next(lastRanAt), taskFn) + + if s.runOnce { + waitForPublish() + s.runOnceWg.Done() + } else { + // Schedule the next run + s.runTaskOnce(sched.Next(lastRanAt), taskFn, true) + } debugf("Job '%v' returned at %v", id, time.Now()) } - // We skip using the scheduler to execute the initial tasks for jobs that have RunOnInit returning true. - // You might think it'd be simpler to just invoke runOnce in either case with 0 as a lastRanAt value, - // however, that would caused the missed deadline stats to be incremented. Given that, it's easier - // and slightly more efficient to simply run these tasks immediately in a goroutine. 
-	if sched.RunOnInit() {
-		go taskFn(time.Now())
+	if s.runOnce {
+		s.runOnceWg.Add(1)
+	}
+
+	// Run non-cron tasks immediately, or run all tasks immediately if we're
+	// in RunOnce mode
+	if s.runOnce || sched.RunOnInit() {
+		s.runTaskOnce(time.Now(), taskFn, false)
 	} else {
-		s.runOnce(sched.Next(lastRanAt), taskFn)
+		s.runTaskOnce(sched.Next(lastRanAt), taskFn, true)
 	}
 
 	return func() {
@@ -229,15 +219,18 @@ func (s *Scheduler) Add(sched Schedule, id string, entrypoint TaskFunc, jobType
 	}, nil
 }
 
-func (s *Scheduler) runOnce(runAt time.Time, taskFn timerqueue.TimerTaskFn) {
+// runTaskOnce runs the given task exactly once at the given time. Set deadlineCheck
+// to false if this is the first invocation of this, otherwise the deadline checker
+// will complain about a missed task
+func (s *Scheduler) runTaskOnce(runAt time.Time, taskFn timerqueue.TimerTaskFn, deadlineCheck bool) {
 	now := time.Now().In(s.location)
-	if runAt.Before(now) {
-		// Our last invocation went long!
+	// Check if the task is more than 1 second late
+	if deadlineCheck && runAt.Sub(now) < time.Second {
 		s.stats.jobsMissedDeadline.Inc()
 	}
 
 	// Schedule task to run sometime in the future. Wrap the task in a go-routine so it doesn't
 	// block the timer thread.
asyncTask := func(now time.Time) { go taskFn(now) } s.timerQueue.Push(runAt, asyncTask) } diff --git a/heartbeat/scheduler/scheduler_test.go b/heartbeat/scheduler/scheduler_test.go index 61c062184a4..371be5f69ef 100644 --- a/heartbeat/scheduler/scheduler_test.go +++ b/heartbeat/scheduler/scheduler_test.go @@ -44,14 +44,8 @@ func tarawaTime() *time.Location { return loc } -func TestNew(t *testing.T) { - scheduler := New(123, monitoring.NewRegistry()) - assert.Equal(t, int64(123), scheduler.limit) - assert.Equal(t, time.Local, scheduler.location) -} - func TestNewWithLocation(t *testing.T) { - scheduler := NewWithLocation(123, monitoring.NewRegistry(), tarawaTime(), nil) + scheduler := Create(123, monitoring.NewRegistry(), tarawaTime(), nil, false) assert.Equal(t, int64(123), scheduler.limit) assert.Equal(t, tarawaTime(), scheduler.location) } @@ -83,23 +77,23 @@ func testTaskTimes(limit uint32, fn TaskFunc) TaskFunc { } } -func TestScheduler_Start(t *testing.T) { +func TestSchedulerRun(t *testing.T) { // We use tarawa runAt because it could expose some weird runAt math if by accident some code // relied on the local TZ. 
- s := NewWithLocation(10, monitoring.NewRegistry(), tarawaTime(), nil) + s := Create(10, monitoring.NewRegistry(), tarawaTime(), nil, false) defer s.Stop() executed := make(chan string) - preAddEvents := uint32(10) - s.Add(testSchedule{0}, "preAdd", testTaskTimes(preAddEvents, func(_ context.Context) []TaskFunc { - executed <- "preAdd" + initialEvents := uint32(10) + s.Add(testSchedule{0}, "add", testTaskTimes(initialEvents, func(_ context.Context) []TaskFunc { + executed <- "initial" cont := func(_ context.Context) []TaskFunc { - executed <- "preAddCont" + executed <- "initialCont" return nil } return []TaskFunc{cont} - }), "http") + }), "http", nil) removedEvents := uint32(1) // This function will be removed after being invoked once @@ -114,28 +108,26 @@ func TestScheduler_Start(t *testing.T) { } // Attempt to execute this twice to see if remove() had any effect removeMtx.Lock() - remove, err := s.Add(testSchedule{}, "removed", testTaskTimes(removedEvents+1, testFn), "http") + remove, err := s.Add(testSchedule{}, "removed", testTaskTimes(removedEvents+1, testFn), "http", nil) require.NoError(t, err) require.NotNil(t, remove) removeMtx.Unlock() - s.Start() - - postAddEvents := uint32(10) - s.Add(testSchedule{}, "postAdd", testTaskTimes(postAddEvents, func(_ context.Context) []TaskFunc { - executed <- "postAdd" + postRemoveEvents := uint32(10) + s.Add(testSchedule{}, "postRemove", testTaskTimes(postRemoveEvents, func(_ context.Context) []TaskFunc { + executed <- "postRemove" cont := func(_ context.Context) []TaskFunc { - executed <- "postAddCont" + executed <- "postRemoveCont" return nil } return []TaskFunc{cont} - }), "http") + }), "http", nil) received := make([]string, 0) // We test for a good number of events in this loop because we want to ensure that the remove() took effect - // Otherwise, we might only do 1 preAdd and 1 postAdd event + // Otherwise, we might only do 1 preAdd and 1 postRemove event // We double the number of pre/post add events to account 
for their continuations - totalExpected := preAddEvents*2 + removedEvents + postAddEvents*2 + totalExpected := initialEvents*2 + removedEvents + postRemoveEvents*2 for uint32(len(received)) < totalExpected { select { case got := <-executed: @@ -147,31 +139,53 @@ func TestScheduler_Start(t *testing.T) { } // The removed callback should only have been executed once - counts := map[string]uint32{"preAdd": 0, "postAdd": 0, "preAddCont": 0, "postAddcont": 0, "removed": 0} + counts := map[string]uint32{"initial": 0, "initialCont": 0, "removed": 0, "postRemove": 0, "postRemoveCont": 0} for _, s := range received { counts[s]++ } // convert with int() because the printed output is nicer than hex - assert.Equal(t, int(preAddEvents), int(counts["preAdd"])) - assert.Equal(t, int(preAddEvents), int(counts["preAddCont"])) - assert.Equal(t, int(postAddEvents), int(counts["postAdd"])) - assert.Equal(t, int(postAddEvents), int(counts["postAddCont"])) + assert.Equal(t, int(initialEvents), int(counts["initial"])) + assert.Equal(t, int(initialEvents), int(counts["initialCont"])) assert.Equal(t, int(removedEvents), int(counts["removed"])) + assert.Equal(t, int(postRemoveEvents), int(counts["postRemove"])) + assert.Equal(t, int(postRemoveEvents), int(counts["postRemoveCont"])) +} + +func TestScheduler_WaitForRunOnce(t *testing.T) { + s := Create(10, monitoring.NewRegistry(), tarawaTime(), nil, true) + + defer s.Stop() + + executed := new(uint32) + waits := new(uint32) + + s.Add(testSchedule{0}, "runOnce", func(_ context.Context) []TaskFunc { + cont := func(_ context.Context) []TaskFunc { + // Make sure we actually wait for the task! 
+ time.Sleep(time.Millisecond * 250) + atomic.AddUint32(executed, 1) + return nil + } + return []TaskFunc{cont} + }, "http", func() { atomic.AddUint32(waits, 1) }) + + s.WaitForRunOnce() + require.Equal(t, uint32(1), atomic.LoadUint32(executed)) + require.Equal(t, uint32(1), atomic.LoadUint32(waits)) } func TestScheduler_Stop(t *testing.T) { - s := NewWithLocation(10, monitoring.NewRegistry(), tarawaTime(), nil) + s := Create(10, monitoring.NewRegistry(), tarawaTime(), nil, false) executed := make(chan struct{}) - require.NoError(t, s.Start()) - require.NoError(t, s.Stop()) + s.Stop() _, err := s.Add(testSchedule{}, "testPostStop", testTaskTimes(1, func(_ context.Context) []TaskFunc { executed <- struct{}{} return nil - }), "http") + }), "http", nil) assert.Equal(t, ErrAlreadyStopped, err) } @@ -235,7 +249,7 @@ func TestSchedTaskLimits(t *testing.T) { jobType: {Limit: tt.limit}, } } - s := NewWithLocation(math.MaxInt64, monitoring.NewRegistry(), tarawaTime(), jobConfigByType) + s := Create(math.MaxInt64, monitoring.NewRegistry(), tarawaTime(), jobConfigByType, false) var taskArr []int wg := sync.WaitGroup{} wg.Add(tt.numJobs) @@ -257,7 +271,7 @@ func TestSchedTaskLimits(t *testing.T) { } func BenchmarkScheduler(b *testing.B) { - s := NewWithLocation(0, monitoring.NewRegistry(), tarawaTime(), nil) + s := Create(0, monitoring.NewRegistry(), tarawaTime(), nil, false) sched := testSchedule{0} @@ -266,13 +280,11 @@ func BenchmarkScheduler(b *testing.B) { _, err := s.Add(sched, "testPostStop", func(_ context.Context) []TaskFunc { executed <- struct{}{} return nil - }, "http") + }, "http", nil) assert.NoError(b, err) } - err := s.Start() defer s.Stop() - assert.NoError(b, err) count := 0 for count < b.N { diff --git a/x-pack/functionbeat/function/core/sync_client.go b/libbeat/publisher/pipeline/sync_client.go similarity index 75% rename from x-pack/functionbeat/function/core/sync_client.go rename to libbeat/publisher/pipeline/sync_client.go index cc1b0c37f57..464143b3149 
100644
--- a/x-pack/functionbeat/function/core/sync_client.go
+++ b/libbeat/publisher/pipeline/sync_client.go
@@ -1,8 +1,21 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
-
-package core
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package pipeline
 
 import (
 	"sync"
@@ -14,9 +27,8 @@ import (
 )
 
 // Client implements the interface used by all the functionbeat function, we only implement a synchronous
-// client. This interface superseed the core beat.Client interface inside functionbeat because our publish
-// and publishAll methods can return an error.
-type Client interface {
+// client. This interface supersedes the core beat.Client interface and can return errors on publish.
+type ISyncClient interface {
 	// Publish accepts a unique events and will publish it to the pipeline.
Publish(beat.Event) error diff --git a/x-pack/functionbeat/function/core/sync_client_test.go b/libbeat/publisher/pipeline/sync_client_test.go similarity index 73% rename from x-pack/functionbeat/function/core/sync_client_test.go rename to libbeat/publisher/pipeline/sync_client_test.go index 4d5284eeb5c..69a42164e1c 100644 --- a/x-pack/functionbeat/function/core/sync_client_test.go +++ b/libbeat/publisher/pipeline/sync_client_test.go @@ -1,8 +1,21 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package core +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package pipeline import ( "testing" diff --git a/x-pack/functionbeat/function/beater/functionbeat.go b/x-pack/functionbeat/function/beater/functionbeat.go index be8d59b9db5..9118fe77165 100644 --- a/x-pack/functionbeat/function/beater/functionbeat.go +++ b/x-pack/functionbeat/function/beater/functionbeat.go @@ -13,6 +13,7 @@ import ( "github.com/elastic/beats/v7/libbeat/common/fmtstr" "github.com/elastic/beats/v7/libbeat/outputs/elasticsearch" + "github.com/elastic/beats/v7/libbeat/publisher/pipeline" "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common" @@ -152,11 +153,11 @@ type fnExtraConfig struct { Index fmtstr.EventFormatString `config:"index"` } -func makeClientFactory(log *logp.Logger, pipeline beat.Pipeline, beatInfo beat.Info) func(*common.Config) (core.Client, error) { +func makeClientFactory(log *logp.Logger, pipe beat.Pipeline, beatInfo beat.Info) func(*common.Config) (pipeline.ISyncClient, error) { // Each function has his own client to the publisher pipeline, // publish operation will block the calling thread, when the method unwrap we have received the // ACK for the batch. 
- return func(cfg *common.Config) (core.Client, error) { + return func(cfg *common.Config) (pipeline.ISyncClient, error) { c := fnExtraConfig{} if err := cfg.Unpack(&c); err != nil { @@ -168,7 +169,7 @@ func makeClientFactory(log *logp.Logger, pipeline beat.Pipeline, beatInfo beat.I return nil, err } - client, err := core.NewSyncClient(log, pipeline, beat.ClientConfig{ + client, err := pipeline.NewSyncClient(log, pipe, beat.ClientConfig{ PublishMode: beat.GuaranteedSend, Processing: beat.ProcessingConfig{ Processor: funcProcessors, diff --git a/x-pack/functionbeat/function/provider/provider.go b/x-pack/functionbeat/function/provider/provider.go index a2198251a07..c08db30e1d4 100644 --- a/x-pack/functionbeat/function/provider/provider.go +++ b/x-pack/functionbeat/function/provider/provider.go @@ -13,16 +13,17 @@ import ( "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/feature" "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/libbeat/publisher/pipeline" "github.com/elastic/beats/v7/x-pack/functionbeat/function/core" "github.com/elastic/beats/v7/x-pack/functionbeat/function/telemetry" ) // Create a new pipeline client based on the function configuration. 
-type clientFactory func(*common.Config) (core.Client, error) +type clientFactory func(*common.Config) (pipeline.ISyncClient, error) // Function is temporary type Function interface { - Run(context.Context, core.Client, telemetry.T) error + Run(context.Context, pipeline.ISyncClient, telemetry.T) error Name() string } diff --git a/x-pack/functionbeat/function/provider/provider_test.go b/x-pack/functionbeat/function/provider/provider_test.go index e93d8bd5546..6e940ff302b 100644 --- a/x-pack/functionbeat/function/provider/provider_test.go +++ b/x-pack/functionbeat/function/provider/provider_test.go @@ -14,7 +14,7 @@ import ( "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common" - "github.com/elastic/beats/v7/x-pack/functionbeat/function/core" + "github.com/elastic/beats/v7/libbeat/publisher/pipeline" "github.com/elastic/beats/v7/x-pack/functionbeat/function/telemetry" ) @@ -22,7 +22,7 @@ type simpleFunction struct { err error } -func (s *simpleFunction) Run(ctx context.Context, client core.Client, _ telemetry.T) error { +func (s *simpleFunction) Run(ctx context.Context, client pipeline.ISyncClient, _ telemetry.T) error { return s.err } @@ -42,7 +42,7 @@ func TestRunnable(t *testing.T) { err := errors.New("oops") runnable := Runnable{ config: common.NewConfig(), - makeClient: func(cfg *common.Config) (core.Client, error) { return nil, err }, + makeClient: func(cfg *common.Config) (pipeline.ISyncClient, error) { return nil, err }, function: &simpleFunction{err: nil}, } @@ -54,7 +54,7 @@ func TestRunnable(t *testing.T) { err := errors.New("function error") runnable := Runnable{ config: common.NewConfig(), - makeClient: func(cfg *common.Config) (core.Client, error) { return &mockClient{}, nil }, + makeClient: func(cfg *common.Config) (pipeline.ISyncClient, error) { return &mockClient{}, nil }, function: &simpleFunction{err: err}, } @@ -65,7 +65,7 @@ func TestRunnable(t *testing.T) { t.Run("when 
there is no error run and exit normaly", func(t *testing.T) { runnable := Runnable{ config: common.NewConfig(), - makeClient: func(cfg *common.Config) (core.Client, error) { return &mockClient{}, nil }, + makeClient: func(cfg *common.Config) (pipeline.ISyncClient, error) { return &mockClient{}, nil }, function: &simpleFunction{err: nil}, } diff --git a/x-pack/functionbeat/function/provider/registry_test.go b/x-pack/functionbeat/function/provider/registry_test.go index 3e05e43db69..cb24e0f0ca4 100644 --- a/x-pack/functionbeat/function/provider/registry_test.go +++ b/x-pack/functionbeat/function/provider/registry_test.go @@ -15,6 +15,7 @@ import ( "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/feature" "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/libbeat/publisher/pipeline" "github.com/elastic/beats/v7/x-pack/functionbeat/function/core" "github.com/elastic/beats/v7/x-pack/functionbeat/function/telemetry" ) @@ -49,8 +50,10 @@ type mockFunction struct { name string } -func (mf *mockFunction) Run(ctx context.Context, client core.Client, t telemetry.T) error { return nil } -func (mf *mockFunction) Name() string { return mf.name } +func (mf *mockFunction) Run(ctx context.Context, client pipeline.ISyncClient, t telemetry.T) error { + return nil +} +func (mf *mockFunction) Name() string { return mf.name } func testProviderLookup(t *testing.T) { name := "myprovider" diff --git a/x-pack/functionbeat/provider/aws/aws/api_gateway_proxy.go b/x-pack/functionbeat/provider/aws/aws/api_gateway_proxy.go index 6d897b9c893..c8ecfe8786c 100644 --- a/x-pack/functionbeat/provider/aws/aws/api_gateway_proxy.go +++ b/x-pack/functionbeat/provider/aws/aws/api_gateway_proxy.go @@ -16,7 +16,7 @@ import ( "github.com/elastic/beats/v7/libbeat/common/cfgwarn" "github.com/elastic/beats/v7/libbeat/feature" "github.com/elastic/beats/v7/libbeat/logp" - 
"github.com/elastic/beats/v7/x-pack/functionbeat/function/core" + "github.com/elastic/beats/v7/libbeat/publisher/pipeline" "github.com/elastic/beats/v7/x-pack/functionbeat/function/provider" "github.com/elastic/beats/v7/x-pack/functionbeat/function/telemetry" "github.com/elastic/beats/v7/x-pack/functionbeat/provider/aws/aws/transformer" @@ -45,7 +45,7 @@ func APIGatewayProxyDetails() feature.Details { } // Run starts the lambda function and wait for web triggers. -func (a *APIGatewayProxy) Run(_ context.Context, client core.Client, telemetry telemetry.T) error { +func (a *APIGatewayProxy) Run(_ context.Context, client pipeline.ISyncClient, telemetry telemetry.T) error { telemetry.AddTriggeredFunction() lambda.Start(a.createHandler(client)) @@ -53,7 +53,7 @@ func (a *APIGatewayProxy) Run(_ context.Context, client core.Client, telemetry t } func (a *APIGatewayProxy) createHandler( - client core.Client, + client pipeline.ISyncClient, ) func(request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) { return func(request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) { a.log.Debugf("The handler receives a new event from the gateway (requestID: %s)", request.RequestContext.RequestID) diff --git a/x-pack/functionbeat/provider/aws/aws/cloudwatch_kinesis.go b/x-pack/functionbeat/provider/aws/aws/cloudwatch_kinesis.go index cb976702ea5..247e239dd49 100644 --- a/x-pack/functionbeat/provider/aws/aws/cloudwatch_kinesis.go +++ b/x-pack/functionbeat/provider/aws/aws/cloudwatch_kinesis.go @@ -15,7 +15,7 @@ import ( "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/feature" "github.com/elastic/beats/v7/libbeat/logp" - "github.com/elastic/beats/v7/x-pack/functionbeat/function/core" + "github.com/elastic/beats/v7/libbeat/publisher/pipeline" "github.com/elastic/beats/v7/x-pack/functionbeat/function/provider" 
"github.com/elastic/beats/v7/x-pack/functionbeat/function/telemetry" "github.com/elastic/beats/v7/x-pack/functionbeat/provider/aws/aws/transformer" @@ -70,14 +70,14 @@ func CloudwatchKinesisDetails() feature.Details { } // Run starts the lambda function and wait for web triggers. -func (c *CloudwatchKinesis) Run(_ context.Context, client core.Client, t telemetry.T) error { +func (c *CloudwatchKinesis) Run(_ context.Context, client pipeline.ISyncClient, t telemetry.T) error { t.AddTriggeredFunction() lambdarunner.Start(c.createHandler(client)) return nil } -func (c *CloudwatchKinesis) createHandler(client core.Client) func(request events.KinesisEvent) error { +func (c *CloudwatchKinesis) createHandler(client pipeline.ISyncClient) func(request events.KinesisEvent) error { return func(request events.KinesisEvent) error { c.log.Debugf("The handler receives %d events", len(request.Records)) diff --git a/x-pack/functionbeat/provider/aws/aws/cloudwatch_logs.go b/x-pack/functionbeat/provider/aws/aws/cloudwatch_logs.go index ebe379dfe49..7cfaab4fcaf 100644 --- a/x-pack/functionbeat/provider/aws/aws/cloudwatch_logs.go +++ b/x-pack/functionbeat/provider/aws/aws/cloudwatch_logs.go @@ -22,7 +22,7 @@ import ( "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/feature" "github.com/elastic/beats/v7/libbeat/logp" - "github.com/elastic/beats/v7/x-pack/functionbeat/function/core" + "github.com/elastic/beats/v7/libbeat/publisher/pipeline" "github.com/elastic/beats/v7/x-pack/functionbeat/function/provider" "github.com/elastic/beats/v7/x-pack/functionbeat/function/telemetry" "github.com/elastic/beats/v7/x-pack/functionbeat/provider/aws/aws/transformer" @@ -106,7 +106,7 @@ func CloudwatchLogsDetails() feature.Details { } // Run start the AWS lambda handles and will transform any events received to the pipeline. 
-func (c *CloudwatchLogs) Run(_ context.Context, client core.Client, t telemetry.T) error { +func (c *CloudwatchLogs) Run(_ context.Context, client pipeline.ISyncClient, t telemetry.T) error { t.AddTriggeredFunction() lambdarunner.Start(c.createHandler(client)) @@ -114,7 +114,7 @@ func (c *CloudwatchLogs) Run(_ context.Context, client core.Client, t telemetry. } func (c *CloudwatchLogs) createHandler( - client core.Client, + client pipeline.ISyncClient, ) func(request events.CloudwatchLogsEvent) error { return func(request events.CloudwatchLogsEvent) error { parsedEvent, err := request.AWSLogs.Parse() diff --git a/x-pack/functionbeat/provider/aws/aws/kinesis.go b/x-pack/functionbeat/provider/aws/aws/kinesis.go index 2cdc02d075e..3ff7a6513cf 100644 --- a/x-pack/functionbeat/provider/aws/aws/kinesis.go +++ b/x-pack/functionbeat/provider/aws/aws/kinesis.go @@ -20,7 +20,7 @@ import ( "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/feature" "github.com/elastic/beats/v7/libbeat/logp" - "github.com/elastic/beats/v7/x-pack/functionbeat/function/core" + "github.com/elastic/beats/v7/libbeat/publisher/pipeline" "github.com/elastic/beats/v7/x-pack/functionbeat/function/provider" "github.com/elastic/beats/v7/x-pack/functionbeat/function/telemetry" "github.com/elastic/beats/v7/x-pack/functionbeat/provider/aws/aws/transformer" @@ -135,14 +135,14 @@ func KinesisDetails() feature.Details { } // Run starts the lambda function and wait for web triggers. 
-func (k *Kinesis) Run(_ context.Context, client core.Client, t telemetry.T) error { +func (k *Kinesis) Run(_ context.Context, client pipeline.ISyncClient, t telemetry.T) error { t.AddTriggeredFunction() lambdarunner.Start(k.createHandler(client)) return nil } -func (k *Kinesis) createHandler(client core.Client) func(request events.KinesisEvent) error { +func (k *Kinesis) createHandler(client pipeline.ISyncClient) func(request events.KinesisEvent) error { return func(request events.KinesisEvent) error { k.log.Debugf("The handler receives %d events", len(request.Records)) diff --git a/x-pack/functionbeat/provider/aws/aws/sqs.go b/x-pack/functionbeat/provider/aws/aws/sqs.go index 4bc1b9593e6..859c726884b 100644 --- a/x-pack/functionbeat/provider/aws/aws/sqs.go +++ b/x-pack/functionbeat/provider/aws/aws/sqs.go @@ -18,7 +18,7 @@ import ( "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/feature" "github.com/elastic/beats/v7/libbeat/logp" - "github.com/elastic/beats/v7/x-pack/functionbeat/function/core" + "github.com/elastic/beats/v7/libbeat/publisher/pipeline" "github.com/elastic/beats/v7/x-pack/functionbeat/function/provider" "github.com/elastic/beats/v7/x-pack/functionbeat/function/telemetry" "github.com/elastic/beats/v7/x-pack/functionbeat/provider/aws/aws/transformer" @@ -68,14 +68,14 @@ func SQSDetails() feature.Details { } // Run starts the lambda function and wait for web triggers. 
-func (s *SQS) Run(_ context.Context, client core.Client, t telemetry.T) error { +func (s *SQS) Run(_ context.Context, client pipeline.ISyncClient, t telemetry.T) error { t.AddTriggeredFunction() lambdarunner.Start(s.createHandler(client)) return nil } -func (s *SQS) createHandler(client core.Client) func(request events.SQSEvent) error { +func (s *SQS) createHandler(client pipeline.ISyncClient) func(request events.SQSEvent) error { return func(request events.SQSEvent) error { s.log.Debugf("The handler receives %d events", len(request.Records)) diff --git a/x-pack/functionbeat/provider/local/local/local.go b/x-pack/functionbeat/provider/local/local/local.go index d7b76824a94..d0d849688d0 100644 --- a/x-pack/functionbeat/provider/local/local/local.go +++ b/x-pack/functionbeat/provider/local/local/local.go @@ -13,7 +13,7 @@ import ( "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/feature" - "github.com/elastic/beats/v7/x-pack/functionbeat/function/core" + "github.com/elastic/beats/v7/libbeat/publisher/pipeline" "github.com/elastic/beats/v7/x-pack/functionbeat/function/provider" "github.com/elastic/beats/v7/x-pack/functionbeat/function/telemetry" ) @@ -44,7 +44,7 @@ func NewStdinFunction( // Run reads events from the STDIN and send them to the publisher pipeline, will stop reading by // either by an external signal to stop or by reaching EOF. When EOF is reached functionbeat will shutdown. 
-func (s *StdinFunction) Run(ctx context.Context, client core.Client, _ telemetry.T) error { +func (s *StdinFunction) Run(ctx context.Context, client pipeline.ISyncClient, _ telemetry.T) error { errChan := make(chan error) defer close(errChan) lineChan := make(chan string) From 81ed8f848aa04788cdcb73715bc776ceb081c464 Mon Sep 17 00:00:00 2001 From: Alex Resnick Date: Wed, 15 Dec 2021 03:35:27 -0600 Subject: [PATCH 09/57] Add default region config to AWS (#29415) * Add default regon config to AWS * update changelog * Add tests --- CHANGELOG.next.asciidoc | 1 + filebeat/docs/modules/aws.asciidoc | 10 ++++++ x-pack/filebeat/filebeat.reference.yml | 18 ++++++++++ x-pack/filebeat/module/aws/_meta/config.yml | 18 ++++++++++ .../filebeat/module/aws/_meta/docs.asciidoc | 10 ++++++ .../module/aws/cloudtrail/config/aws-s3.yml | 4 +-- .../module/aws/cloudtrail/manifest.yml | 1 + .../module/aws/cloudwatch/config/aws-s3.yml | 4 +++ .../module/aws/cloudwatch/manifest.yml | 1 + .../filebeat/module/aws/ec2/config/aws-s3.yml | 4 +++ x-pack/filebeat/module/aws/ec2/manifest.yml | 1 + .../filebeat/module/aws/elb/config/aws-s3.yml | 4 +++ x-pack/filebeat/module/aws/elb/manifest.yml | 1 + .../module/aws/s3access/config/aws-s3.yml | 4 +++ .../filebeat/module/aws/s3access/manifest.yml | 1 + .../module/aws/vpcflow/config/input.yml | 4 +++ .../filebeat/module/aws/vpcflow/manifest.yml | 1 + .../awsfargate/log/config/aws-cloudwatch.yml | 4 +++ .../module/awsfargate/log/manifest.yml | 1 + x-pack/filebeat/modules.d/aws.yml.disabled | 18 ++++++++++ x-pack/libbeat/common/aws/credentials.go | 18 +++++----- x-pack/libbeat/common/aws/credentials_test.go | 33 +++++++++++++++++++ .../docs/aws-credentials-config.asciidoc | 1 + 23 files changed, 150 insertions(+), 12 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 02d272b963c..15434665d0b 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -230,6 +230,7 @@ 
https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Support self signed certificates on outputs {pull}29229[29229] - Update k8s library {pull}29394[29394] - Add FIPS configuration option for all AWS API calls. {pull}[28899] +- Add `default_region` config to AWS common module. {pull}[29415] *Auditbeat* diff --git a/filebeat/docs/modules/aws.asciidoc b/filebeat/docs/modules/aws.asciidoc index 097e22a741c..65bc668de05 100644 --- a/filebeat/docs/modules/aws.asciidoc +++ b/filebeat/docs/modules/aws.asciidoc @@ -61,6 +61,7 @@ Example config: #var.visibility_timeout: 300s #var.api_timeout: 120s #var.endpoint: amazonaws.com + #var.default_region: us-east-1 #var.role_arn: arn:aws:iam::123456789012:role/test-mb #var.proxy_url: http://proxy:8080 @@ -79,6 +80,7 @@ Example config: #var.visibility_timeout: 300s #var.api_timeout: 120s #var.endpoint: amazonaws.com + #var.default_region: us-east-1 #var.role_arn: arn:aws:iam::123456789012:role/test-mb #var.proxy_url: http://proxy:8080 @@ -97,6 +99,7 @@ Example config: #var.visibility_timeout: 300s #var.api_timeout: 120s #var.endpoint: amazonaws.com + #var.default_region: us-east-1 #var.role_arn: arn:aws:iam::123456789012:role/test-mb #var.proxy_url: http://proxy:8080 @@ -115,6 +118,7 @@ Example config: #var.visibility_timeout: 300s #var.api_timeout: 120s #var.endpoint: amazonaws.com + #var.default_region: us-east-1 #var.role_arn: arn:aws:iam::123456789012:role/test-mb #var.proxy_url: http://proxy:8080 @@ -133,6 +137,7 @@ Example config: #var.visibility_timeout: 300s #var.api_timeout: 120s #var.endpoint: amazonaws.com + #var.default_region: us-east-1 #var.role_arn: arn:aws:iam::123456789012:role/test-mb #var.proxy_url: http://proxy:8080 @@ -151,6 +156,7 @@ Example config: #var.visibility_timeout: 300s #var.api_timeout: 120s #var.endpoint: amazonaws.com + #var.default_region: us-east-1 #var.role_arn: arn:aws:iam::123456789012:role/test-mb #var.proxy_url: http://proxy:8080 ---- @@ -192,6 +198,10 
@@ Prefix to apply for the list request to the S3 bucket. Default empty. Custom endpoint used to access AWS APIs. +*`var.default_region`*:: + +Default region to query if no other region is set. + *`var.shared_credential_file`*:: Filename of AWS credential file. diff --git a/x-pack/filebeat/filebeat.reference.yml b/x-pack/filebeat/filebeat.reference.yml index d5df5da53ab..1abc29932d8 100644 --- a/x-pack/filebeat/filebeat.reference.yml +++ b/x-pack/filebeat/filebeat.reference.yml @@ -151,6 +151,9 @@ filebeat.modules: # Custom endpoint used to access AWS APIs #var.endpoint: amazonaws.com + # Default region to query if no other region is set + #var.default_region: us-east-1 + # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb @@ -209,6 +212,9 @@ filebeat.modules: # Custom endpoint used to access AWS APIs #var.endpoint: amazonaws.com + # Default region to query if no other region is set + #var.default_region: us-east-1 + # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb @@ -267,6 +273,9 @@ filebeat.modules: # Custom endpoint used to access AWS APIs #var.endpoint: amazonaws.com + # Default region to query if no other region is set + #var.default_region: us-east-1 + # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb @@ -325,6 +334,9 @@ filebeat.modules: # Custom endpoint used to access AWS APIs #var.endpoint: amazonaws.com + # Default region to query if no other region is set + #var.default_region: us-east-1 + # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb @@ -383,6 +395,9 @@ filebeat.modules: # Custom endpoint used to access AWS APIs #var.endpoint: amazonaws.com + # Default region to query if no other region is set + #var.default_region: us-east-1 + # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb @@ -441,6 +456,9 @@ filebeat.modules: # Custom endpoint used to access AWS APIs #var.endpoint: amazonaws.com + # Default region to 
query if no other region is set + #var.default_region: us-east-1 + # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb diff --git a/x-pack/filebeat/module/aws/_meta/config.yml b/x-pack/filebeat/module/aws/_meta/config.yml index 35d92a11bfe..60213ba811a 100644 --- a/x-pack/filebeat/module/aws/_meta/config.yml +++ b/x-pack/filebeat/module/aws/_meta/config.yml @@ -54,6 +54,9 @@ # Custom endpoint used to access AWS APIs #var.endpoint: amazonaws.com + # Default region to query if no other region is set + #var.default_region: us-east-1 + # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb @@ -112,6 +115,9 @@ # Custom endpoint used to access AWS APIs #var.endpoint: amazonaws.com + # Default region to query if no other region is set + #var.default_region: us-east-1 + # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb @@ -170,6 +176,9 @@ # Custom endpoint used to access AWS APIs #var.endpoint: amazonaws.com + # Default region to query if no other region is set + #var.default_region: us-east-1 + # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb @@ -228,6 +237,9 @@ # Custom endpoint used to access AWS APIs #var.endpoint: amazonaws.com + # Default region to query if no other region is set + #var.default_region: us-east-1 + # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb @@ -286,6 +298,9 @@ # Custom endpoint used to access AWS APIs #var.endpoint: amazonaws.com + # Default region to query if no other region is set + #var.default_region: us-east-1 + # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb @@ -344,6 +359,9 @@ # Custom endpoint used to access AWS APIs #var.endpoint: amazonaws.com + # Default region to query if no other region is set + #var.default_region: us-east-1 + # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb diff --git a/x-pack/filebeat/module/aws/_meta/docs.asciidoc 
b/x-pack/filebeat/module/aws/_meta/docs.asciidoc index 3fee8460161..b9afe334257 100644 --- a/x-pack/filebeat/module/aws/_meta/docs.asciidoc +++ b/x-pack/filebeat/module/aws/_meta/docs.asciidoc @@ -56,6 +56,7 @@ Example config: #var.visibility_timeout: 300s #var.api_timeout: 120s #var.endpoint: amazonaws.com + #var.default_region: us-east-1 #var.role_arn: arn:aws:iam::123456789012:role/test-mb #var.proxy_url: http://proxy:8080 @@ -74,6 +75,7 @@ Example config: #var.visibility_timeout: 300s #var.api_timeout: 120s #var.endpoint: amazonaws.com + #var.default_region: us-east-1 #var.role_arn: arn:aws:iam::123456789012:role/test-mb #var.proxy_url: http://proxy:8080 @@ -92,6 +94,7 @@ Example config: #var.visibility_timeout: 300s #var.api_timeout: 120s #var.endpoint: amazonaws.com + #var.default_region: us-east-1 #var.role_arn: arn:aws:iam::123456789012:role/test-mb #var.proxy_url: http://proxy:8080 @@ -110,6 +113,7 @@ Example config: #var.visibility_timeout: 300s #var.api_timeout: 120s #var.endpoint: amazonaws.com + #var.default_region: us-east-1 #var.role_arn: arn:aws:iam::123456789012:role/test-mb #var.proxy_url: http://proxy:8080 @@ -128,6 +132,7 @@ Example config: #var.visibility_timeout: 300s #var.api_timeout: 120s #var.endpoint: amazonaws.com + #var.default_region: us-east-1 #var.role_arn: arn:aws:iam::123456789012:role/test-mb #var.proxy_url: http://proxy:8080 @@ -146,6 +151,7 @@ Example config: #var.visibility_timeout: 300s #var.api_timeout: 120s #var.endpoint: amazonaws.com + #var.default_region: us-east-1 #var.role_arn: arn:aws:iam::123456789012:role/test-mb #var.proxy_url: http://proxy:8080 ---- @@ -187,6 +193,10 @@ Prefix to apply for the list request to the S3 bucket. Default empty. Custom endpoint used to access AWS APIs. +*`var.default_region`*:: + +Default region to query if no other region is set. + *`var.shared_credential_file`*:: Filename of AWS credential file. 
diff --git a/x-pack/filebeat/module/aws/cloudtrail/config/aws-s3.yml b/x-pack/filebeat/module/aws/cloudtrail/config/aws-s3.yml index 48f38574e71..8c98bc31be6 100644 --- a/x-pack/filebeat/module/aws/cloudtrail/config/aws-s3.yml +++ b/x-pack/filebeat/module/aws/cloudtrail/config/aws-s3.yml @@ -49,8 +49,8 @@ visibility_timeout: {{ .visibility_timeout }} api_timeout: {{ .api_timeout }} {{ end }} -{{ if .endpoint }} -endpoint: {{ .endpoint }} +{{ if .default_region }} +default_region: {{ .default_region }} {{ end }} {{ if .access_key_id }} diff --git a/x-pack/filebeat/module/aws/cloudtrail/manifest.yml b/x-pack/filebeat/module/aws/cloudtrail/manifest.yml index 1eb9d949b01..f19760eb637 100644 --- a/x-pack/filebeat/module/aws/cloudtrail/manifest.yml +++ b/x-pack/filebeat/module/aws/cloudtrail/manifest.yml @@ -13,6 +13,7 @@ var: - name: visibility_timeout - name: api_timeout - name: endpoint + - name: default_region - name: access_key_id - name: secret_access_key - name: session_token diff --git a/x-pack/filebeat/module/aws/cloudwatch/config/aws-s3.yml b/x-pack/filebeat/module/aws/cloudwatch/config/aws-s3.yml index d972e73ee31..8ce1970290d 100644 --- a/x-pack/filebeat/module/aws/cloudwatch/config/aws-s3.yml +++ b/x-pack/filebeat/module/aws/cloudwatch/config/aws-s3.yml @@ -38,6 +38,10 @@ api_timeout: {{ .api_timeout }} endpoint: {{ .endpoint }} {{ end }} +{{ if .default_region }} +default_region: {{ .default_region }} +{{ end }} + {{ if .access_key_id }} access_key_id: {{ .access_key_id }} {{ end }} diff --git a/x-pack/filebeat/module/aws/cloudwatch/manifest.yml b/x-pack/filebeat/module/aws/cloudwatch/manifest.yml index 7ffff3514a0..e52ba673757 100644 --- a/x-pack/filebeat/module/aws/cloudwatch/manifest.yml +++ b/x-pack/filebeat/module/aws/cloudwatch/manifest.yml @@ -13,6 +13,7 @@ var: - name: visibility_timeout - name: api_timeout - name: endpoint + - name: default_region - name: access_key_id - name: secret_access_key - name: session_token diff --git 
a/x-pack/filebeat/module/aws/ec2/config/aws-s3.yml b/x-pack/filebeat/module/aws/ec2/config/aws-s3.yml index d972e73ee31..8ce1970290d 100644 --- a/x-pack/filebeat/module/aws/ec2/config/aws-s3.yml +++ b/x-pack/filebeat/module/aws/ec2/config/aws-s3.yml @@ -38,6 +38,10 @@ api_timeout: {{ .api_timeout }} endpoint: {{ .endpoint }} {{ end }} +{{ if .default_region }} +default_region: {{ .default_region }} +{{ end }} + {{ if .access_key_id }} access_key_id: {{ .access_key_id }} {{ end }} diff --git a/x-pack/filebeat/module/aws/ec2/manifest.yml b/x-pack/filebeat/module/aws/ec2/manifest.yml index 7ffff3514a0..e52ba673757 100644 --- a/x-pack/filebeat/module/aws/ec2/manifest.yml +++ b/x-pack/filebeat/module/aws/ec2/manifest.yml @@ -13,6 +13,7 @@ var: - name: visibility_timeout - name: api_timeout - name: endpoint + - name: default_region - name: access_key_id - name: secret_access_key - name: session_token diff --git a/x-pack/filebeat/module/aws/elb/config/aws-s3.yml b/x-pack/filebeat/module/aws/elb/config/aws-s3.yml index d972e73ee31..8ce1970290d 100644 --- a/x-pack/filebeat/module/aws/elb/config/aws-s3.yml +++ b/x-pack/filebeat/module/aws/elb/config/aws-s3.yml @@ -38,6 +38,10 @@ api_timeout: {{ .api_timeout }} endpoint: {{ .endpoint }} {{ end }} +{{ if .default_region }} +default_region: {{ .default_region }} +{{ end }} + {{ if .access_key_id }} access_key_id: {{ .access_key_id }} {{ end }} diff --git a/x-pack/filebeat/module/aws/elb/manifest.yml b/x-pack/filebeat/module/aws/elb/manifest.yml index 5f0b2d16e3d..4ab87c2b686 100644 --- a/x-pack/filebeat/module/aws/elb/manifest.yml +++ b/x-pack/filebeat/module/aws/elb/manifest.yml @@ -13,6 +13,7 @@ var: - name: visibility_timeout - name: api_timeout - name: endpoint + - name: default_region - name: access_key_id - name: secret_access_key - name: session_token diff --git a/x-pack/filebeat/module/aws/s3access/config/aws-s3.yml b/x-pack/filebeat/module/aws/s3access/config/aws-s3.yml index d972e73ee31..8ce1970290d 100644 --- 
a/x-pack/filebeat/module/aws/s3access/config/aws-s3.yml +++ b/x-pack/filebeat/module/aws/s3access/config/aws-s3.yml @@ -38,6 +38,10 @@ api_timeout: {{ .api_timeout }} endpoint: {{ .endpoint }} {{ end }} +{{ if .default_region }} +default_region: {{ .default_region }} +{{ end }} + {{ if .access_key_id }} access_key_id: {{ .access_key_id }} {{ end }} diff --git a/x-pack/filebeat/module/aws/s3access/manifest.yml b/x-pack/filebeat/module/aws/s3access/manifest.yml index 7ffff3514a0..e52ba673757 100644 --- a/x-pack/filebeat/module/aws/s3access/manifest.yml +++ b/x-pack/filebeat/module/aws/s3access/manifest.yml @@ -13,6 +13,7 @@ var: - name: visibility_timeout - name: api_timeout - name: endpoint + - name: default_region - name: access_key_id - name: secret_access_key - name: session_token diff --git a/x-pack/filebeat/module/aws/vpcflow/config/input.yml b/x-pack/filebeat/module/aws/vpcflow/config/input.yml index 6ac1ceccf66..5c08c0e6e38 100644 --- a/x-pack/filebeat/module/aws/vpcflow/config/input.yml +++ b/x-pack/filebeat/module/aws/vpcflow/config/input.yml @@ -40,6 +40,10 @@ api_timeout: {{ .api_timeout }} endpoint: {{ .endpoint }} {{ end }} +{{ if .default_region }} +default_region: {{ .default_region }} +{{ end }} + {{ if .access_key_id }} access_key_id: {{ .access_key_id }} {{ end }} diff --git a/x-pack/filebeat/module/aws/vpcflow/manifest.yml b/x-pack/filebeat/module/aws/vpcflow/manifest.yml index be8642f06cb..b329c7264f1 100644 --- a/x-pack/filebeat/module/aws/vpcflow/manifest.yml +++ b/x-pack/filebeat/module/aws/vpcflow/manifest.yml @@ -13,6 +13,7 @@ var: - name: visibility_timeout - name: api_timeout - name: endpoint + - name: default_region - name: access_key_id - name: secret_access_key - name: session_token diff --git a/x-pack/filebeat/module/awsfargate/log/config/aws-cloudwatch.yml b/x-pack/filebeat/module/awsfargate/log/config/aws-cloudwatch.yml index 958228e74da..08b37cd3aef 100644 --- a/x-pack/filebeat/module/awsfargate/log/config/aws-cloudwatch.yml +++ 
b/x-pack/filebeat/module/awsfargate/log/config/aws-cloudwatch.yml @@ -40,6 +40,10 @@ shared_credential_file: {{ .shared_credential_file }} endpoint: {{ .endpoint }} {{ end }} +{{ if .default_region }} +default_region: {{ .default_region }} +{{ end }} + {{ if .access_key_id }} access_key_id: {{ .access_key_id }} {{ end }} diff --git a/x-pack/filebeat/module/awsfargate/log/manifest.yml b/x-pack/filebeat/module/awsfargate/log/manifest.yml index 47fbc7697d1..9c724ff1a03 100644 --- a/x-pack/filebeat/module/awsfargate/log/manifest.yml +++ b/x-pack/filebeat/module/awsfargate/log/manifest.yml @@ -6,6 +6,7 @@ var: - name: shared_credential_file - name: credential_profile_name - name: endpoint + - name: default_region - name: access_key_id - name: secret_access_key - name: session_token diff --git a/x-pack/filebeat/modules.d/aws.yml.disabled b/x-pack/filebeat/modules.d/aws.yml.disabled index 1cde529a6d3..efca8245700 100644 --- a/x-pack/filebeat/modules.d/aws.yml.disabled +++ b/x-pack/filebeat/modules.d/aws.yml.disabled @@ -57,6 +57,9 @@ # Custom endpoint used to access AWS APIs #var.endpoint: amazonaws.com + # Default region to query if no other region is set + #var.default_region: us-east-1 + # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb @@ -115,6 +118,9 @@ # Custom endpoint used to access AWS APIs #var.endpoint: amazonaws.com + # Default region to query if no other region is set + #var.default_region: us-east-1 + # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb @@ -173,6 +179,9 @@ # Custom endpoint used to access AWS APIs #var.endpoint: amazonaws.com + # Default region to query if no other region is set + #var.default_region: us-east-1 + # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb @@ -231,6 +240,9 @@ # Custom endpoint used to access AWS APIs #var.endpoint: amazonaws.com + # Default region to query if no other region is set + #var.default_region: us-east-1 + # AWS IAM Role to 
assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb @@ -289,6 +301,9 @@ # Custom endpoint used to access AWS APIs #var.endpoint: amazonaws.com + # Default region to query if no other region is set + #var.default_region: us-east-1 + # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb @@ -347,6 +362,9 @@ # Custom endpoint used to access AWS APIs #var.endpoint: amazonaws.com + # Default region to query if no other region is set + #var.default_region: us-east-1 + # AWS IAM Role to assume #var.role_arn: arn:aws:iam::123456789012:role/test-mb diff --git a/x-pack/libbeat/common/aws/credentials.go b/x-pack/libbeat/common/aws/credentials.go index 9b6d80c9528..0b03155e0f1 100644 --- a/x-pack/libbeat/common/aws/credentials.go +++ b/x-pack/libbeat/common/aws/credentials.go @@ -40,11 +40,19 @@ type ConfigAWS struct { ProxyUrl string `config:"proxy_url"` FIPSEnabled bool `config:"fips_enabled"` TLS *tlscommon.Config `config:"ssl" yaml:"ssl,omitempty" json:"ssl,omitempty"` + DefaultRegion string `config:"default_region"` } // InitializeAWSConfig function creates the awssdk.Config object from the provided config func InitializeAWSConfig(config ConfigAWS) (awssdk.Config, error) { AWSConfig, _ := GetAWSCredentials(config) + if AWSConfig.Region == "" { + if config.DefaultRegion != "" { + AWSConfig.Region = config.DefaultRegion + } else { + AWSConfig.Region = "us-east-1" + } + } var proxy func(*http.Request) (*url.URL, error) if config.ProxyUrl != "" { proxyUrl, err := httpcommon.NewProxyURIFromString(config.ProxyUrl) @@ -97,11 +105,6 @@ func getAccessKeys(config ConfigAWS) awssdk.Config { Value: awsCredentials, } - // Set default region if empty to make initial aws api call - if awsConfig.Region == "" { - awsConfig.Region = "us-east-1" - } - // Assume IAM role if iam_role config parameter is given if config.RoleArn != "" { logger.Debug("Using role arn and access keys for AWS credential") @@ -135,11 +138,6 @@ func getSharedCredentialProfile(config 
ConfigAWS) (awssdk.Config, error) { return awsConfig, errors.Wrap(err, "external.LoadDefaultAWSConfig failed with shared credential profile given") } - // Set default region if empty to make initial aws api call - if awsConfig.Region == "" { - awsConfig.Region = "us-east-1" - } - // Assume IAM role if iam_role config parameter is given if config.RoleArn != "" { logger.Debug("Using role arn and shared credential profile for AWS credential") diff --git a/x-pack/libbeat/common/aws/credentials_test.go b/x-pack/libbeat/common/aws/credentials_test.go index e3c21875385..3b5c6233cfc 100644 --- a/x-pack/libbeat/common/aws/credentials_test.go +++ b/x-pack/libbeat/common/aws/credentials_test.go @@ -162,3 +162,36 @@ func TestCreateServiceName(t *testing.T) { }) } } + +func TestDefaultRegion(t *testing.T) { + cases := []struct { + title string + region string + expectedRegion string + }{ + { + "No default region set", + "", + "us-east-1", + }, + { + "us-west-1 region set as default", + "us-west-1", + "us-west-1", + }, + } + for _, c := range cases { + t.Run(c.title, func(t *testing.T) { + inputConfig := ConfigAWS{ + AccessKeyID: "123", + SecretAccessKey: "abc", + } + if c.region != "" { + inputConfig.DefaultRegion = c.region + } + awsConfig, err := InitializeAWSConfig(inputConfig) + assert.NoError(t, err) + assert.Equal(t, c.expectedRegion, awsConfig.Region) + }) + } +} diff --git a/x-pack/libbeat/docs/aws-credentials-config.asciidoc b/x-pack/libbeat/docs/aws-credentials-config.asciidoc index 63a0a0cb639..3ccfd09a84d 100644 --- a/x-pack/libbeat/docs/aws-credentials-config.asciidoc +++ b/x-pack/libbeat/docs/aws-credentials-config.asciidoc @@ -20,6 +20,7 @@ the `endpoint-code` part, such as `amazonaws.com`, `amazonaws.com.cn`, `c2s.ic.g * *proxy_url*: URL of the proxy to use to connect to AWS web services. The syntax is `http(s)://:` * *fips_enabled*: Enabling this option changes the service names from `s3` to `s3-fips` for connecting to the correct service endpoint. 
For example: `s3-fips.us-gov-east-1.amazonaws.com`. All services used by Beats are FIPS compatible except for `tagging` but only certain regions are FIPS compatible. See https://aws.amazon.com/compliance/fips/ or the appropriate service page, https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html, for a full list of FIPS endpoints and regions. * *ssl*: This specifies SSL/TLS configuration. If the ssl section is missing, the host's CAs are used for HTTPS connections. See <> for more information. +* *default_region*: Default region to query if no other region is set. [float] ==== Supported Formats From d06ac626ffe65922b338630b1330b96fa2ee3a0b Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 15 Dec 2021 05:42:12 -0500 Subject: [PATCH 10/57] [Automation] Update go release version to 1.17.5 (#29384) Co-authored-by: apmmachine --- .go-version | 2 +- auditbeat/Dockerfile | 2 +- filebeat/Dockerfile | 2 +- heartbeat/Dockerfile | 2 +- libbeat/Dockerfile | 2 +- libbeat/docs/version.asciidoc | 2 +- metricbeat/Dockerfile | 2 +- packetbeat/Dockerfile | 2 +- x-pack/elastic-agent/Dockerfile | 2 +- x-pack/functionbeat/Dockerfile | 2 +- x-pack/libbeat/Dockerfile | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.go-version b/.go-version index 06fb41b6322..ff278344b33 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.17.2 +1.17.5 diff --git a/auditbeat/Dockerfile b/auditbeat/Dockerfile index ceccec4026d..061539b6dcf 100644 --- a/auditbeat/Dockerfile +++ b/auditbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.17.2 +FROM golang:1.17.5 RUN \ apt-get update \ diff --git a/filebeat/Dockerfile b/filebeat/Dockerfile index b4db2123456..763572e5a99 100644 --- a/filebeat/Dockerfile +++ b/filebeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.17.2 +FROM golang:1.17.5 RUN \ apt-get update \ diff --git a/heartbeat/Dockerfile b/heartbeat/Dockerfile index 0108b788c35..355510ff31f 100644 --- 
a/heartbeat/Dockerfile +++ b/heartbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.17.2 +FROM golang:1.17.5 RUN \ apt-get update \ diff --git a/libbeat/Dockerfile b/libbeat/Dockerfile index e02f709d191..3ff2e7a8ce4 100644 --- a/libbeat/Dockerfile +++ b/libbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.17.2 +FROM golang:1.17.5 RUN \ apt-get update \ diff --git a/libbeat/docs/version.asciidoc b/libbeat/docs/version.asciidoc index 47386bc5ca1..a09f29c66bf 100644 --- a/libbeat/docs/version.asciidoc +++ b/libbeat/docs/version.asciidoc @@ -1,6 +1,6 @@ :stack-version: 8.0.0 :doc-branch: master -:go-version: 1.17.2 +:go-version: 1.17.5 :release-state: unreleased :python: 3.7 :docker: 1.12 diff --git a/metricbeat/Dockerfile b/metricbeat/Dockerfile index 0e9f1d08d14..d9166f054f1 100644 --- a/metricbeat/Dockerfile +++ b/metricbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.17.2 +FROM golang:1.17.5 RUN \ apt update \ diff --git a/packetbeat/Dockerfile b/packetbeat/Dockerfile index d7d9ed89fe6..6223e3d70f0 100644 --- a/packetbeat/Dockerfile +++ b/packetbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.17.2 +FROM golang:1.17.5 RUN \ apt-get update \ diff --git a/x-pack/elastic-agent/Dockerfile b/x-pack/elastic-agent/Dockerfile index 5b2e9d84586..9c7dce7a435 100644 --- a/x-pack/elastic-agent/Dockerfile +++ b/x-pack/elastic-agent/Dockerfile @@ -1,4 +1,4 @@ -ARG GO_VERSION=1.17.2 +ARG GO_VERSION=1.17.5 FROM circleci/golang:${GO_VERSION} diff --git a/x-pack/functionbeat/Dockerfile b/x-pack/functionbeat/Dockerfile index 70d7f46a2cd..f07760a8487 100644 --- a/x-pack/functionbeat/Dockerfile +++ b/x-pack/functionbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.17.2 +FROM golang:1.17.5 RUN \ apt-get update \ diff --git a/x-pack/libbeat/Dockerfile b/x-pack/libbeat/Dockerfile index 2c51fb632b2..12ce0e09203 100644 --- a/x-pack/libbeat/Dockerfile +++ b/x-pack/libbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.17.2 +FROM golang:1.17.5 RUN \ apt-get update \ From 6c268f8f0ec1d8ad3f7590d387133412654ba839 
Mon Sep 17 00:00:00 2001 From: Tiago Queiroz Date: Wed, 15 Dec 2021 15:22:24 +0100 Subject: [PATCH 11/57] CA trusted fingerprint tests (#29347) This commit adds tests to `trustRootCA` and `makeVerifyConnection`, the tests aim to cover the usage of `ssl.ca_tursted_fingerprint`. Some existing tests were refactored, `openTestCerts` receives a `testing.TB` and fail tests in case of an error --- .../transport/tlscommon/testdata/es-leaf.crt | 32 +++ .../tlscommon/testdata/es-root-ca-cert.crt | 31 +++ .../transport/tlscommon/tls_config_test.go | 196 ++++++++++++++++-- 3 files changed, 247 insertions(+), 12 deletions(-) create mode 100644 libbeat/common/transport/tlscommon/testdata/es-leaf.crt create mode 100644 libbeat/common/transport/tlscommon/testdata/es-root-ca-cert.crt diff --git a/libbeat/common/transport/tlscommon/testdata/es-leaf.crt b/libbeat/common/transport/tlscommon/testdata/es-leaf.crt new file mode 100644 index 00000000000..89d5087eb94 --- /dev/null +++ b/libbeat/common/transport/tlscommon/testdata/es-leaf.crt @@ -0,0 +1,32 @@ +-----BEGIN CERTIFICATE----- +MIIFeDCCA2CgAwIBAgIUV7+XlHjcV++/ezqTkJrXSFc1dpAwDQYJKoZIhvcNAQEL +BQAwPDE6MDgGA1UEAxMxRWxhc3RpY3NlYXJjaCBzZWN1cml0eSBhdXRvLWNvbmZp +Z3VyYXRpb24gSFRUUCBDQTAeFw0yMTExMzAxMDMzNTdaFw0yMzExMzAxMDMzNTda +MBExDzANBgNVBAMTBngtd2luZzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoC +ggIBALL045X6ywAHg9tWuViNyXu30rHhJa/AI45ZwLWzQMEwnCWnMvV0Cy3FgUd6 +VKw4Rg55/SfBKShhTRjC4PmDIHDIBgpm4NWpREIW2+cZfeEU8B34ucK/ZHycTFQ1 +Guh8HfvFy5J3OYT+8Wfz94ZxvVLMOGROTSiWdL2foVk98tbHgL1K3qyv1v0rgIjt +smZ7G4tbl3sBCuYceUL7X/+0kavJGls2T/rtxxEIfj5dNz4h65KmABrrAJfrEx35 +y2jCdY2XQsBxxMvbHEXXJKhrjQ8pajMcWAlDBKweiNIDdgBDYWpodpr4f3A6ZJkM +Nplw7KyLna4s3BO/g7fd5/FyQGFuLPraFtFnTXGqH+LjX0td74bdSP22/uhU3cKY +3y64I3/HEaEY5JITgUArExcMVpXuKJKqXEb+LtjGmUbAiO8Z7QKL+PqmU+3tJJ0p +kXnS07m3F/MgrDir/VCnYGQcXeteBwEgmcOwPmxz98eOSBhtb0PrimycF2tQuT8b +mCU+evTPC+KQ+8XY5vBwdPGpf6YAaHuVhNtKqBQnYOpsadS7zw5DJ0Y1Kp9z0ZPL 
+ch4DxE40xqAFmxWnAfpy2scD8LGJ1zDII90tAtYdu+3Wlzj6uMqUdqPuJED7XD41 +mlF2OjB5ipTs/1Jjl3pEnGG94sw5bQmnS1xFQp/DO3mjlgFBAgMBAAGjgZwwgZkw +HQYDVR0OBBYEFJKNxskBHE5xQ9S24puXSKm6/bLKMB8GA1UdIwQYMBaAFHEdsBBS +VCiK0fDIVe2vNN8JvHmcMEwGA1UdEQRFMEOHEP6AAAAAAAAAtw+3JU5DX8mCCWxv +Y2FsaG9zdIcQAAAAAAAAAAAAAAAAAAAAAYcEfwAAAYcEwKgqtoIGeC13aW5nMAkG +A1UdEwQCMAAwDQYJKoZIhvcNAQELBQADggIBAF5JAIQ9cu2xroh2F85fBr/F0s8D +aRV6AJpkjSVKInMm7omn+GLB80TwQZ6NsGuXrbaq0rcM85khsBs4rWn5MqescYG/ +8A7gZ4EtYE3LIyeqiqBByrtIqszZeXm7ITDSF/lwn7X2swe7orkhVD4tVEvKH6L6 +Ql0oNe5UBN1Rm9NskDltMDzE2A25slkm99CAdPERDEjBpvd3eDcfbQdHeuAOPfUV +T8P2DAdW4SC955bxnc0GPTla5TKXWWLde3egow5a4LeJv6KVWPTC9chEXZyQKp4p +jvWZW1fTO/kC3oj97tfqoH/r35/+qyXmg38HNAFbEoVM3bsO0vqrI5CbkWTkB1Xb +7CY6jJxemyEprl2gmkgfA/MXBHFc3RoIL7JcX7Sk8ZWpnEVK3KyoyK1RJ5kY1Cz4 +SRw4KLJA4Cu6DE7vXy9pTlIeeQARgQOUxnrlRGYHpKRIwgjrhwEjVqc0CPwj7rWr +0VY4MW80FPFIePpqy3DjoJmORQU632iu/5zeUS4dZ11Ms7NTakqqnFHi7XczqeZn +4HqPW8ebQTXrqRXMF/X30x6gkK1R1tXHSbve7cTQWJEwJd+MS2aA5Npt7hGznjPn +Y1p4k9jEz5BnbLtZ2RbAj2FuL4Ee6iJoyZpFbi/SW+h+1ZaPCeUTnxUkDLEiXpdk +tN8H6/6dudhy6btm +-----END CERTIFICATE----- diff --git a/libbeat/common/transport/tlscommon/testdata/es-root-ca-cert.crt b/libbeat/common/transport/tlscommon/testdata/es-root-ca-cert.crt new file mode 100644 index 00000000000..6234774adc9 --- /dev/null +++ b/libbeat/common/transport/tlscommon/testdata/es-root-ca-cert.crt @@ -0,0 +1,31 @@ +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIUAoPlJ3hVr921EyJfiT+9lVft3fcwDQYJKoZIhvcNAQEL +BQAwPDE6MDgGA1UEAxMxRWxhc3RpY3NlYXJjaCBzZWN1cml0eSBhdXRvLWNvbmZp +Z3VyYXRpb24gSFRUUCBDQTAeFw0yMTExMzAxMDMzNTdaFw0yNDExMjkxMDMzNTda +MDwxOjA4BgNVBAMTMUVsYXN0aWNzZWFyY2ggc2VjdXJpdHkgYXV0by1jb25maWd1 +cmF0aW9uIEhUVFAgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2 +soq+heCJNHsMuyyyLndREhYmxYFav06XOLB5oC1bAt+0WMo3n7rxVB8dAhfvigof +DsTIytnCcK+Th8ll2k4Bs2weF16ZhvvC2FKbSkdUxNXnXfx7gdKDXZLbfref5FiL +ucwxa7CtVL28Lfws9J5dZTTAuxR2XxaX+TJbH6MbQgKUYR+DnK8T3jSfiDTQtiHs 
++pd+C8hSdMgzKCynYP36VZbtz1ynWjvQ/0wxARO6q2OLZGBNh2ncoFEmosXgc0ir +Vh9NrVmozSI0H2f6W07imqL3oe1pe3bwW/OdfeahCBY3IvDLDn8q8wDl91gRta3n +EsMsiuBRSRRpT0grgoCFNy+wiIrETVLaI2HJ0UpVIpcoS7K5l2zN/wA+w+hAOdh0 +PoBt8AoC1aCCGM4osCTKqbgbOg957io2twuvWJ6ae3J2k5FFDMvIfMfL+5HhPSRp +nYiRDPOhapDhaXhHa4pEFONpdiJJgmqymLqjW4liZOGft28dSkISK3iiBL74p/gu +X/sBI7PZANycpyVjnLHK+FwPlRZPkrqCw2Gke4Oqm9uydwM08uRVZcNylVS7H0ip +9BEcxKlXJSaULnTqQXkiPGKGkCrrIIsNQTFjoaBIBP2o69NSZ0SozDf4aCnYy10v +U1dwI9yisOmMfDkakNcAPXfRfmuuJlstl1W1RraQswIDAQABo1MwUTAdBgNVHQ4E +FgQUcR2wEFJUKIrR8MhV7a803wm8eZwwHwYDVR0jBBgwFoAUcR2wEFJUKIrR8MhV +7a803wm8eZwwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAiHrC +NxCNsyUYLFVivL9AsJ5Y3IrhAHUzYwofLBJiMYNFsaEi3P1VU3TNlo98kzi2QkdY +NPFtRYoOg6sEI0KPEBw54kLP/Q/FJK7jeJSyhJ9V/Z+NS081YHqrMP4YPK6mM4qa +XuM7hpx37vkLDdfrDPionbcLk7Zz+2t6bIThrwta0idMY6LKeFfW1EWeggK6inNc +Ub3n1qcTyOp1RfcLlHCdb17JhgY5hROmqVfhgLlbT0bx1NZS4pRWhw5CDKsflMUe +SyHbLE1BTH6yE0nNXbR6FgDKjQNUSSZBOBck0hdSaRArALavujjBojHmJYWt1jWO +bcBErzwKKwH/peUh7Wgnq1L/lqym9K9AniWUyhvKn8AbxGLnILDMYOSrvlPF2uU+ +uvp2EzhPUyOgYycC28H4fFUdDeoN5FVP+4sFFK+FIgfqLfVMTgDPmGAbkqA6WKlH +fgQ2fP4oB2ZkN0EPxivXkvZkhDVlIXeoisUkNCgAfVuwCjvOLnqz8u0tTnp/wXxq +XAXUPLcG71YFzABlkwuPdA5GhFAL1Rv8GQJEznhZ8mYz/yTtcg/z3pYEhDcM92Cb +161BormFYVRI1B80rSpzeQwJVfvgCwnWOTat+1joFHCzpl99nHu8tMxi6lkO1G9E +8vdk/J0zMMnhO52V2EMNdH2fTJUMZYixBm4BeEM= +-----END CERTIFICATE----- diff --git a/libbeat/common/transport/tlscommon/tls_config_test.go b/libbeat/common/transport/tlscommon/tls_config_test.go index 76dfa61497f..2fd0b76e2a0 100644 --- a/libbeat/common/transport/tlscommon/tls_config_test.go +++ b/libbeat/common/transport/tlscommon/tls_config_test.go @@ -26,13 +26,11 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestMakeVerifyServerConnection(t *testing.T) { - testCerts, err := openTestCerts() - if err != nil { - t.Fatalf("failed to open test certs: %+v", err) - } + testCerts := openTestCerts(t) 
testCA, errs := LoadCertificateAuthorities([]string{ filepath.Join("testdata", "ca.crt"), @@ -159,7 +157,6 @@ func TestMakeVerifyServerConnection(t *testing.T) { for name, test := range testcases { t.Run(name, func(t *testing.T) { - test := test cfg := &TLSConfig{ Verification: test.verificationMode, ClientAuth: test.clientAuth, @@ -177,16 +174,20 @@ func TestMakeVerifyServerConnection(t *testing.T) { ServerName: test.serverName, }) if test.expectedError == nil { - assert.Nil(t, err) + assert.NoError(t, err) } else { - assert.Error(t, test.expectedError, err) + require.Error(t, err) + // We want to ensure the error type/message are the expected ones + // so we compare the types and the message + assert.IsType(t, test.expectedError, err) + assert.Contains(t, err.Error(), test.expectedError.Error()) } }) } - } -func openTestCerts() (map[string]*x509.Certificate, error) { +func openTestCerts(t testing.TB) map[string]*x509.Certificate { + t.Helper() certs := make(map[string]*x509.Certificate, 0) for testcase, certname := range map[string]string{ @@ -194,19 +195,190 @@ func openTestCerts() (map[string]*x509.Certificate, error) { "unknown authority": "unsigned_tls.crt", "correct": "client1.crt", "wildcard": "server.crt", + "es-leaf": "es-leaf.crt", + "es-root-ca": "es-root-ca-cert.crt", } { certBytes, err := ioutil.ReadFile(filepath.Join("testdata", certname)) if err != nil { - return nil, err + t.Fatalf("reading file %q: %+v", certname, err) } block, _ := pem.Decode(certBytes) testCert, err := x509.ParseCertificate(block.Bytes) if err != nil { - return nil, err + t.Fatalf("parsing certificate %q: %+v", certname, err) } certs[testcase] = testCert } - return certs, nil + return certs +} + +func TestTrustRootCA(t *testing.T) { + certs := openTestCerts(t) + + nonEmptyCertPool := x509.NewCertPool() + nonEmptyCertPool.AddCert(certs["wildcard"]) + nonEmptyCertPool.AddCert(certs["unknown authority"]) + + testCases := []struct { + name string + rootCAs *x509.CertPool + 
caTrustedFingerprint string + peerCerts []*x509.Certificate + expectingError bool + expectedRootCAsLen int + }{ + { + name: "RootCA cert matches the fingerprint and is added to cfg.RootCAs", + caTrustedFingerprint: "e83171aa133b2b507e057fe091e296a7e58e9653c2b88d203b64a47eef6ec62b", + peerCerts: []*x509.Certificate{certs["es-leaf"], certs["es-root-ca"]}, + expectedRootCAsLen: 1, + }, + { + name: "RootCA cert doesn not matche the fingerprint and is not added to cfg.RootCAs", + caTrustedFingerprint: "e83171aa133b2b507e057fe091e296a7e58e9653c2b88d203b64a47eef6ec62b", + peerCerts: []*x509.Certificate{certs["es-leaf"], certs["es-root-ca"]}, + expectedRootCAsLen: 0, + }, + { + name: "non empty CertPool has the RootCA added", + rootCAs: nonEmptyCertPool, + caTrustedFingerprint: "e83171aa133b2b507e057fe091e296a7e58e9653c2b88d203b64a47eef6ec62b", + peerCerts: []*x509.Certificate{certs["es-leaf"], certs["es-root-ca"]}, + expectedRootCAsLen: 3, + }, + { + name: "invalis HEX encoding", + caTrustedFingerprint: "INVALID ENCODING", + expectedRootCAsLen: 0, + expectingError: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + cfg := TLSConfig{ + RootCAs: tc.rootCAs, + CATrustedFingerprint: tc.caTrustedFingerprint, + } + err := trustRootCA(&cfg, tc.peerCerts) + if tc.expectingError && err == nil { + t.Fatal("expecting an error when calling trustRootCA") + } + + if !tc.expectingError && err != nil { + t.Fatalf("did not expect an error calling trustRootCA: %v", err) + } + + if tc.expectedRootCAsLen != 0 { + if cfg.RootCAs == nil { + t.Fatal("cfg.RootCAs cannot be nil") + } + + // we want to know the number of certificates in the CertPool (RootCAs), as it is not + // directly available, we use this workaround of reading the number of subjects in the pool. 
+ if got, expected := len(cfg.RootCAs.Subjects()), tc.expectedRootCAsLen; got != expected { + t.Fatalf("expecting cfg.RootCAs to have %d element, got %d instead", expected, got) + } + } + }) + } +} + +func TestMakeVerifyConnectionUsesCATrustedFingerprint(t *testing.T) { + testCerts := openTestCerts(t) + + testcases := map[string]struct { + verificationMode TLSVerificationMode + peerCerts []*x509.Certificate + serverName string + expectedCallback bool + expectingError bool + CATrustedFingerprint string + CASHA256 []string + }{ + "CATrustedFingerprint and verification mode:VerifyFull": { + verificationMode: VerifyFull, + peerCerts: []*x509.Certificate{testCerts["es-leaf"], testCerts["es-root-ca"]}, + serverName: "localhost", + expectedCallback: true, + CATrustedFingerprint: "e83171aa133b2b507e057fe091e296a7e58e9653c2b88d203b64a47eef6ec62b", + }, + "CATrustedFingerprint and verification mode:VerifyCertificate": { + verificationMode: VerifyCertificate, + peerCerts: []*x509.Certificate{testCerts["es-leaf"], testCerts["es-root-ca"]}, + serverName: "localhost", + expectedCallback: true, + CATrustedFingerprint: "e83171aa133b2b507e057fe091e296a7e58e9653c2b88d203b64a47eef6ec62b", + }, + "CATrustedFingerprint and verification mode:VerifyStrict": { + verificationMode: VerifyStrict, + peerCerts: []*x509.Certificate{testCerts["es-leaf"], testCerts["es-root-ca"]}, + serverName: "localhost", + expectedCallback: true, + CATrustedFingerprint: "e83171aa133b2b507e057fe091e296a7e58e9653c2b88d203b64a47eef6ec62b", + CASHA256: []string{Fingerprint(testCerts["es-leaf"])}, + }, + "CATrustedFingerprint and verification mode:VerifyNone": { + verificationMode: VerifyNone, + peerCerts: []*x509.Certificate{testCerts["es-leaf"], testCerts["es-root-ca"]}, + serverName: "localhost", + expectedCallback: false, + }, + "invalid CATrustedFingerprint and verification mode:VerifyFull returns error": { + verificationMode: VerifyFull, + peerCerts: []*x509.Certificate{testCerts["es-leaf"], 
testCerts["es-root-ca"]}, + serverName: "localhost", + expectedCallback: true, + CATrustedFingerprint: "INVALID HEX ENCODING", + expectingError: true, + }, + "invalid CATrustedFingerprint and verification mode:VerifyCertificate returns error": { + verificationMode: VerifyCertificate, + peerCerts: []*x509.Certificate{testCerts["es-leaf"], testCerts["es-root-ca"]}, + serverName: "localhost", + expectedCallback: true, + CATrustedFingerprint: "INVALID HEX ENCODING", + expectingError: true, + }, + "invalid CATrustedFingerprint and verification mode:VerifyStrict returns error": { + verificationMode: VerifyStrict, + peerCerts: []*x509.Certificate{testCerts["es-leaf"], testCerts["es-root-ca"]}, + serverName: "localhost", + expectedCallback: true, + CATrustedFingerprint: "INVALID HEX ENCODING", + expectingError: true, + CASHA256: []string{Fingerprint(testCerts["es-leaf"])}, + }, + } + + for name, test := range testcases { + t.Run(name, func(t *testing.T) { + cfg := &TLSConfig{ + Verification: test.verificationMode, + CATrustedFingerprint: test.CATrustedFingerprint, + CASha256: test.CASHA256, + } + + verifier := makeVerifyConnection(cfg) + if test.expectedCallback { + require.NotNil(t, verifier, "makeVerifyConnection returned a nil verifier") + } else { + require.Nil(t, verifier) + return + } + + err := verifier(tls.ConnectionState{ + PeerCertificates: test.peerCerts, + ServerName: test.serverName, + VerifiedChains: [][]*x509.Certificate{test.peerCerts}, + }) + if test.expectingError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } } From 533eefb74f773cfa34a15a9cdd74c8365db2f355 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 16 Dec 2021 02:24:18 -0500 Subject: [PATCH 12/57] [Automation] Update elastic stack version to 8.1.0-d8a3a806 for testing (#29463) Co-authored-by: apmmachine --- testing/environments/snapshot-oss.yml | 6 +++--- testing/environments/snapshot.yml | 4 ++-- 2 files changed, 5 
insertions(+), 5 deletions(-) diff --git a/testing/environments/snapshot-oss.yml b/testing/environments/snapshot-oss.yml index 7ca185c80fe..ce828d0f5ab 100644 --- a/testing/environments/snapshot-oss.yml +++ b/testing/environments/snapshot-oss.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.1.0-b6e7e1f3-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.1.0-d8a3a806-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:9200/_cat/health?h=status | grep -q green"] retries: 300 @@ -21,7 +21,7 @@ services: - "script.context.template.cache_max_size=2000" logstash: - image: docker.elastic.co/logstash/logstash-oss:8.1.0-b6e7e1f3-SNAPSHOT + image: docker.elastic.co/logstash/logstash-oss:8.1.0-d8a3a806-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -31,7 +31,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.1.0-b6e7e1f3-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.1.0-d8a3a806-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:5601/api/status?v8format=true | grep -q '\"overall\":{\"level\":\"available\"'"] retries: 600 diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 4cf5ba1db37..2ef97dfeebd 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.1.0-b6e7e1f3-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.1.0-d8a3a806-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:9200/_cat/health?h=status | grep -q green"] retries: 300 @@ -37,7 +37,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.1.0-b6e7e1f3-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.1.0-d8a3a806-SNAPSHOT 
healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:5601/api/status?v8format=true | grep -q '\"overall\":{\"level\":\"available\"'"] retries: 600 From 9201a929eb90e7e76af89841e43c727d98186bfa Mon Sep 17 00:00:00 2001 From: Alex Resnick Date: Thu, 16 Dec 2021 09:04:32 -0600 Subject: [PATCH 13/57] [Filebeat] Enable dynamic inputs (TCP) for Cisco syslog modules (#26159) - Add tcp option to asa, ftd & ios filesets - Add SSL option Closes #28821 Co-authored-by: Lee E. Hinman --- CHANGELOG.next.asciidoc | 1 + x-pack/filebeat/filebeat.reference.yml | 37 ++++++++++++++----- x-pack/filebeat/module/cisco/_meta/config.yml | 37 ++++++++++++++----- .../module/cisco/asa/config/input.yml | 14 +++---- x-pack/filebeat/module/cisco/asa/manifest.yml | 3 +- .../module/cisco/ftd/config/input.yml | 13 ++++--- x-pack/filebeat/module/cisco/ftd/manifest.yml | 3 +- .../module/cisco/ios/config/input.yml | 14 +++---- x-pack/filebeat/module/cisco/ios/manifest.yml | 2 + x-pack/filebeat/modules.d/cisco.yml.disabled | 37 ++++++++++++++----- 10 files changed, 109 insertions(+), 52 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 15434665d0b..cef220be926 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -267,6 +267,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Add documentation for add_kubernetes_metadata processors `log_path` matcher. {pull}28868[28868] - Add support for parsers on journald input {pull}29070[29070] - Add support in httpjson input for oAuth2ProviderDefault of password grant_type. {pull}29087[29087] +- Update Cisco module to enable TCP input. 
{issue}26118[26118] {issue}28821[28821] {pull}26159[26159] *Heartbeat* diff --git a/x-pack/filebeat/filebeat.reference.yml b/x-pack/filebeat/filebeat.reference.yml index 1abc29932d8..796d6934046 100644 --- a/x-pack/filebeat/filebeat.reference.yml +++ b/x-pack/filebeat/filebeat.reference.yml @@ -685,16 +685,23 @@ filebeat.modules: asa: enabled: false - # Set which input to use between syslog (default) or file. - #var.input: syslog + # Set which input to use between udp (default), tcp or file. + #var.input: udp - # The interface to listen to UDP based syslog traffic. Defaults to + # The interface to listen to udp or tcp syslog traffic. Defaults to # localhost. Set to 0.0.0.0 to bind to all available interfaces. #var.syslog_host: localhost - # The UDP port to listen for syslog traffic. Defaults to 9001. + # The port to listen for udp or tcp syslog traffic. Defaults to 9001. #var.syslog_port: 9001 + # With tcp input, set the optional tls configuration: + #var.ssl: + # enabled: true + # certificate: /path/to/cert.pem + # key: /path/to/privatekey.pem + # key_passphrase: 'password for my key' + # Set the log level from 1 (alerts only) to 7 (include all messages). # Messages with a log level higher than the specified will be dropped. # See https://www.cisco.com/c/en/us/td/docs/security/asa/syslog/b_syslog/syslogs-sev-level.html @@ -711,16 +718,23 @@ filebeat.modules: ftd: enabled: false - # Set which input to use between syslog (default) or file. - #var.input: syslog + # Set which input to use between udp (default), tcp or file. + #var.input: udp - # The interface to listen to UDP based syslog traffic. Defaults to + # The interface to listen to tcp or udp syslog traffic. Defaults to # localhost. Set to 0.0.0.0 to bind to all available interfaces. #var.syslog_host: localhost - # The UDP port to listen for syslog traffic. Defaults to 9003. + # The UDP port to listen for tcp or udp syslog traffic. Defaults to 9003. 
#var.syslog_port: 9003 + # With tcp input, set the optional tls configuration: + #var.ssl: + # enabled: true + # certificate: /path/to/cert.pem + # key: /path/to/privatekey.pem + # key_passphrase: 'password for my key' + # Set the log level from 1 (alerts only) to 7 (include all messages). # Messages with a log level higher than the specified will be dropped. # See https://www.cisco.com/c/en/us/td/docs/security/firepower/Syslogs/b_fptd_syslog_guide/syslogs-sev-level.html @@ -740,13 +754,16 @@ filebeat.modules: # Set which input to use between syslog (default) or file. #var.input: syslog - # The interface to listen to UDP based syslog traffic. Defaults to + # The interface to listen to syslog traffic. Defaults to # localhost. Set to 0.0.0.0 to bind to all available interfaces. #var.syslog_host: localhost - # The UDP port to listen for syslog traffic. Defaults to 9002. + # The port to listen on for syslog traffic. Defaults to 9002. #var.syslog_port: 9002 + # Set which protocol to use between udp (default) or tcp. + #var.syslog_protocol: udp + # Set custom paths for the log files when using file input. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: diff --git a/x-pack/filebeat/module/cisco/_meta/config.yml b/x-pack/filebeat/module/cisco/_meta/config.yml index 3fd735c050d..1b2940129bf 100644 --- a/x-pack/filebeat/module/cisco/_meta/config.yml +++ b/x-pack/filebeat/module/cisco/_meta/config.yml @@ -2,16 +2,23 @@ asa: enabled: false - # Set which input to use between syslog (default) or file. - #var.input: syslog + # Set which input to use between udp (default), tcp or file. + #var.input: udp - # The interface to listen to UDP based syslog traffic. Defaults to + # The interface to listen to udp or tcp syslog traffic. Defaults to # localhost. Set to 0.0.0.0 to bind to all available interfaces. #var.syslog_host: localhost - # The UDP port to listen for syslog traffic. Defaults to 9001. + # The port to listen for udp or tcp syslog traffic. 
Defaults to 9001. #var.syslog_port: 9001 + # With tcp input, set the optional tls configuration: + #var.ssl: + # enabled: true + # certificate: /path/to/cert.pem + # key: /path/to/privatekey.pem + # key_passphrase: 'password for my key' + # Set the log level from 1 (alerts only) to 7 (include all messages). # Messages with a log level higher than the specified will be dropped. # See https://www.cisco.com/c/en/us/td/docs/security/asa/syslog/b_syslog/syslogs-sev-level.html @@ -28,16 +35,23 @@ ftd: enabled: false - # Set which input to use between syslog (default) or file. - #var.input: syslog + # Set which input to use between udp (default), tcp or file. + #var.input: udp - # The interface to listen to UDP based syslog traffic. Defaults to + # The interface to listen to tcp or udp syslog traffic. Defaults to # localhost. Set to 0.0.0.0 to bind to all available interfaces. #var.syslog_host: localhost - # The UDP port to listen for syslog traffic. Defaults to 9003. + # The UDP port to listen for tcp or udp syslog traffic. Defaults to 9003. #var.syslog_port: 9003 + # With tcp input, set the optional tls configuration: + #var.ssl: + # enabled: true + # certificate: /path/to/cert.pem + # key: /path/to/privatekey.pem + # key_passphrase: 'password for my key' + # Set the log level from 1 (alerts only) to 7 (include all messages). # Messages with a log level higher than the specified will be dropped. # See https://www.cisco.com/c/en/us/td/docs/security/firepower/Syslogs/b_fptd_syslog_guide/syslogs-sev-level.html @@ -57,13 +71,16 @@ # Set which input to use between syslog (default) or file. #var.input: syslog - # The interface to listen to UDP based syslog traffic. Defaults to + # The interface to listen to syslog traffic. Defaults to # localhost. Set to 0.0.0.0 to bind to all available interfaces. #var.syslog_host: localhost - # The UDP port to listen for syslog traffic. Defaults to 9002. + # The port to listen on for syslog traffic. Defaults to 9002. 
#var.syslog_port: 9002 + # Set which protocol to use between udp (default) or tcp. + #var.syslog_protocol: udp + # Set custom paths for the log files when using file input. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: diff --git a/x-pack/filebeat/module/cisco/asa/config/input.yml b/x-pack/filebeat/module/cisco/asa/config/input.yml index 4237b4d9ae2..cb9df5bd6ec 100644 --- a/x-pack/filebeat/module/cisco/asa/config/input.yml +++ b/x-pack/filebeat/module/cisco/asa/config/input.yml @@ -1,10 +1,4 @@ -{{ if eq .input "syslog" }} - -type: udp -udp: -host: "{{.syslog_host}}:{{.syslog_port}}" - -{{ else if eq .input "file" }} +{{ if eq .input "file" }} type: log paths: @@ -13,6 +7,12 @@ paths: {{ end }} exclude_files: [".gz$"] +{{ else }} + +type: {{.input}} +host: "{{.syslog_host}}:{{.syslog_port}}" +ssl: {{ .ssl | tojson }} + {{ end }} tags: {{.tags | tojson}} diff --git a/x-pack/filebeat/module/cisco/asa/manifest.yml b/x-pack/filebeat/module/cisco/asa/manifest.yml index 3c185f7980c..184df5404ad 100644 --- a/x-pack/filebeat/module/cisco/asa/manifest.yml +++ b/x-pack/filebeat/module/cisco/asa/manifest.yml @@ -11,7 +11,8 @@ var: - name: syslog_port default: 9001 - name: input - default: syslog + default: udp + - name: ssl - name: log_level default: 7 # if ES < 6.1.0, this flag switches to false automatically when evaluating the diff --git a/x-pack/filebeat/module/cisco/ftd/config/input.yml b/x-pack/filebeat/module/cisco/ftd/config/input.yml index b29aa4c725f..cb9df5bd6ec 100644 --- a/x-pack/filebeat/module/cisco/ftd/config/input.yml +++ b/x-pack/filebeat/module/cisco/ftd/config/input.yml @@ -1,9 +1,4 @@ -{{ if eq .input "syslog" }} - -type: udp -host: "{{.syslog_host}}:{{.syslog_port}}" - -{{ else if eq .input "file" }} +{{ if eq .input "file" }} type: log paths: @@ -12,6 +7,12 @@ paths: {{ end }} exclude_files: [".gz$"] +{{ else }} + +type: {{.input}} +host: "{{.syslog_host}}:{{.syslog_port}}" +ssl: {{ .ssl | tojson }} + {{ end }} tags: 
{{.tags | tojson}} diff --git a/x-pack/filebeat/module/cisco/ftd/manifest.yml b/x-pack/filebeat/module/cisco/ftd/manifest.yml index 31eb9659a6b..d681ff4d323 100644 --- a/x-pack/filebeat/module/cisco/ftd/manifest.yml +++ b/x-pack/filebeat/module/cisco/ftd/manifest.yml @@ -11,7 +11,8 @@ var: - name: syslog_port default: 9003 - name: input - default: syslog + default: udp + - name: ssl - name: log_level default: 7 # if ES < 6.1.0, this flag switches to false automatically when evaluating the diff --git a/x-pack/filebeat/module/cisco/ios/config/input.yml b/x-pack/filebeat/module/cisco/ios/config/input.yml index d911aa3ed9e..979f9cf380b 100644 --- a/x-pack/filebeat/module/cisco/ios/config/input.yml +++ b/x-pack/filebeat/module/cisco/ios/config/input.yml @@ -1,10 +1,4 @@ -{{ if eq .input "syslog" }} - -type: syslog -protocol.udp: - host: "{{.syslog_host}}:{{.syslog_port}}" - -{{ else if eq .input "file" }} +{{ if eq .input "file" }} type: log paths: @@ -13,6 +7,12 @@ paths: {{ end }} exclude_files: [".gz$"] +{{ else if eq .input "syslog" }} + +type: syslog +protocol.{{.syslog_protocol}}: + host: "{{.syslog_host}}:{{.syslog_port}}" + {{ end }} tags: {{.tags | tojson}} diff --git a/x-pack/filebeat/module/cisco/ios/manifest.yml b/x-pack/filebeat/module/cisco/ios/manifest.yml index e67f5c2f729..169e909fd89 100644 --- a/x-pack/filebeat/module/cisco/ios/manifest.yml +++ b/x-pack/filebeat/module/cisco/ios/manifest.yml @@ -10,6 +10,8 @@ var: default: localhost - name: syslog_port default: 9002 + - name: syslog_protocol + default: udp - name: input default: syslog diff --git a/x-pack/filebeat/modules.d/cisco.yml.disabled b/x-pack/filebeat/modules.d/cisco.yml.disabled index 3ad2d76a875..2d267c68a69 100644 --- a/x-pack/filebeat/modules.d/cisco.yml.disabled +++ b/x-pack/filebeat/modules.d/cisco.yml.disabled @@ -5,16 +5,23 @@ asa: enabled: false - # Set which input to use between syslog (default) or file. 
- #var.input: syslog + # Set which input to use between udp (default), tcp or file. + #var.input: udp - # The interface to listen to UDP based syslog traffic. Defaults to + # The interface to listen to udp or tcp syslog traffic. Defaults to # localhost. Set to 0.0.0.0 to bind to all available interfaces. #var.syslog_host: localhost - # The UDP port to listen for syslog traffic. Defaults to 9001. + # The port to listen for udp or tcp syslog traffic. Defaults to 9001. #var.syslog_port: 9001 + # With tcp input, set the optional tls configuration: + #var.ssl: + # enabled: true + # certificate: /path/to/cert.pem + # key: /path/to/privatekey.pem + # key_passphrase: 'password for my key' + # Set the log level from 1 (alerts only) to 7 (include all messages). # Messages with a log level higher than the specified will be dropped. # See https://www.cisco.com/c/en/us/td/docs/security/asa/syslog/b_syslog/syslogs-sev-level.html @@ -31,16 +38,23 @@ ftd: enabled: false - # Set which input to use between syslog (default) or file. - #var.input: syslog + # Set which input to use between udp (default), tcp or file. + #var.input: udp - # The interface to listen to UDP based syslog traffic. Defaults to + # The interface to listen to tcp or udp syslog traffic. Defaults to # localhost. Set to 0.0.0.0 to bind to all available interfaces. #var.syslog_host: localhost - # The UDP port to listen for syslog traffic. Defaults to 9003. + # The UDP port to listen for tcp or udp syslog traffic. Defaults to 9003. #var.syslog_port: 9003 + # With tcp input, set the optional tls configuration: + #var.ssl: + # enabled: true + # certificate: /path/to/cert.pem + # key: /path/to/privatekey.pem + # key_passphrase: 'password for my key' + # Set the log level from 1 (alerts only) to 7 (include all messages). # Messages with a log level higher than the specified will be dropped. 
# See https://www.cisco.com/c/en/us/td/docs/security/firepower/Syslogs/b_fptd_syslog_guide/syslogs-sev-level.html @@ -60,13 +74,16 @@ # Set which input to use between syslog (default) or file. #var.input: syslog - # The interface to listen to UDP based syslog traffic. Defaults to + # The interface to listen to syslog traffic. Defaults to # localhost. Set to 0.0.0.0 to bind to all available interfaces. #var.syslog_host: localhost - # The UDP port to listen for syslog traffic. Defaults to 9002. + # The port to listen on for syslog traffic. Defaults to 9002. #var.syslog_port: 9002 + # Set which protocol to use between udp (default) or tcp. + #var.syslog_protocol: udp + # Set custom paths for the log files when using file input. If left empty, # Filebeat will choose the paths depending on your OS. #var.paths: From b5e94143d774f2434432a578f7ef5bbd71002bca Mon Sep 17 00:00:00 2001 From: Taylor Swanson <90622908+taylor-swanson@users.noreply.github.com> Date: Thu, 16 Dec 2021 09:13:07 -0600 Subject: [PATCH 14/57] [Winlogbeat] Add support for custom XML queries (#29330) - Added new configuration field (xml_query) to support custom XML queries - This new configuration item will conflict with existing simple query configuration items (ignore_older, event_id, level, provider) - Validator has been updated to check for key conflicts and XML syntax, but does not check for correctness of XML schema. 
- Added unit tests for config validation - Added unit/system test for XML query runner --- CHANGELOG.next.asciidoc | 1 + winlogbeat/_meta/config/header.yml.tmpl | 8 +- winlogbeat/docs/winlogbeat-options.asciidoc | 58 ++++++- winlogbeat/eventlog/factory.go | 6 +- winlogbeat/eventlog/wineventlog.go | 74 ++++++--- .../eventlog/wineventlog_experimental.go | 60 ++++--- winlogbeat/eventlog/wineventlog_test.go | 147 +++++++++++++++++- winlogbeat/winlogbeat.reference.yml | 8 +- winlogbeat/winlogbeat.yml | 8 +- x-pack/winlogbeat/winlogbeat.reference.yml | 8 +- x-pack/winlogbeat/winlogbeat.yml | 8 +- 11 files changed, 323 insertions(+), 63 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index cef220be926..bcc93aa5636 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -296,6 +296,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Add more DNS error codes to the Sysmon module. {issue}15685[15685] - Add configuration option for registry file flush timeout {issue}29001[29001] {pull}29053[29053] +- Add support for custom XML queries {issue}1054[1054] {pull}29330[29330] *Elastic Log Driver* diff --git a/winlogbeat/_meta/config/header.yml.tmpl b/winlogbeat/_meta/config/header.yml.tmpl index ec53124d5f3..7d966b36524 100644 --- a/winlogbeat/_meta/config/header.yml.tmpl +++ b/winlogbeat/_meta/config/header.yml.tmpl @@ -26,7 +26,9 @@ # accompanying options. The YAML data type of event_logs is a list of # dictionaries. # -# The supported keys are name (required), tags, fields, fields_under_root, -# forwarded, ignore_older, level, event_id, provider, and include_xml. Please -# visit the documentation for the complete details of each option. +# The supported keys are name, id, xml_query, tags, fields, fields_under_root, +# forwarded, ignore_older, level, event_id, provider, and include_xml. 
+# The xml_query key requires an id and must not be used with the name, +# ignore_older, level, event_id, or provider keys. Please visit the +# documentation for the complete details of each option. # https://go.es.io/WinlogbeatConfig diff --git a/winlogbeat/docs/winlogbeat-options.asciidoc b/winlogbeat/docs/winlogbeat-options.asciidoc index dd2353eb729..23e63803089 100644 --- a/winlogbeat/docs/winlogbeat-options.asciidoc +++ b/winlogbeat/docs/winlogbeat-options.asciidoc @@ -87,7 +87,6 @@ winlogbeat.shutdown_timeout: 30s A list of entries (called 'dictionaries' in YAML) that specify which event logs to monitor. Each entry in the list defines an event log to monitor as well as any information to be associated with the event log (filter, tags, and so on). -The `name` field is the only required field for each event log. [source,yaml] -------------------------------------------------------------------------------- @@ -113,9 +112,9 @@ reading additional event log records. ==== `event_logs.name` The name of the event log to monitor. Each dictionary under `event_logs` must -have a `name` field. You can get a list of available event logs by running -`Get-EventLog *` in PowerShell. Here is a sample of the output from the -command: +have a `name` field, except for those which use a custom XML query. You can +get a list of available event logs by running `Get-EventLog *` in PowerShell. +Here is a sample of the output from the command: [source,sh] -------------------------------------------------------------------------------- @@ -173,6 +172,28 @@ winlogbeat.event_logs: - name: 'C:\backup\sysmon-2019.08.evtx' -------------------------------------------------------------------------------- +The name key must not be used with custom XML queries. + +[float] +==== `event_logs.id` + +A unique identifier for the event log. This key is required when using a custom +XML query. + +It is used to uniquely identify the event log reader in the registry file. 
This is +useful if multiple event logs are being set up to watch the same channel or file. If an +ID is not given, the `event_logs.name` value will be used. + +This value must be unique. + +[source,yaml] +-------------------------------------------------------------------------------- +winlogbeat.event_logs: + - name: Application + id: application-logs + ignore_older: 168h +-------------------------------------------------------------------------------- + [float] ==== `event_logs.ignore_older` @@ -335,6 +356,35 @@ Microsoft-Windows-Security-Auditing Microsoft-Windows-Eventlog -------------------------------------------------------------------------------- +[float] +==== `event_logs.xml_query` + +Provide a custom XML query. This option is mutually exclusive with the `name`, `event_id`, +`ignore_older`, `level`, and `provider` options. These options should be included in +the XML query directly. Furthermore, an `id` must be provided. Custom XML queries +provide more flexibility and advanced options than the simpler query options in {beatname_uc}. +*{vista_and_newer}* + +Here is a configuration which will collect DHCP server events from multiple channels: + +[source,yaml] +-------------------------------------------------------------------------------- +winlogbeat.event_logs: + - id: dhcp-server-logs + xml_query: > + + + + + + + +-------------------------------------------------------------------------------- + +XML queries may also be created in Windows Event Viewer using custom views. The query +can be created using a graphical interface and the corresponding XML can be +retrieved from the XML tab. + [float] ==== `event_logs.include_xml` diff --git a/winlogbeat/eventlog/factory.go b/winlogbeat/eventlog/factory.go index 9ee8e0d144f..bb07c07ba1e 100644 --- a/winlogbeat/eventlog/factory.go +++ b/winlogbeat/eventlog/factory.go @@ -29,8 +29,10 @@ import ( // EventLog. Each implementation is free to support additional configuration // options. 
type ConfigCommon struct { - API string `config:"api"` // Name of the API to use. Optional. - Name string `config:"name"` // Name of the event log or channel or file. + API string `config:"api"` // Name of the API to use. Optional. + Name string `config:"name"` // Name of the event log or channel or file. + ID string `config:"id"` // Identifier for the event log. + XMLQuery string `config:"xml_query"` // Custom query XML. Must not be used with the keys from eventlog.query. } type validator interface { diff --git a/winlogbeat/eventlog/wineventlog.go b/winlogbeat/eventlog/wineventlog.go index 9e52cf2bdaa..7ec830f220a 100644 --- a/winlogbeat/eventlog/wineventlog.go +++ b/winlogbeat/eventlog/wineventlog.go @@ -21,6 +21,7 @@ package eventlog import ( + "encoding/xml" "fmt" "io" "path/filepath" @@ -113,7 +114,30 @@ type query struct { // any problems or nil. func (c *winEventLogConfig) Validate() error { var errs multierror.Errors - if c.Name == "" { + + if c.XMLQuery != "" { + if c.ID == "" { + errs = append(errs, fmt.Errorf("event log is missing an 'id'")) + } + + // Check for XML syntax errors. This does not check the validity of the query itself. 
+ if err := xml.Unmarshal([]byte(c.XMLQuery), &struct{}{}); err != nil { + errs = append(errs, fmt.Errorf("invalid xml_query: %w", err)) + } + + switch { + case c.Name != "": + errs = append(errs, fmt.Errorf("xml_query cannot be used with 'name'")) + case c.SimpleQuery.IgnoreOlder != 0: + errs = append(errs, fmt.Errorf("xml_query cannot be used with 'ignore_older'")) + case c.SimpleQuery.Level != "": + errs = append(errs, fmt.Errorf("xml_query cannot be used with 'level'")) + case c.SimpleQuery.EventID != "": + errs = append(errs, fmt.Errorf("xml_query cannot be used with 'event_id'")) + case len(c.SimpleQuery.Provider) != 0: + errs = append(errs, fmt.Errorf("xml_query cannot be used with 'provider'")) + } + } else if c.Name == "" { errs = append(errs, fmt.Errorf("event log is missing a 'name'")) } @@ -128,6 +152,7 @@ var _ EventLog = &winEventLog{} type winEventLog struct { config winEventLogConfig query string + id string // Identifier of this event log. channelName string // Name of the channel from which to read. file bool // Reading from file rather than channel. subscription win.EvtHandle // Handle to the subscription. @@ -144,7 +169,7 @@ type winEventLog struct { // Name returns the name of the event log (i.e. Application, Security, etc.). 
func (l *winEventLog) Name() string { - return l.channelName + return l.id } func (l *winEventLog) Open(state checkpoint.EventLogState) error { @@ -152,7 +177,7 @@ func (l *winEventLog) Open(state checkpoint.EventLogState) error { var err error if len(state.Bookmark) > 0 { bookmark, err = win.CreateBookmarkFromXML(state.Bookmark) - } else if state.RecordNumber > 0 { + } else if state.RecordNumber > 0 && l.channelName != "" { bookmark, err = win.CreateBookmarkFromRecordID(l.channelName, state.RecordNumber) } if err != nil { @@ -267,7 +292,7 @@ func (l *winEventLog) Read() ([]Record, error) { r, _ := l.buildRecordFromXML(l.outputBuf.Bytes(), err) r.Offset = checkpoint.EventLogState{ - Name: l.channelName, + Name: l.id, RecordNumber: r.RecordID, Timestamp: r.TimeCreated.SystemTime, } @@ -356,7 +381,7 @@ func (l *winEventLog) buildRecordFromXML(x []byte, recoveredErr error) (Record, } if l.file { - r.File = l.channelName + r.File = l.id } if includeXML { @@ -374,20 +399,32 @@ func newEventLogging(options *common.Config) (EventLog, error) { // newWinEventLog creates and returns a new EventLog for reading event logs // using the Windows Event Log. 
func newWinEventLog(options *common.Config) (EventLog, error) { + var xmlQuery string + var err error + c := defaultWinEventLogConfig - if err := readConfig(options, &c); err != nil { + if err = readConfig(options, &c); err != nil { return nil, err } - query, err := win.Query{ - Log: c.Name, - IgnoreOlder: c.SimpleQuery.IgnoreOlder, - Level: c.SimpleQuery.Level, - EventID: c.SimpleQuery.EventID, - Provider: c.SimpleQuery.Provider, - }.Build() - if err != nil { - return nil, err + id := c.ID + if id == "" { + id = c.Name + } + + if c.XMLQuery != "" { + xmlQuery = c.XMLQuery + } else { + xmlQuery, err = win.Query{ + Log: c.Name, + IgnoreOlder: c.SimpleQuery.IgnoreOlder, + Level: c.SimpleQuery.Level, + EventID: c.SimpleQuery.EventID, + Provider: c.SimpleQuery.Provider, + }.Build() + if err != nil { + return nil, err + } } eventMetadataHandle := func(providerName, sourceName string) sys.MessageFiles { @@ -411,15 +448,16 @@ func newWinEventLog(options *common.Config) (EventLog, error) { } l := &winEventLog{ + id: id, config: c, - query: query, + query: xmlQuery, channelName: c.Name, file: filepath.IsAbs(c.Name), maxRead: c.BatchReadSize, renderBuf: make([]byte, renderBufferSize), outputBuf: sys.NewByteBuffer(renderBufferSize), - cache: newMessageFilesCache(c.Name, eventMetadataHandle, freeHandle), - logPrefix: fmt.Sprintf("WinEventLog[%s]", c.Name), + cache: newMessageFilesCache(id, eventMetadataHandle, freeHandle), + logPrefix: fmt.Sprintf("WinEventLog[%s]", id), } // Forwarded events should be rendered using RenderEventXML. It is more diff --git a/winlogbeat/eventlog/wineventlog_experimental.go b/winlogbeat/eventlog/wineventlog_experimental.go index 3a3ae182686..87eb4b32802 100644 --- a/winlogbeat/eventlog/wineventlog_experimental.go +++ b/winlogbeat/eventlog/wineventlog_experimental.go @@ -47,6 +47,7 @@ const ( type winEventLogExp struct { config winEventLogConfig query string + id string // Identifier of this event log. 
channelName string // Name of the channel from which to read. file bool // Reading from file rather than channel. maxRead int // Maximum number returned in one Read. @@ -59,7 +60,7 @@ type winEventLogExp struct { // Name returns the name of the event log (i.e. Application, Security, etc.). func (l *winEventLogExp) Name() string { - return l.channelName + return l.id } func (l *winEventLogExp) Open(state checkpoint.EventLogState) error { @@ -205,11 +206,11 @@ func (l *winEventLogExp) processHandle(h win.EvtHandle) (*Record, error) { } if l.file { - r.File = l.channelName + r.File = l.id } r.Offset = checkpoint.EventLogState{ - Name: l.channelName, + Name: l.id, RecordNumber: r.RecordID, Timestamp: r.TimeCreated.SystemTime, } @@ -241,6 +242,11 @@ func (l *winEventLogExp) Close() error { // newWinEventLogExp creates and returns a new EventLog for reading event logs // using the Windows Event Log. func newWinEventLogExp(options *common.Config) (EventLog, error) { + var xmlQuery string + var err error + var isFile bool + var log *logp.Logger + cfgwarn.Experimental("The %s event log reader is experimental.", winEventLogExpAPIName) c := winEventLogConfig{BatchReadSize: 512} @@ -248,30 +254,39 @@ func newWinEventLogExp(options *common.Config) (EventLog, error) { return nil, err } - queryLog := c.Name - isFile := false - if info, err := os.Stat(c.Name); err == nil && info.Mode().IsRegular() { - path, err := filepath.Abs(c.Name) + id := c.ID + if id == "" { + id = c.Name + } + + if c.XMLQuery != "" { + xmlQuery = c.XMLQuery + log = logp.NewLogger("wineventlog").With("id", id) + } else { + queryLog := c.Name + if info, err := os.Stat(c.Name); err == nil && info.Mode().IsRegular() { + path, err := filepath.Abs(c.Name) + if err != nil { + return nil, err + } + isFile = true + queryLog = "file://" + path + } + + xmlQuery, err = win.Query{ + Log: queryLog, + IgnoreOlder: c.SimpleQuery.IgnoreOlder, + Level: c.SimpleQuery.Level, + EventID: c.SimpleQuery.EventID, + Provider: 
c.SimpleQuery.Provider, + }.Build() if err != nil { return nil, err } - isFile = true - queryLog = "file://" + path - } - query, err := win.Query{ - Log: queryLog, - IgnoreOlder: c.SimpleQuery.IgnoreOlder, - Level: c.SimpleQuery.Level, - EventID: c.SimpleQuery.EventID, - Provider: c.SimpleQuery.Provider, - }.Build() - if err != nil { - return nil, err + log = logp.NewLogger("wineventlog").With("id", id).With("channel", c.Name) } - log := logp.NewLogger("wineventlog").With("channel", c.Name) - renderer, err := win.NewRenderer(win.NilHandle, log) if err != nil { return nil, err @@ -279,7 +294,8 @@ func newWinEventLogExp(options *common.Config) (EventLog, error) { l := &winEventLogExp{ config: c, - query: query, + query: xmlQuery, + id: id, channelName: c.Name, file: isFile, maxRead: c.BatchReadSize, diff --git a/winlogbeat/eventlog/wineventlog_test.go b/winlogbeat/eventlog/wineventlog_test.go index d6ddddcdd8b..c5d92ce1873 100644 --- a/winlogbeat/eventlog/wineventlog_test.go +++ b/winlogbeat/eventlog/wineventlog_test.go @@ -39,8 +39,13 @@ import ( const ( // Names that are registered by the test for logging events. - providerName = "WinlogbeatTestGo" - sourceName = "Integration Test" + providerName = "WinlogbeatTestGo" + sourceName = "Integration Test" + customXMLQuery = ` + + + +` // Event message files used when logging events. @@ -54,6 +59,116 @@ const ( netEventMsgFile = "%SystemRoot%\\System32\\netevent.dll" ) +func TestWinEventLogConfig_Validate(t *testing.T) { + tests := []struct { + In winEventLogConfig + WantErr bool + Desc string + }{ + { + In: winEventLogConfig{ + ConfigCommon: ConfigCommon{ + ID: "test", + XMLQuery: customXMLQuery, + }, + }, + WantErr: false, + Desc: "xml query: all good", + }, + { + In: winEventLogConfig{ + ConfigCommon: ConfigCommon{ + ID: "test", + XMLQuery: customXMLQuery[:len(customXMLQuery)-4], // Malformed XML by truncation. 
+ }, + }, + WantErr: true, + Desc: "xml query: malformed XML", + }, + { + In: winEventLogConfig{ + ConfigCommon: ConfigCommon{ + XMLQuery: customXMLQuery, + }, + }, + WantErr: true, + Desc: "xml query: missing ID", + }, + { + In: winEventLogConfig{ + ConfigCommon: ConfigCommon{ + ID: "test", + Name: "test", + XMLQuery: customXMLQuery, + }, + }, + WantErr: true, + Desc: "xml query: conflicting keys (xml query and name)", + }, + { + In: winEventLogConfig{ + ConfigCommon: ConfigCommon{ + ID: "test", + XMLQuery: customXMLQuery, + }, + SimpleQuery: query{IgnoreOlder: 1}, + }, + WantErr: true, + Desc: "xml query: conflicting keys (xml query and ignore_older)", + }, + { + In: winEventLogConfig{ + ConfigCommon: ConfigCommon{ + ID: "test", + XMLQuery: customXMLQuery, + }, + SimpleQuery: query{Level: "error"}, + }, + WantErr: true, + Desc: "xml query: conflicting keys (xml query and level)", + }, + { + In: winEventLogConfig{ + ConfigCommon: ConfigCommon{ + ID: "test", + XMLQuery: customXMLQuery, + }, + SimpleQuery: query{EventID: "1000"}, + }, + WantErr: true, + Desc: "xml query: conflicting keys (xml query and event_id)", + }, + { + In: winEventLogConfig{ + ConfigCommon: ConfigCommon{ + ID: "test", + XMLQuery: customXMLQuery, + }, + SimpleQuery: query{Provider: []string{providerName}}, + }, + WantErr: true, + Desc: "xml query: conflicting keys (xml query and provider)", + }, + { + In: winEventLogConfig{ + ConfigCommon: ConfigCommon{}, + }, + WantErr: true, + Desc: "missing name", + }, + } + + for _, tc := range tests { + gotErr := tc.In.Validate() + + if tc.WantErr { + assert.NotNil(t, gotErr, tc.Desc) + } else { + assert.Nil(t, gotErr, "%q got unexpected err: %v", tc.Desc, gotErr) + } + } +} + func TestWindowsEventLogAPI(t *testing.T) { testWindowsEventLog(t, winEventLogAPIName) } @@ -79,6 +194,34 @@ func testWindowsEventLog(t *testing.T, api string) { return openLog(t, api, nil, config) } + // Test reading from an event log using a custom XML query. 
+ t.Run("custom_xml_query", func(t *testing.T) { + cfg := map[string]interface{}{ + "id": "custom-xml-query", + "xml_query": customXMLQuery, + } + + log := openLog(t, cfg) + defer log.Close() + + var eventCount int + + for eventCount < totalEvents { + records, err := log.Read() + if err != nil { + t.Fatal("read error", err) + } + if len(records) == 0 { + t.Fatal("read returned 0 records") + } + + t.Logf("Read() returned %d events.", len(records)) + eventCount += len(records) + } + + assert.Equal(t, totalEvents, eventCount) + }) + t.Run("batch_read_size_config", func(t *testing.T) { const batchReadSize = 2 diff --git a/winlogbeat/winlogbeat.reference.yml b/winlogbeat/winlogbeat.reference.yml index afa6ec97eb3..11a5ac82351 100644 --- a/winlogbeat/winlogbeat.reference.yml +++ b/winlogbeat/winlogbeat.reference.yml @@ -24,9 +24,11 @@ # accompanying options. The YAML data type of event_logs is a list of # dictionaries. # -# The supported keys are name (required), tags, fields, fields_under_root, -# forwarded, ignore_older, level, event_id, provider, and include_xml. Please -# visit the documentation for the complete details of each option. +# The supported keys are name, id, xml_query, tags, fields, fields_under_root, +# forwarded, ignore_older, level, event_id, provider, and include_xml. +# The xml_query key requires an id and must not be used with the name, +# ignore_older, level, event_id, or provider keys. Please visit the +# documentation for the complete details of each option. # https://go.es.io/WinlogbeatConfig winlogbeat.event_logs: diff --git a/winlogbeat/winlogbeat.yml b/winlogbeat/winlogbeat.yml index 614b2817225..bdb1f706fa6 100644 --- a/winlogbeat/winlogbeat.yml +++ b/winlogbeat/winlogbeat.yml @@ -13,9 +13,11 @@ # accompanying options. The YAML data type of event_logs is a list of # dictionaries. # -# The supported keys are name (required), tags, fields, fields_under_root, -# forwarded, ignore_older, level, event_id, provider, and include_xml. 
Please -# visit the documentation for the complete details of each option. +# The supported keys are name, id, xml_query, tags, fields, fields_under_root, +# forwarded, ignore_older, level, event_id, provider, and include_xml. +# The xml_query key requires an id and must not be used with the name, +# ignore_older, level, event_id, or provider keys. Please visit the +# documentation for the complete details of each option. # https://go.es.io/WinlogbeatConfig winlogbeat.event_logs: diff --git a/x-pack/winlogbeat/winlogbeat.reference.yml b/x-pack/winlogbeat/winlogbeat.reference.yml index 8d9d4ef2566..940cd6125a0 100644 --- a/x-pack/winlogbeat/winlogbeat.reference.yml +++ b/x-pack/winlogbeat/winlogbeat.reference.yml @@ -24,9 +24,11 @@ # accompanying options. The YAML data type of event_logs is a list of # dictionaries. # -# The supported keys are name (required), tags, fields, fields_under_root, -# forwarded, ignore_older, level, event_id, provider, and include_xml. Please -# visit the documentation for the complete details of each option. +# The supported keys are name, id, xml_query, tags, fields, fields_under_root, +# forwarded, ignore_older, level, event_id, provider, and include_xml. +# The xml_query key requires an id and must not be used with the name, +# ignore_older, level, event_id, or provider keys. Please visit the +# documentation for the complete details of each option. # https://go.es.io/WinlogbeatConfig winlogbeat.event_logs: diff --git a/x-pack/winlogbeat/winlogbeat.yml b/x-pack/winlogbeat/winlogbeat.yml index c0c01d2ceea..15c1e10fdcc 100644 --- a/x-pack/winlogbeat/winlogbeat.yml +++ b/x-pack/winlogbeat/winlogbeat.yml @@ -13,9 +13,11 @@ # accompanying options. The YAML data type of event_logs is a list of # dictionaries. # -# The supported keys are name (required), tags, fields, fields_under_root, -# forwarded, ignore_older, level, event_id, provider, and include_xml. Please -# visit the documentation for the complete details of each option. 
+# The supported keys are name, id, xml_query, tags, fields, fields_under_root, +# forwarded, ignore_older, level, event_id, provider, and include_xml. +# The xml_query key requires an id and must not be used with the name, +# ignore_older, level, event_id, or provider keys. Please visit the +# documentation for the complete details of each option. # https://go.es.io/WinlogbeatConfig winlogbeat.event_logs: From d4016812b8ae0be884fef6d56e404db6f5438a9f Mon Sep 17 00:00:00 2001 From: Denis Rechkunov Date: Thu, 16 Dec 2021 21:03:03 +0100 Subject: [PATCH 15/57] Drop event batch when get HTTP status 413 from ES (#29368) To prevent infinite loops when having `http.max_content_length` set too low or `bulk_max_size` too high we now handle this status code separately and drop the whole event batch producing a detailed error message on the console. --- CHANGELOG-developer.next.asciidoc | 1 + libbeat/outputs/elasticsearch/client.go | 14 ++- libbeat/outputs/elasticsearch/client_test.go | 97 ++++++++++++++++++++ 3 files changed, 110 insertions(+), 2 deletions(-) diff --git a/CHANGELOG-developer.next.asciidoc b/CHANGELOG-developer.next.asciidoc index e640b5a0334..dcb2a5665f4 100644 --- a/CHANGELOG-developer.next.asciidoc +++ b/CHANGELOG-developer.next.asciidoc @@ -68,6 +68,7 @@ The list below covers the major changes between 7.0.0-rc2 and master only. - Remove `event.dataset` (ECS) annotion from `libbeat.logp`. {issue}27404[27404] - Errors should be thrown as errors. Metricsets inside Metricbeat will now throw errors as the `error` log level. {pull}27804[27804] - Avoid panicking in `add_fields` processor when input event.Fields is a nil map. 
{pull}28219[28219] +- Drop event batch when get HTTP status 413 from Elasticsearch to avoid infinite loop {issue}14350[14350] {pull}29368[29368] ==== Added diff --git a/libbeat/outputs/elasticsearch/client.go b/libbeat/outputs/elasticsearch/client.go index deab29c3dcd..0d9d619b9f5 100644 --- a/libbeat/outputs/elasticsearch/client.go +++ b/libbeat/outputs/elasticsearch/client.go @@ -38,6 +38,8 @@ import ( "github.com/elastic/beats/v7/libbeat/testing" ) +var errPayloadTooLarge = errors.New("the bulk payload is too large for the server. Consider to adjust `http.max_content_length` parameter in Elasticsearch or `bulk_max_size` in the beat. The batch has been dropped") + // Client is an elasticsearch client. type Client struct { conn eslegclient.Connection @@ -180,9 +182,13 @@ func (client *Client) Clone() *Client { func (client *Client) Publish(ctx context.Context, batch publisher.Batch) error { events := batch.Events() rest, err := client.publishEvents(ctx, events) - if len(rest) == 0 { + + switch { + case err == errPayloadTooLarge: + batch.Drop() + case len(rest) == 0: batch.ACK() - } else { + default: batch.RetryEvents(rest) } return err @@ -220,7 +226,11 @@ func (client *Client) publishEvents(ctx context.Context, data []publisher.Event) } status, result, sendErr := client.conn.Bulk(ctx, "", "", nil, bulkItems) + if sendErr != nil { + if status == http.StatusRequestEntityTooLarge { + sendErr = errPayloadTooLarge + } err := apm.CaptureError(ctx, fmt.Errorf("failed to perform any bulk index operations: %w", sendErr)) err.Send() client.log.Error(err) diff --git a/libbeat/outputs/elasticsearch/client_test.go b/libbeat/outputs/elasticsearch/client_test.go index 2a03d10481d..0a2ca672a7c 100644 --- a/libbeat/outputs/elasticsearch/client_test.go +++ b/libbeat/outputs/elasticsearch/client_test.go @@ -44,6 +44,103 @@ import ( "github.com/elastic/beats/v7/libbeat/version" ) +type testIndexSelector struct{} + +func (testIndexSelector) Select(event *beat.Event) 
(string, error) { + return "test", nil +} + +type batchMock struct { + // we embed the interface so we are able to implement the interface partially, + // only functions needed for tests are implemented + // if you use a function that is not implemented in the mock it will panic + publisher.Batch + events []publisher.Event + ack bool + drop bool + retryEvents []publisher.Event +} + +func (bm batchMock) Events() []publisher.Event { + return bm.events +} +func (bm *batchMock) ACK() { + bm.ack = true +} +func (bm *batchMock) Drop() { + bm.drop = true +} +func (bm *batchMock) RetryEvents(events []publisher.Event) { + bm.retryEvents = events +} + +func TestPublishStatusCode(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + event := publisher.Event{Content: beat.Event{Fields: common.MapStr{"field": 1}}} + events := []publisher.Event{event} + + t.Run("returns pre-defined error and drops batch when 413", func(t *testing.T) { + esMock := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusRequestEntityTooLarge) + w.Write([]byte("Request failed to get to the server (status code: 413)")) // actual response from ES + })) + defer esMock.Close() + + client, err := NewClient( + ClientSettings{ + ConnectionSettings: eslegclient.ConnectionSettings{ + URL: esMock.URL, + }, + Index: testIndexSelector{}, + }, + nil, + ) + assert.NoError(t, err) + + event := publisher.Event{Content: beat.Event{Fields: common.MapStr{"field": 1}}} + events := []publisher.Event{event} + batch := &batchMock{ + events: events, + } + + err = client.Publish(ctx, batch) + + assert.Error(t, err) + assert.Equal(t, errPayloadTooLarge, err, "should be a pre-defined error") + assert.True(t, batch.drop, "should must be dropped") + }) + + t.Run("retries the batch if bad HTTP status", func(t *testing.T) { + esMock := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + 
w.WriteHeader(http.StatusInternalServerError) + })) + defer esMock.Close() + + client, err := NewClient( + ClientSettings{ + ConnectionSettings: eslegclient.ConnectionSettings{ + URL: esMock.URL, + }, + Index: testIndexSelector{}, + }, + nil, + ) + assert.NoError(t, err) + + batch := &batchMock{ + events: events, + } + + err = client.Publish(ctx, batch) + + assert.Error(t, err) + assert.False(t, batch.ack, "should not be acknowledged") + assert.Len(t, batch.retryEvents, len(events), "all events should be in retry") + }) +} + func TestCollectPublishFailsNone(t *testing.T) { client, err := NewClient( ClientSettings{ From 43ae3c9cbd5e02bf64aed8422501ac5f0ab0eac3 Mon Sep 17 00:00:00 2001 From: Chris Mark Date: Thu, 16 Dec 2021 22:06:44 +0200 Subject: [PATCH 16/57] Enhance filter check in k8s event metricset (#29470) --- CHANGELOG.next.asciidoc | 1 + libbeat/common/kubernetes/types.go | 5 +++++ metricbeat/module/kubernetes/event/event.go | 10 +++++++++- 3 files changed, 15 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index bcc93aa5636..4a69cc01758 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -187,6 +187,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Groups same timestamp metric values to one event in the app_insights metricset. {pull}20403[20403] - Use xpack.enabled on SM modules to write into .monitoring indices when using Metricbeat standalone {pull}28365[28365] - Fix in rename processor to ingest metrics for `write.iops` to proper field instead of `write_iops` in rds metricset. {pull}28960[28960] +- Enhance filter check in kubernetes event metricset. 
{pull}29470[29470] *Packetbeat* diff --git a/libbeat/common/kubernetes/types.go b/libbeat/common/kubernetes/types.go index 9f50e99b305..c3d1fefb01e 100644 --- a/libbeat/common/kubernetes/types.go +++ b/libbeat/common/kubernetes/types.go @@ -94,6 +94,11 @@ func Time(t *metav1.Time) time.Time { return t.Time } +// MicroTime extracts time from k8s.MicroTime type +func MicroTime(t *metav1.MicroTime) time.Time { + return t.Time +} + // ContainerID parses the container ID to get the actual ID string func ContainerID(s PodContainerStatus) string { cID, _ := ContainerIDWithRuntime(s) diff --git a/metricbeat/module/kubernetes/event/event.go b/metricbeat/module/kubernetes/event/event.go index 723e1331e61..9c844be950c 100644 --- a/metricbeat/module/kubernetes/event/event.go +++ b/metricbeat/module/kubernetes/event/event.go @@ -108,8 +108,16 @@ func (m *MetricSet) Run(reporter mb.PushReporter) { m.watcher.AddEventHandler(kubernetes.FilteringResourceEventHandler{ FilterFunc: func(obj interface{}) bool { eve := obj.(*kubernetes.Event) + // if fields are null they are decoded to `0001-01-01 00:00:00 +0000 UTC` + // so we need to check if they are valid first + lastTimestampValid := !kubernetes.Time(&eve.LastTimestamp).IsZero() + eventTimeValid := !kubernetes.MicroTime(&eve.EventTime).IsZero() // if skipOlder, skip events happened before watch - if m.skipOlder && kubernetes.Time(&eve.LastTimestamp).Before(now) { + if m.skipOlder && kubernetes.Time(&eve.LastTimestamp).Before(now) && lastTimestampValid { + return false + } else if m.skipOlder && kubernetes.MicroTime(&eve.EventTime).Before(now) && eventTimeValid { + // there might be cases that `LastTimestamp` is not a valid number so double check + // with `EventTime` return false } return true From 8921ea4a4cbff116b24a2cae6b2a5e6d6e08d851 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Thu, 16 Dec 2021 21:21:14 +0000 Subject: [PATCH 17/57] ci: enable orka (#29426) --- Jenkinsfile | 2 +- auditbeat/Jenkinsfile.yml | 2 +- 
filebeat/Jenkinsfile.yml | 2 +- heartbeat/Jenkinsfile.yml | 2 +- metricbeat/Jenkinsfile.yml | 2 +- packetbeat/Jenkinsfile.yml | 2 +- x-pack/auditbeat/Jenkinsfile.yml | 2 +- x-pack/auditbeat/tests/system/test_metricsets.py | 1 + x-pack/elastic-agent/Jenkinsfile.yml | 2 +- x-pack/filebeat/Jenkinsfile.yml | 2 +- x-pack/functionbeat/Jenkinsfile.yml | 2 +- x-pack/heartbeat/Jenkinsfile.yml | 2 +- x-pack/metricbeat/Jenkinsfile.yml | 2 +- x-pack/osquerybeat/Jenkinsfile.yml | 2 +- x-pack/packetbeat/Jenkinsfile.yml | 2 +- 15 files changed, 15 insertions(+), 14 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 9d5aca6a4ce..19978442bf8 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -634,7 +634,7 @@ def withBeatsEnv(Map args = [:], Closure body) { if(isUnix()) { gox_flags = (isArm() && is64arm()) ? '-arch arm' : '-arch amd64' - path = "${env.WORKSPACE}/bin:${env.PATH}" + path = "${env.WORKSPACE}/bin:${env.PATH}:/usr/local/bin" magefile = "${WORKSPACE}/.magefile" pythonEnv = "${WORKSPACE}/python-env" testResults = '**/build/TEST*.xml' diff --git a/auditbeat/Jenkinsfile.yml b/auditbeat/Jenkinsfile.yml index ec64b4fc3d7..7b17f617776 100644 --- a/auditbeat/Jenkinsfile.yml +++ b/auditbeat/Jenkinsfile.yml @@ -44,7 +44,7 @@ stages: macos: mage: "mage build unitTest" platforms: ## override default label in this specific stage. - - "macosx&&x86_64" + - "orka && darwin && poc" when: ## Override the top-level when. comments: - "/test auditbeat for macos" diff --git a/filebeat/Jenkinsfile.yml b/filebeat/Jenkinsfile.yml index 758f0d496a0..f5843c43b18 100644 --- a/filebeat/Jenkinsfile.yml +++ b/filebeat/Jenkinsfile.yml @@ -55,7 +55,7 @@ stages: macos: mage: "mage build unitTest" platforms: ## override default label in this specific stage. - - "macosx&&x86_64" + - "orka && darwin && poc" when: ## Override the top-level when. 
comments: - "/test filebeat for macos" diff --git a/heartbeat/Jenkinsfile.yml b/heartbeat/Jenkinsfile.yml index c6603f1b969..4eae0d100ef 100644 --- a/heartbeat/Jenkinsfile.yml +++ b/heartbeat/Jenkinsfile.yml @@ -49,7 +49,7 @@ stages: macos: mage: "mage build unitTest" platforms: ## override default label in this specific stage. - - "macosx&&x86_64" + - "orka && darwin && poc" when: ## Override the top-level when. comments: - "/test heartbeat for macos" diff --git a/metricbeat/Jenkinsfile.yml b/metricbeat/Jenkinsfile.yml index 3a66f3e29a7..e7ce5009caf 100644 --- a/metricbeat/Jenkinsfile.yml +++ b/metricbeat/Jenkinsfile.yml @@ -38,7 +38,7 @@ stages: macos: mage: "mage build unitTest" platforms: ## override default label in this specific stage. - - "macosx&&x86_64" + - "orka && darwin && poc" when: ## Override the top-level when. comments: - "/test metricbeat for macos" diff --git a/packetbeat/Jenkinsfile.yml b/packetbeat/Jenkinsfile.yml index 5d09046407a..bc0f8a13bee 100644 --- a/packetbeat/Jenkinsfile.yml +++ b/packetbeat/Jenkinsfile.yml @@ -42,7 +42,7 @@ stages: macos: mage: "mage build unitTest" platforms: ## override default label in this specific stage. - - "macosx&&x86_64" + - "orka && darwin && poc" when: ## Override the top-level when. comments: - "/test packetbeat for macos" diff --git a/x-pack/auditbeat/Jenkinsfile.yml b/x-pack/auditbeat/Jenkinsfile.yml index aef0ec1fbe5..9dbb5f77dfa 100644 --- a/x-pack/auditbeat/Jenkinsfile.yml +++ b/x-pack/auditbeat/Jenkinsfile.yml @@ -42,7 +42,7 @@ stages: macos: mage: "mage build unitTest" platforms: ## override default label in this specific stage. - - "macosx&&x86_64" + - "orka && darwin && poc" when: ## Override the top-level when. 
comments: - "/test x-pack/auditbeat for macos" diff --git a/x-pack/auditbeat/tests/system/test_metricsets.py b/x-pack/auditbeat/tests/system/test_metricsets.py index 2d34f8e4c18..6294d5a02d1 100644 --- a/x-pack/auditbeat/tests/system/test_metricsets.py +++ b/x-pack/auditbeat/tests/system/test_metricsets.py @@ -48,6 +48,7 @@ def test_metricset_login(self): @unittest.skipIf(sys.platform == "win32", "Not implemented for Windows") @unittest.skipIf(sys.platform.startswith('linux') and not (os.path.isdir("/var/lib/dpkg") or os.path.isdir("/var/lib/rpm")), "Only implemented for dpkg and rpm") + @unittest.skipIf(sys.platform.startswith('darwin'), "See https://github.com/elastic/beats/issues/21308") def test_metricset_package(self): """ package metricset collects information about installed packages on a system. diff --git a/x-pack/elastic-agent/Jenkinsfile.yml b/x-pack/elastic-agent/Jenkinsfile.yml index a80fdb1297c..d2381654009 100644 --- a/x-pack/elastic-agent/Jenkinsfile.yml +++ b/x-pack/elastic-agent/Jenkinsfile.yml @@ -42,7 +42,7 @@ stages: macos: mage: "mage build unitTest" platforms: ## override default label in this specific stage. - - "macosx&&x86_64" + - "orka && darwin && poc" when: ## Override the top-level when. comments: - "/test x-pack/elastic-agent for macos" diff --git a/x-pack/filebeat/Jenkinsfile.yml b/x-pack/filebeat/Jenkinsfile.yml index 8ea1c14fa03..c5647d9a854 100644 --- a/x-pack/filebeat/Jenkinsfile.yml +++ b/x-pack/filebeat/Jenkinsfile.yml @@ -55,7 +55,7 @@ stages: macos: mage: "mage build unitTest" platforms: ## override default label in this specific stage. - - "macosx&&x86_64" + - "orka && darwin && poc" when: ## Override the top-level when. 
comments: - "/test x-pack/filebeat for macos" diff --git a/x-pack/functionbeat/Jenkinsfile.yml b/x-pack/functionbeat/Jenkinsfile.yml index 84e95e829ea..f3b4bf10973 100644 --- a/x-pack/functionbeat/Jenkinsfile.yml +++ b/x-pack/functionbeat/Jenkinsfile.yml @@ -39,7 +39,7 @@ stages: macos: mage: "mage build unitTest" platforms: ## override default label in this specific stage. - - "macosx&&x86_64" + - "orka && darwin && poc" when: ## Override the top-level when. comments: - "/test x-pack/functionbeat for macos" diff --git a/x-pack/heartbeat/Jenkinsfile.yml b/x-pack/heartbeat/Jenkinsfile.yml index ca184f785f4..96a48489c62 100644 --- a/x-pack/heartbeat/Jenkinsfile.yml +++ b/x-pack/heartbeat/Jenkinsfile.yml @@ -30,7 +30,7 @@ stages: macos: mage: "mage build unitTest" platforms: ## override default label in this specific stage. - - "macosx&&x86_64" + - "orka && darwin && poc" when: ## Override the top-level when. comments: - "/test x-pack/heartbeat for macos" diff --git a/x-pack/metricbeat/Jenkinsfile.yml b/x-pack/metricbeat/Jenkinsfile.yml index 7a619ed6f16..db7fb380c50 100644 --- a/x-pack/metricbeat/Jenkinsfile.yml +++ b/x-pack/metricbeat/Jenkinsfile.yml @@ -60,7 +60,7 @@ stages: macos: mage: "mage build unitTest" platforms: ## override default label in this specific stage. - - "macosx&&x86_64" + - "orka && darwin && poc" when: ## Override the top-level when. comments: - "/test x-pack/metricbeat for macos" diff --git a/x-pack/osquerybeat/Jenkinsfile.yml b/x-pack/osquerybeat/Jenkinsfile.yml index fd55f11b63d..6ebd028027b 100644 --- a/x-pack/osquerybeat/Jenkinsfile.yml +++ b/x-pack/osquerybeat/Jenkinsfile.yml @@ -28,7 +28,7 @@ stages: macos: mage: "mage build unitTest" platforms: ## override default label in this specific stage. - - "macosx&&x86_64" + - "orka && darwin && poc" when: ## Override the top-level when. 
comments: - "/test x-pack/osquerybeat for macos" diff --git a/x-pack/packetbeat/Jenkinsfile.yml b/x-pack/packetbeat/Jenkinsfile.yml index a2550062fb7..1771a576f07 100644 --- a/x-pack/packetbeat/Jenkinsfile.yml +++ b/x-pack/packetbeat/Jenkinsfile.yml @@ -42,7 +42,7 @@ stages: macos: mage: "mage build unitTest" platforms: ## override default label in this specific stage. - - "macosx&&x86_64" + - "orka && darwin && poc" when: ## Override the top-level when. comments: - "/test x-pack/packetbeat for macos" From a0ef4d3af0388562674684e3fba3cb56008a5fbb Mon Sep 17 00:00:00 2001 From: Andrew Kroh Date: Thu, 16 Dec 2021 17:21:09 -0500 Subject: [PATCH 18/57] Stop calling ChangeMessageVisibility after ReceiptHandleIsInvalid (#29480) Stop the keepalive goroutine after ErrCodeReceiptHandleIsInvalid is returned by ChangeMessageVisibility. Add `message_receipt_time` to log messages associated with processing of a given SQS message. Fix incorrect error being wrapped when ApproximateReceiveCount threshold is reached. --- CHANGELOG.next.asciidoc | 1 + x-pack/filebeat/input/awss3/sqs_s3_event.go | 18 +++++++++-- .../filebeat/input/awss3/sqs_s3_event_test.go | 32 +++++++++++++++++++ 3 files changed, 49 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 4a69cc01758..248e1618093 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -149,6 +149,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Fix `threatintel.misp` filters configuration. {issue}27970[27970] - Fix handling of escaped newlines in the `decode_cef` processor. {issue}16995[16995] {pull}29268[29268] - Fix `panw` module ingest errors for GLOBALPROTECT logs {pull}29154[29154] +- aws-s3: Stop trying to increase SQS message visibility after ReceiptHandleIsInvalid errors. {pull}29480[29480] - Fix handling of IPv6 addresses in netflow flow events. 
{issue}19210[19210] {pull}29383[29383] *Heartbeat* diff --git a/x-pack/filebeat/input/awss3/sqs_s3_event.go b/x-pack/filebeat/input/awss3/sqs_s3_event.go index b6641a36c81..d1865aec9cd 100644 --- a/x-pack/filebeat/input/awss3/sqs_s3_event.go +++ b/x-pack/filebeat/input/awss3/sqs_s3_event.go @@ -14,6 +14,7 @@ import ( "sync" "time" + "github.com/aws/aws-sdk-go-v2/aws/awserr" "github.com/aws/aws-sdk-go-v2/service/sqs" "github.com/pkg/errors" "go.uber.org/multierr" @@ -105,7 +106,9 @@ func newSQSS3EventProcessor(log *logp.Logger, metrics *inputMetrics, sqs sqsAPI, } func (p *sqsS3EventProcessor) ProcessSQS(ctx context.Context, msg *sqs.Message) error { - log := p.log.With("message_id", *msg.MessageId) + log := p.log.With( + "message_id", *msg.MessageId, + "message_receipt_time", time.Now().UTC()) keepaliveCtx, keepaliveCancel := context.WithCancel(ctx) defer keepaliveCancel() @@ -137,7 +140,7 @@ func (p *sqsS3EventProcessor) ProcessSQS(ctx context.Context, msg *sqs.Message) if receiveCount, err := strconv.Atoi(v); err == nil && receiveCount >= p.maxReceiveCount { processingErr = nonRetryableErrorWrap(fmt.Errorf( "sqs ApproximateReceiveCount <%v> exceeds threshold %v: %w", - receiveCount, p.maxReceiveCount, err)) + receiveCount, p.maxReceiveCount, processingErr)) } } } @@ -180,6 +183,17 @@ func (p *sqsS3EventProcessor) keepalive(ctx context.Context, log *logp.Logger, w // Renew visibility. if err := p.sqs.ChangeMessageVisibility(ctx, msg, p.sqsVisibilityTimeout); err != nil { + var awsErr awserr.Error + if errors.As(err, &awsErr) { + switch awsErr.Code() { + case sqs.ErrCodeReceiptHandleIsInvalid: + log.Warnw("Failed to extend message visibility timeout "+ + "because SQS receipt handle is no longer valid. 
"+ + "Stopping SQS message keepalive routine.", "error", err) + return + } + } + log.Warnw("Failed to extend message visibility timeout.", "error", err) } } diff --git a/x-pack/filebeat/input/awss3/sqs_s3_event_test.go b/x-pack/filebeat/input/awss3/sqs_s3_event_test.go index ad6d30056d4..6100dbe3119 100644 --- a/x-pack/filebeat/input/awss3/sqs_s3_event_test.go +++ b/x-pack/filebeat/input/awss3/sqs_s3_event_test.go @@ -8,9 +8,12 @@ import ( "context" "errors" "fmt" + "sync" "testing" "time" + "github.com/aws/aws-sdk-go-v2/aws/awserr" + "github.com/aws/aws-sdk-go-v2/service/sqs" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -161,6 +164,35 @@ func TestSQSS3EventProcessor(t *testing.T) { }) } +func TestSqsProcessor_keepalive(t *testing.T) { + msg := newSQSMessage(newS3Event("log.json")) + + // Test will call ChangeMessageVisibility once and then keepalive will + // exit because the SQS receipt handle is not usable. + t.Run("keepalive stops after receipt handle is invalid", func(t *testing.T) { + const visibilityTimeout = time.Second + + ctx, cancel := context.WithTimeout(context.Background(), testTimeout) + defer cancel() + + ctrl, ctx := gomock.WithContext(ctx, t) + defer ctrl.Finish() + mockAPI := NewMockSQSAPI(ctrl) + mockS3HandlerFactory := NewMockS3ObjectHandlerFactory(ctrl) + + receiptHandleErr := awserr.New(sqs.ErrCodeReceiptHandleIsInvalid, "fake receipt handle is invalid.", nil) + + mockAPI.EXPECT().ChangeMessageVisibility(gomock.Any(), gomock.Eq(&msg), gomock.Eq(visibilityTimeout)). 
+ Times(1).Return(receiptHandleErr) + + p := newSQSS3EventProcessor(logp.NewLogger(inputName), nil, mockAPI, nil, visibilityTimeout, 5, mockS3HandlerFactory) + var wg sync.WaitGroup + wg.Add(1) + p.keepalive(ctx, p.log, &wg, &msg) + wg.Wait() + }) +} + func TestSqsProcessor_getS3Notifications(t *testing.T) { logp.TestingSetup() From b71fc20b6ae5337122afb4499d8633ca782b7a7a Mon Sep 17 00:00:00 2001 From: Andres Rodriguez Date: Fri, 17 Dec 2021 07:04:40 +0100 Subject: [PATCH 19/57] Add backport rule for 7.17 branch (#29479) --- .mergify.yml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/.mergify.yml b/.mergify.yml index ade2822ada9..fcf643d21ac 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -185,6 +185,19 @@ pull_request_rules: labels: - "backport" title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}" + - name: backport patches to 7.17 branch + conditions: + - merged + - label=backport-v7.17.0 + actions: + backport: + assignees: + - "{{ author }}" + branches: + - "7.17" + labels: + - "backport" + title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}" - name: backport patches to 8.0 branch conditions: - merged From 5173e55bfb46b715372053a9bce75722a517a986 Mon Sep 17 00:00:00 2001 From: Andres Rodriguez Date: Fri, 17 Dec 2021 13:50:36 +0100 Subject: [PATCH 20/57] Cleanup breaking changes section (#29490) (#29492) (cherry picked from commit d9847d75ef2f681905ee0ef8f4913655ec20bada) --- CHANGELOG.next.asciidoc | 4 ---- 1 file changed, 4 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 248e1618093..770c17fb03c 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -11,7 +11,6 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d *Affecting all Beats* - Remove the non-ECS `agent.hostname` field. Use the `agent.name` or `agent.id` fields for an identifier. 
{issue}16377[16377] {pull}18328[18328] -- Make error message about locked data path actionable. {pull}18667[18667] - Remove the deprecated `xpack.monitoring.*` settings. Going forward only `monitoring.*` settings may be used. {issue}9424[9424] {pull}18608[18608] - Remove deprecated/undocumented IncludeCreatorMetadata setting from kubernetes metadata config options {pull}28006[28006] - Remove deprecated fields from kubernetes module {pull}28046[28046] @@ -38,12 +37,9 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Fix parsing of Elasticsearch node name by `elasticsearch/slowlog` fileset. {pull}14547[14547] - With the default configuration the following modules will no longer send the `host` field that contains information about the host on which Filebeat is running. You can revert this change by configuring tags for the module and omitting `forwarded` from the list. {issue}13920[13920] -- Preserve case of http.request.method. ECS prior to 1.6 specified normalizing to lowercase, which lost information. Affects filesets: apache/access, elasticsearch/audit, iis/access, iis/error, nginx/access, nginx/ingress_controller, aws/elb, suricata/eve, zeek/http. {issue}18154[18154] {pull}18359[18359] - With the default configuration the cloud modules (aws, azure, googlecloud, o365, okta) - With the default configuration the cef and panw modules will no longer send the `host` -- Preserve case of http.request.method. ECS prior to 1.6 specified normalizing to lowercase, which lost information. Affects filesets: apache/access, elasticsearch/audit, iis/access, iis/error, nginx/access, nginx/ingress_controller, aws/elb, suricata/eve, zeek/http. {issue}18154[18154] {pull}18359[18359] - Add `while_pattern` type to multiline reader. {pull}19662[19662] -- Add support for GMT timezone offsets in `decode_cef`. 
{pull}20993[20993] *Heartbeat* From b0d35a3d412e3d345f39306c4e22fd09242e9754 Mon Sep 17 00:00:00 2001 From: Kevin Lacabane Date: Fri, 17 Dec 2021 14:44:41 +0100 Subject: [PATCH 21/57] [monitoring] bump index name version (#29493) --- metricbeat/helper/elastic/elastic.go | 2 +- metricbeat/helper/elastic/elastic_test.go | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/metricbeat/helper/elastic/elastic.go b/metricbeat/helper/elastic/elastic.go index 81216bb0e39..837ff615064 100644 --- a/metricbeat/helper/elastic/elastic.go +++ b/metricbeat/helper/elastic/elastic.go @@ -79,7 +79,7 @@ func (p Product) String() string { // MakeXPackMonitoringIndexName method returns the name of the monitoring index for // a given product { elasticsearch, kibana, logstash, beats } func MakeXPackMonitoringIndexName(product Product) string { - const version = "7" + const version = "8" return fmt.Sprintf(".monitoring-%v-%v-mb", product.xPackMonitoringIndexString(), version) } diff --git a/metricbeat/helper/elastic/elastic_test.go b/metricbeat/helper/elastic/elastic_test.go index 26f6d4723d5..57f62d0d4d8 100644 --- a/metricbeat/helper/elastic/elastic_test.go +++ b/metricbeat/helper/elastic/elastic_test.go @@ -38,22 +38,22 @@ func TestMakeXPackMonitoringIndexName(t *testing.T) { { "Elasticsearch monitoring index", Elasticsearch, - ".monitoring-es-7-mb", + ".monitoring-es-8-mb", }, { "Kibana monitoring index", Kibana, - ".monitoring-kibana-7-mb", + ".monitoring-kibana-8-mb", }, { "Logstash monitoring index", Logstash, - ".monitoring-logstash-7-mb", + ".monitoring-logstash-8-mb", }, { "Beats monitoring index", Beats, - ".monitoring-beats-7-mb", + ".monitoring-beats-8-mb", }, } From c428cb17ff0aa6613cf065b2779481a8451e1658 Mon Sep 17 00:00:00 2001 From: Kevin Lacabane Date: Fri, 17 Dec 2021 15:32:51 +0100 Subject: [PATCH 22/57] [kibana module] write to rootfields instead of modulefields (#29494) --- metricbeat/module/kibana/stats/data.go | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/metricbeat/module/kibana/stats/data.go b/metricbeat/module/kibana/stats/data.go index 79e6454f805..23696d98670 100644 --- a/metricbeat/module/kibana/stats/data.go +++ b/metricbeat/module/kibana/stats/data.go @@ -123,7 +123,7 @@ func eventMapping(r mb.ReporterV2, content []byte, isXpack bool) error { event.Error = elastic.MakeErrorForMissingField("cluster_uuid", elastic.Kibana) return event.Error } - event.ModuleFields.Put("elasticsearch.cluster.id", elasticsearchClusterID) + event.RootFields.Put("elasticsearch.cluster.id", elasticsearchClusterID) // Set service ID uuid, err := dataFields.GetValue("uuid") From b52ad7f9ed7a5bd28924acd88ccaf5f3b989c88b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?No=C3=A9mi=20V=C3=A1nyi?= Date: Fri, 17 Dec 2021 16:27:05 +0100 Subject: [PATCH 23/57] Change renaming `@timestamp` to copying values in Ingest pipelines (#29425) --- .../module/nginx/ingress_controller/ingest/pipeline.yml | 6 +++--- .../module/checkpoint/firewall/ingest/pipeline.yml | 7 +++---- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/filebeat/module/nginx/ingress_controller/ingest/pipeline.yml b/filebeat/module/nginx/ingress_controller/ingest/pipeline.yml index bfe589cf703..eaed9342b61 100644 --- a/filebeat/module/nginx/ingress_controller/ingest/pipeline.yml +++ b/filebeat/module/nginx/ingress_controller/ingest/pipeline.yml @@ -208,9 +208,9 @@ processors: patterns: - ^%{IP:source.ip}$ ignore_failure: true - - rename: - field: "@timestamp" - target_field: event.created + - set: + copy_from: "@timestamp" + field: event.created - date: field: nginx.ingress_controller.time target_field: "@timestamp" diff --git a/x-pack/filebeat/module/checkpoint/firewall/ingest/pipeline.yml b/x-pack/filebeat/module/checkpoint/firewall/ingest/pipeline.yml index 13a92e10f31..b4dd786b094 100644 --- a/x-pack/filebeat/module/checkpoint/firewall/ingest/pipeline.yml +++ b/x-pack/filebeat/module/checkpoint/firewall/ingest/pipeline.yml @@ -40,10 
+40,9 @@ processors: - message - host ignore_missing: true -- rename: - field: "@timestamp" - target_field: "event.created" - ignore_missing: true +- set: + copy_from: "@timestamp" + field: "event.created" - date: field: "syslog5424_ts" formats: ["ISO8601", "UNIX"] From 9fbbdc349b7d35e08cf35e2ada1acf299840f933 Mon Sep 17 00:00:00 2001 From: Julien Lind Date: Fri, 17 Dec 2021 16:40:58 +0100 Subject: [PATCH 24/57] Create elastic-agent-project-board.yml (#29500) --- .../workflows/elastic-agent-project-board.yml | 50 +++++++++++++++++++ 1 file changed, 50 insertions(+) create mode 100644 .github/workflows/elastic-agent-project-board.yml diff --git a/.github/workflows/elastic-agent-project-board.yml b/.github/workflows/elastic-agent-project-board.yml new file mode 100644 index 00000000000..7a41ef501e3 --- /dev/null +++ b/.github/workflows/elastic-agent-project-board.yml @@ -0,0 +1,50 @@ +name: Add to Elastic Agent Data Plane or Control Plane Board +on: + issues: + types: + - labeled +jobs: + add_to_data_plane-project: + runs-on: ubuntu-latest + if: | + github.event.label.name == 'Team:Elastic-Agent-Data-Plane' + steps: + - uses: octokit/graphql-action@v2.x + id: add_to_project + with: + headers: '{"GraphQL-Features": "projects_next_graphql"}' + query: | + mutation add_to_project($projectid:String!,$contentid:String!) { + addProjectNextItem(input:{projectId:$projectid contentId:$contentid}) { + projectNextItem { + id + } + } + } + projectid: ${{ env.PROJECT_ID }} + contentid: ${{ github.event.issue.node_id }} + env: + PROJECT_ID: "PRO_kwDOAGc3Zs4AzG8z" + GITHUB_TOKEN: ${{ secrets.PROJECT_ASSIGNER_TOKEN }} + add_to_control_plane-project: + runs-on: ubuntu-latest + if: | + github.event.label.name == 'Team:Elastic-Agent-Control-Plane' + steps: + - uses: octokit/graphql-action@v2.x + id: add_to_project + with: + headers: '{"GraphQL-Features": "projects_next_graphql"}' + query: | + mutation add_to_project($projectid:String!,$contentid:String!) 
{ + addProjectNextItem(input:{projectId:$projectid contentId:$contentid}) { + projectNextItem { + id + } + } + } + projectid: ${{ env.PROJECT_ID }} + contentid: ${{ github.event.issue.node_id }} + env: + PROJECT_ID: "PRO_kwDOAGc3Zs4AzG9E" + GITHUB_TOKEN: ${{ secrets.PROJECT_ASSIGNER_TOKEN }} From 21096a957ef490344c829e2f4e15d71a2abe66d4 Mon Sep 17 00:00:00 2001 From: Julien Lind Date: Fri, 17 Dec 2021 16:48:32 +0100 Subject: [PATCH 25/57] Update elastic-agent-project-board.yml --- .github/workflows/elastic-agent-project-board.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/elastic-agent-project-board.yml b/.github/workflows/elastic-agent-project-board.yml index 7a41ef501e3..edc4687349d 100644 --- a/.github/workflows/elastic-agent-project-board.yml +++ b/.github/workflows/elastic-agent-project-board.yml @@ -25,7 +25,7 @@ jobs: contentid: ${{ github.event.issue.node_id }} env: PROJECT_ID: "PRO_kwDOAGc3Zs4AzG8z" - GITHUB_TOKEN: ${{ secrets.PROJECT_ASSIGNER_TOKEN }} + GITHUB_TOKEN: ${{ secrets.ELASTIC_AGENT_PROJECT_BOARD_TOKEN }} add_to_control_plane-project: runs-on: ubuntu-latest if: | @@ -47,4 +47,4 @@ jobs: contentid: ${{ github.event.issue.node_id }} env: PROJECT_ID: "PRO_kwDOAGc3Zs4AzG9E" - GITHUB_TOKEN: ${{ secrets.PROJECT_ASSIGNER_TOKEN }} + GITHUB_TOKEN: ${{ secrets.ELASTIC_AGENT_PROJECT_BOARD_TOKEN }} From 8353bd7ea761ae15123904374063a7a3fb126ad4 Mon Sep 17 00:00:00 2001 From: Julien Lind Date: Fri, 17 Dec 2021 17:15:39 +0100 Subject: [PATCH 26/57] Update elastic-agent-project-board.yml --- .../workflows/elastic-agent-project-board.yml | 20 ++++++------------- 1 file changed, 6 insertions(+), 14 deletions(-) diff --git a/.github/workflows/elastic-agent-project-board.yml b/.github/workflows/elastic-agent-project-board.yml index edc4687349d..4e11d39b87d 100644 --- a/.github/workflows/elastic-agent-project-board.yml +++ b/.github/workflows/elastic-agent-project-board.yml @@ -14,13 +14,9 @@ jobs: with: headers: 
'{"GraphQL-Features": "projects_next_graphql"}' query: | - mutation add_to_project($projectid:String!,$contentid:String!) { - addProjectNextItem(input:{projectId:$projectid contentId:$contentid}) { - projectNextItem { - id - } - } - } + updateIssue(input: {id:$contentid, projectIds:$projectid}) { + clientMutationId + } projectid: ${{ env.PROJECT_ID }} contentid: ${{ github.event.issue.node_id }} env: @@ -36,13 +32,9 @@ jobs: with: headers: '{"GraphQL-Features": "projects_next_graphql"}' query: | - mutation add_to_project($projectid:String!,$contentid:String!) { - addProjectNextItem(input:{projectId:$projectid contentId:$contentid}) { - projectNextItem { - id - } - } - } + updateIssue(input: {id:$contentid, projectIds:$projectid}) { + clientMutationId + } projectid: ${{ env.PROJECT_ID }} contentid: ${{ github.event.issue.node_id }} env: From 3e852fbbb1c66583aad8b7190f8116afe6f9aa9b Mon Sep 17 00:00:00 2001 From: Julien Lind Date: Fri, 17 Dec 2021 17:18:45 +0100 Subject: [PATCH 27/57] Update elastic-agent-project-board.yml --- .github/workflows/elastic-agent-project-board.yml | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/.github/workflows/elastic-agent-project-board.yml b/.github/workflows/elastic-agent-project-board.yml index 4e11d39b87d..979966f6b52 100644 --- a/.github/workflows/elastic-agent-project-board.yml +++ b/.github/workflows/elastic-agent-project-board.yml @@ -14,8 +14,10 @@ jobs: with: headers: '{"GraphQL-Features": "projects_next_graphql"}' query: | - updateIssue(input: {id:$contentid, projectIds:$projectid}) { - clientMutationId + mutation add_to_project($projectid:String!,$contentid:String!) 
{ + updateIssue(input: {id:$contentid, projectIds:$projectid}) { + clientMutationId + } } projectid: ${{ env.PROJECT_ID }} contentid: ${{ github.event.issue.node_id }} @@ -32,9 +34,11 @@ jobs: with: headers: '{"GraphQL-Features": "projects_next_graphql"}' query: | - updateIssue(input: {id:$contentid, projectIds:$projectid}) { - clientMutationId - } + mutation add_to_project($projectid:String!,$contentid:String!) { + updateIssue(input: {id:$contentid, projectIds:$projectid}) { + clientMutationId + } + } projectid: ${{ env.PROJECT_ID }} contentid: ${{ github.event.issue.node_id }} env: From ecd68dbf492f9fbb5dc8a7b65664b9f0cfd85efa Mon Sep 17 00:00:00 2001 From: Dalibor P <9079844+dplavcic@users.noreply.github.com> Date: Fri, 17 Dec 2021 17:47:25 +0100 Subject: [PATCH 28/57] [Docs] Update logging.files.permissions documentation to consider umask (#20584) (#28347) Changes implemented in the (#14119) made all Beats-created files and folders apply an umask of 0027 (on POSIX systems). Co-authored-by: dplavcic --- filebeat/docs/filebeat-general-options.asciidoc | 6 +++--- .../docs/howto/override-config-settings.asciidoc | 2 +- libbeat/docs/loggingconfig.asciidoc | 13 ++++++++----- 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/filebeat/docs/filebeat-general-options.asciidoc b/filebeat/docs/filebeat-general-options.asciidoc index fc53057f4c2..d1bcdf2e545 100644 --- a/filebeat/docs/filebeat-general-options.asciidoc +++ b/filebeat/docs/filebeat-general-options.asciidoc @@ -41,14 +41,14 @@ That means in case there are some states where the TTL expired, these are only r The permissions mask to apply on registry data file. The default value is 0600. The permissions option must be a valid Unix-style file permissions mask expressed in octal notation. In Go, numbers in octal notation must start with 0. The most permissive mask allowed is 0640. If a higher permissions mask is -specified via this setting, it will be subject to a umask of 0027. 
+specified via this setting, it will be subject to an umask of 0027. This option is not supported on Windows. Examples: - 0640: give read and write access to the file owner, and read access to members of the group associated with the file. - 0600: give read and write access to the file owner, and no access to all others. +* 0640: give read and write access to the file owner, and read access to members of the group associated with the file. +* 0600: give read and write access to the file owner, and no access to all others. [source,yaml] ------------------------------------------------------------------------------------- diff --git a/filebeat/docs/howto/override-config-settings.asciidoc b/filebeat/docs/howto/override-config-settings.asciidoc index cb69353f00b..63bcbfa784d 100644 --- a/filebeat/docs/howto/override-config-settings.asciidoc +++ b/filebeat/docs/howto/override-config-settings.asciidoc @@ -37,7 +37,7 @@ logging.files: path: /var/log/filebeat name: filebeat keepfiles: 7 - permissions: 0644 + permissions: 0640 ---- To override the logging level and send logging output to standard error instead diff --git a/libbeat/docs/loggingconfig.asciidoc b/libbeat/docs/loggingconfig.asciidoc index d6232e2cb07..b5443d2d978 100644 --- a/libbeat/docs/loggingconfig.asciidoc +++ b/libbeat/docs/loggingconfig.asciidoc @@ -32,7 +32,7 @@ logging.files: path: /var/log/{beatname_lc} name: {beatname_lc} keepfiles: 7 - permissions: 0644 + permissions: 0640 ---- endif::win_only[] @@ -45,7 +45,7 @@ logging.files: path: C:{backslash}ProgramData{backslash}{beatname_lc}{backslash}Logs name: {beatname_lc} keepfiles: 7 - permissions: 0644 + permissions: 0640 ---- endif::win_only[] @@ -231,12 +231,15 @@ The permissions mask to apply when rotating log files. The default value is expressed in octal notation. In Go, numbers in octal notation must start with '0'. +The most permissive mask allowed is 0640. 
If a higher permissions mask is +specified via this setting, it will be subject to an umask of 0027. + +This option is not supported on Windows. + Examples: -* 0644: give read and write access to the file owner, and read access to all others. +* 0640: give read and write access to the file owner, and read access to members of the group associated with the file. * 0600: give read and write access to the file owner, and no access to all others. -* 0664: give read and write access to the file owner and members of the group -associated with the file, as well as read access to all other users. [float] ==== `logging.files.interval` From 1b7d6e090cad8b603671f6c5dba41027a9ed19a0 Mon Sep 17 00:00:00 2001 From: Alex Resnick Date: Fri, 17 Dec 2021 12:31:39 -0600 Subject: [PATCH 29/57] Fix cloudtrail config (#29450) --- CHANGELOG.next.asciidoc | 5 +++-- x-pack/filebeat/module/aws/cloudtrail/config/aws-s3.yml | 4 ++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 770c17fb03c..ab3dbaa0c40 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -147,6 +147,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Fix `panw` module ingest errors for GLOBALPROTECT logs {pull}29154[29154] - aws-s3: Stop trying to increase SQS message visibility after ReceiptHandleIsInvalid errors. {pull}29480[29480] - Fix handling of IPv6 addresses in netflow flow events. {issue}19210[19210] {pull}29383[29383] +- Undo deletion of endpoint config from cloudtrail fileset in {pull}29415[29415]. {pull}29450[29450] *Heartbeat* @@ -227,8 +228,8 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Discover changes in Kubernetes nodes metadata as soon as they happen. {pull}23139[23139] - Support self signed certificates on outputs {pull}29229[29229] - Update k8s library {pull}29394[29394] -- Add FIPS configuration option for all AWS API calls. 
{pull}[28899] -- Add `default_region` config to AWS common module. {pull}[29415] +- Add FIPS configuration option for all AWS API calls. {pull}28899[28899] +- Add `default_region` config to AWS common module. {pull}29415[29415] *Auditbeat* diff --git a/x-pack/filebeat/module/aws/cloudtrail/config/aws-s3.yml b/x-pack/filebeat/module/aws/cloudtrail/config/aws-s3.yml index 8c98bc31be6..ada3a502fc2 100644 --- a/x-pack/filebeat/module/aws/cloudtrail/config/aws-s3.yml +++ b/x-pack/filebeat/module/aws/cloudtrail/config/aws-s3.yml @@ -49,6 +49,10 @@ visibility_timeout: {{ .visibility_timeout }} api_timeout: {{ .api_timeout }} {{ end }} +{{ if .endpoint }} +endpoint: {{ .endpoint }} +{{ end }} + {{ if .default_region }} default_region: {{ .default_region }} {{ end }} From e3b1183a3c6d4bafa40362b3bd44d356994352fc Mon Sep 17 00:00:00 2001 From: Andrew Cholakian Date: Fri, 17 Dec 2021 17:33:51 -0600 Subject: [PATCH 30/57] [heartbeat] Only add monitor.status to browser summary events (#29460) we now only add monitor status to summary events for browsers. Additionally, this renames the poorly renamed wrappers/monitor* to wrappers/wrappers* --- CHANGELOG.next.asciidoc | 1 + .../wrappers/{monitors.go => wrappers.go} | 14 ++- .../{monitors_test.go => wrappers_test.go} | 87 ++++++++++++++----- 3 files changed, 77 insertions(+), 25 deletions(-) rename heartbeat/monitors/wrappers/{monitors.go => wrappers.go} (96%) rename heartbeat/monitors/wrappers/{monitors_test.go => wrappers_test.go} (87%) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index ab3dbaa0c40..dcedb9456fa 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -42,6 +42,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Add `while_pattern` type to multiline reader. {pull}19662[19662] *Heartbeat* +- Only add monitor.status to browser events when summary. 
{pull}29460[29460] *Metricbeat* diff --git a/heartbeat/monitors/wrappers/monitors.go b/heartbeat/monitors/wrappers/wrappers.go similarity index 96% rename from heartbeat/monitors/wrappers/monitors.go rename to heartbeat/monitors/wrappers/wrappers.go index dca3a8b70de..c9e0c79c105 100644 --- a/heartbeat/monitors/wrappers/monitors.go +++ b/heartbeat/monitors/wrappers/wrappers.go @@ -51,7 +51,7 @@ func WrapLightweight(js []jobs.Job, stdMonFields stdfields.StdMonitorFields) []j jobs.WrapAll( js, addMonitorMeta(stdMonFields, len(js) > 1), - addMonitorStatus(stdMonFields.Type), + addMonitorStatus(stdMonFields.Type, false), addMonitorDuration, ), func() jobs.JobWrapper { @@ -66,7 +66,7 @@ func WrapBrowser(js []jobs.Job, stdMonFields stdfields.StdMonitorFields) []jobs. return jobs.WrapAll( js, addMonitorMeta(stdMonFields, len(js) > 1), - addMonitorStatus(stdMonFields.Type), + addMonitorStatus(stdMonFields.Type, true), ) } @@ -142,12 +142,18 @@ func timespan(started time.Time, sched *schedule.Schedule, timeout time.Duration // by the original Job will be set as a field. The original error will not be // passed through as a return value. Errors may still be present but only if there // is an actual error wrapping the error. 
- -func addMonitorStatus(monitorType string) jobs.JobWrapper { +func addMonitorStatus(monitorType string, summaryOnly bool) jobs.JobWrapper { return func(origJob jobs.Job) jobs.Job { return func(event *beat.Event) ([]jobs.Job, error) { cont, err := origJob(event) + if summaryOnly { + hasSummary, _ := event.Fields.HasKey("summary.up") + if !hasSummary { + return cont, nil + } + } + fields := common.MapStr{ "monitor": common.MapStr{ "status": look.Status(err), diff --git a/heartbeat/monitors/wrappers/monitors_test.go b/heartbeat/monitors/wrappers/wrappers_test.go similarity index 87% rename from heartbeat/monitors/wrappers/monitors_test.go rename to heartbeat/monitors/wrappers/wrappers_test.go index 88e6fd76997..b84b18f65a9 100644 --- a/heartbeat/monitors/wrappers/monitors_test.go +++ b/heartbeat/monitors/wrappers/wrappers_test.go @@ -428,7 +428,6 @@ func TestInlineBrowserJob(t *testing.T) { "id": testMonFields.ID, "name": testMonFields.Name, "type": fields.Type, - "status": "up", "check_group": "inline-check-group", }, }), @@ -450,7 +449,7 @@ var suiteBrowserJobValues = struct { checkGroup: "journey-1-check-group", } -func makeSuiteBrowserJob(t *testing.T, u string) jobs.Job { +func makeSuiteBrowserJob(t *testing.T, u string, summary bool, suiteErr error) jobs.Job { parsed, err := url.Parse(u) require.NoError(t, err) return func(event *beat.Event) (i []jobs.Job, e error) { @@ -462,7 +461,18 @@ func makeSuiteBrowserJob(t *testing.T, u string) jobs.Job { "check_group": suiteBrowserJobValues.checkGroup, }, }) - return nil, nil + if summary { + sumFields := common.MapStr{"up": 0, "down": 0} + if suiteErr == nil { + sumFields["up"] = 1 + } else { + sumFields["down"] = 1 + } + eventext.MergeEventFields(event, common.MapStr{ + "summary": sumFields, + }) + } + return nil, suiteErr } } @@ -470,30 +480,65 @@ func TestSuiteBrowserJob(t *testing.T) { fields := testBrowserMonFields urlStr := "http://foo.com" urlU, _ := url.Parse(urlStr) + expectedMonFields := 
lookslike.MustCompile(map[string]interface{}{ + "monitor": map[string]interface{}{ + "id": fmt.Sprintf("%s-%s", testMonFields.ID, suiteBrowserJobValues.id), + "name": fmt.Sprintf("%s - %s", testMonFields.Name, suiteBrowserJobValues.name), + "type": fields.Type, + "check_group": suiteBrowserJobValues.checkGroup, + "timespan": common.MapStr{ + "gte": hbtestllext.IsTime, + "lt": hbtestllext.IsTime, + }, + }, + "url": URLFields(urlU), + }) testCommonWrap(t, testDef{ - "simple", + "simple", // has no summary fields! fields, - []jobs.Job{makeSuiteBrowserJob(t, urlStr)}, + []jobs.Job{makeSuiteBrowserJob(t, urlStr, false, nil)}, []validator.Validator{ - lookslike.Compose( - urlValidator(t, urlStr), - lookslike.Strict( + lookslike.Strict( + lookslike.Compose( + urlValidator(t, urlStr), + expectedMonFields, + ))}, + nil, + }) + testCommonWrap(t, testDef{ + "with up summary", + fields, + []jobs.Job{makeSuiteBrowserJob(t, urlStr, true, nil)}, + []validator.Validator{ + lookslike.Strict( + lookslike.Compose( + urlValidator(t, urlStr), + expectedMonFields, lookslike.MustCompile(map[string]interface{}{ - "monitor": map[string]interface{}{ - "id": fmt.Sprintf("%s-%s", testMonFields.ID, suiteBrowserJobValues.id), - "name": fmt.Sprintf("%s - %s", testMonFields.Name, suiteBrowserJobValues.name), - "type": fields.Type, - "check_group": suiteBrowserJobValues.checkGroup, - "status": "up", - "timespan": common.MapStr{ - "gte": hbtestllext.IsTime, - "lt": hbtestllext.IsTime, - }, + "monitor": map[string]interface{}{"status": "up"}, + "summary": map[string]interface{}{"up": 1, "down": 0}, + }), + ))}, + nil, + }) + testCommonWrap(t, testDef{ + "with down summary", + fields, + []jobs.Job{makeSuiteBrowserJob(t, urlStr, true, fmt.Errorf("testerr"))}, + []validator.Validator{ + lookslike.Strict( + lookslike.Compose( + urlValidator(t, urlStr), + expectedMonFields, + lookslike.MustCompile(map[string]interface{}{ + "monitor": map[string]interface{}{"status": "down"}, + "summary": 
map[string]interface{}{"up": 0, "down": 1}, + "error": map[string]interface{}{ + "type": isdef.IsString, + "message": "testerr", }, - "url": URLFields(urlU), }), - ), - )}, + ))}, nil, }) } From ef6899f3e16218a10160eafe6b284127f7bebc2d Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Sun, 19 Dec 2021 15:56:07 +0000 Subject: [PATCH 31/57] update beats-tester main branch (#29496) --- .ci/beats-tester-bc.groovy | 2 +- .ci/beats-tester.groovy | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.ci/beats-tester-bc.groovy b/.ci/beats-tester-bc.groovy index d7fb36f15bd..d026fa77a15 100644 --- a/.ci/beats-tester-bc.groovy +++ b/.ci/beats-tester-bc.groovy @@ -7,7 +7,7 @@ pipeline { environment { BASE_DIR = 'src/github.com/elastic/beats' PIPELINE_LOG_LEVEL = "INFO" - BEATS_TESTER_JOB = 'Beats/beats-tester-mbp/master' + BEATS_TESTER_JOB = 'Beats/beats-tester-mbp/main' BASE_URL = "https://staging.elastic.co/${params.version}/downloads" APM_BASE_URL = "${env.BASE_URL}/apm-server" BEATS_BASE_URL = "${env.BASE_URL}/beats" diff --git a/.ci/beats-tester.groovy b/.ci/beats-tester.groovy index 8d96fb1efea..ef0b8382826 100644 --- a/.ci/beats-tester.groovy +++ b/.ci/beats-tester.groovy @@ -7,7 +7,7 @@ pipeline { environment { BASE_DIR = 'src/github.com/elastic/beats' PIPELINE_LOG_LEVEL = "INFO" - BEATS_TESTER_JOB = 'Beats/beats-tester-mbp/master' + BEATS_TESTER_JOB = 'Beats/beats-tester-mbp/main' } options { timeout(time: 1, unit: 'HOURS') From e9c18f7f357a5349bbbb2883a18c925d5d8aa1b0 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 20 Dec 2021 02:06:28 -0500 Subject: [PATCH 32/57] [Automation] Update elastic stack version to 8.1.0-befff95a for testing (#29535) Co-authored-by: apmmachine --- testing/environments/snapshot-oss.yml | 6 +++--- testing/environments/snapshot.yml | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/testing/environments/snapshot-oss.yml 
b/testing/environments/snapshot-oss.yml index ce828d0f5ab..2b252a4a60c 100644 --- a/testing/environments/snapshot-oss.yml +++ b/testing/environments/snapshot-oss.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.1.0-d8a3a806-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.1.0-befff95a-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:9200/_cat/health?h=status | grep -q green"] retries: 300 @@ -21,7 +21,7 @@ services: - "script.context.template.cache_max_size=2000" logstash: - image: docker.elastic.co/logstash/logstash-oss:8.1.0-d8a3a806-SNAPSHOT + image: docker.elastic.co/logstash/logstash-oss:8.1.0-befff95a-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -31,7 +31,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.1.0-d8a3a806-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.1.0-befff95a-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:5601/api/status?v8format=true | grep -q '\"overall\":{\"level\":\"available\"'"] retries: 600 diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 2ef97dfeebd..d46af8af39a 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.1.0-d8a3a806-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.1.0-befff95a-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:9200/_cat/health?h=status | grep -q green"] retries: 300 @@ -37,7 +37,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.1.0-d8a3a806-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.1.0-befff95a-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:5601/api/status?v8format=true | 
grep -q '\"overall\":{\"level\":\"available\"'"] retries: 600 From d61c298e8050a1b6bdd9125a080f87d8726f94d3 Mon Sep 17 00:00:00 2001 From: Andres Rodriguez Date: Mon, 20 Dec 2021 08:07:03 +0100 Subject: [PATCH 33/57] Forward port 7.16.2 changelog to master and cleanup (#29532) --- CHANGELOG.asciidoc | 21 +++++ CHANGELOG.next.asciidoc | 149 ++-------------------------------- libbeat/docs/release.asciidoc | 1 + 3 files changed, 28 insertions(+), 143 deletions(-) diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index 082ef758cb6..dfad0bb2de1 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -17,6 +17,27 @@ Changes will be described in a later alpha / beta. === Beats version 8.0.0-alpha1 Changes will be described in a later alpha / beta. +[[release-notes-7.16.2]] +=== Beats version 7.16.2 +https://github.com/elastic/beats/compare/v7.16.1...v7.16.2[View commits] + +==== Bugfixes + +*Filebeat* + +- Resolve issue with @timestamp for `defender_atp`. {pull}28272[28272] +- Fix handling of escaped newlines in the `decode_cef` processor. {issue}16995[16995] {pull}29268[29268] + +==== Added + +*Filebeat* + +- Update Cisco module to enable TCP input. {issue}26118[26118] {issue}28821[28821] {pull}26159[26159] + +*Winlogbeat* + +- Add configuration option for registry file flush timeout {issue}29001[29001] {pull}29053[29053] + [[release-notes-7.16.1]] === Beats version 7.16.1 https://github.com/elastic/beats/compare/v7.16.0...v7.16.1[View commits] diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index dcedb9456fa..1ceabcb9c57 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -10,7 +10,6 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d *Affecting all Beats* -- Remove the non-ECS `agent.hostname` field. Use the `agent.name` or `agent.id` fields for an identifier. {issue}16377[16377] {pull}18328[18328] - Remove the deprecated `xpack.monitoring.*` settings. 
Going forward only `monitoring.*` settings may be used. {issue}9424[9424] {pull}18608[18608] - Remove deprecated/undocumented IncludeCreatorMetadata setting from kubernetes metadata config options {pull}28006[28006] - Remove deprecated fields from kubernetes module {pull}28046[28046] @@ -36,7 +35,6 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d *Filebeat* - Fix parsing of Elasticsearch node name by `elasticsearch/slowlog` fileset. {pull}14547[14547] -- With the default configuration the following modules will no longer send the `host` field that contains information about the host on which Filebeat is running. You can revert this change by configuring tags for the module and omitting `forwarded` from the list. {issue}13920[13920] - With the default configuration the cloud modules (aws, azure, googlecloud, o365, okta) - With the default configuration the cef and panw modules will no longer send the `host` - Add `while_pattern` type to multiline reader. {pull}19662[19662] @@ -71,7 +69,6 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Improve ECS field mappings in Sysmon module. Hashes are now also populated to the corresponding `process.hash`, `process.pe.imphash`, `file.hash`, or `file.pe.imphash`. {issue}18364[18364] - Improve ECS field mappings in Sysmon module. `file.name`, `file.directory`, and `file.extension` are now populated. {issue}18364[18364] - Improve ECS field mappings in Sysmon module. `rule.name` is populated for all events when present. 
{issue}18364[18364] -- Fix unprefixed fields in `fields.yml` for Powershell module {issue}18984[18984] - Remove top level `hash` property from sysmon events {pull}20653[20653] *Functionbeat* @@ -81,109 +78,21 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d *Affecting all Beats* -- Fix a race condition with the Kafka pipeline client, it is possible that `Close()` get called before `Connect()` . {issue}11945[11945] -- Allow users to configure only `cluster_uuid` setting under `monitoring` namespace. {pull}14338[14338] -- Update replicaset group to apps/v1 {pull}15854[15802] -- Fix missing output in dockerlogbeat {pull}15719[15719] -- Fix issue where TLS settings would be ignored when a forward proxy was in use. {pull}15516[15516] -- Update replicaset group to apps/v1 {pull}15854[15802] -- Add `ssl.ca_sha256` option to the supported TLS option, this allow to check that a specific certificate is used as part of the verified chain. {issue}15717[15717] -- Improve some logging messages for add_kubernetes_metadata processor {pull}16866{16866} -- Do not rotate log files on startup when interval is configured and rotateonstartup is disabled. {pull}17613[17613] -- Fix `setup.dashboards.index` setting not working. {pull}17749[17749] -- Fix Elasticsearch license endpoint URL referenced in error message. {issue}17880[17880] {pull}18030[18030] -- Change `decode_json_fields` processor, to merge parsed json objects with existing objects in the event instead of fully replacing them. {pull}17958[17958] -- Gives monitoring reporter hosts, if configured, total precedence over corresponding output hosts. {issue}17937[17937] {pull}17991[17991] -- Change `decode_json_fields` processor, to merge parsed json objects with existing objects in the event instead of fully replacing them. {pull}17958[17958] -- [Autodiscover] Check if runner is already running before starting again. 
{pull}18564[18564] -- Fix an issue where error messages are not accurate in mapstriface. {issue}18662[18662] {pull}18663[18663] -- Fix regression in `add_kubernetes_metadata`, so configured `indexers` and `matchers` are used if defaults are not disabled. {issue}18481[18481] {pull}18818[18818] -- Fix the `translate_sid` processor's handling of unconfigured target fields. {issue}18990[18990] {pull}18991[18991] -- Fixed a service restart failure under Windows. {issue}18914[18914] {pull}18916[18916] -- Fix terminating pod autodiscover issue. {pull}20084[20084] -- Fix seccomp policy for calls to `chmod` and `chown`. {pull}20054[20054] -- Output errors when Kibana index pattern setup fails. {pull}20121[20121] -- Fix issue in autodiscover that kept inputs stopped after config updates. {pull}20305[20305] -- Add service resource in k8s cluster role. {pull}20546[20546] -- Allows disable pod events enrichment with deployment name {pull}28521[28521] -- Fix `fingerprint` processor to give it access to the `@timestamp` field. {issue}28683[28683] -- Fix the wrong beat name on monitoring and state endpoint {issue}27755[27755] - *Auditbeat* -- system/package: Fix parsing of Installed-Size field of DEB packages. {issue}16661[16661] {pull}17188[17188] -- system module: Fix panic during initialisation when /proc/stat can't be read. {pull}17569[17569] -- system/package: Fix an error that can occur while trying to persist package metadata. {issue}18536[18536] {pull}18887[18887] -- system/socket: Fix bugs leading to wrong process being attributed to flows. {pull}29166[29166] {issue}17165[17165] *Filebeat* -- cisco/asa fileset: Fix parsing of 302021 message code. {pull}14519[14519] -- Fix filebeat azure dashboards, event category should be `Alert`. {pull}14668[14668] -- Fix s3 input with cloudtrail fileset reading json file. {issue}16374[16374] {pull}16441[16441] -- Add queue_url definition in manifest file for aws module. 
{pull}16640[16640] -- Add queue_url definition in manifest file for aws module. {pull}16640{16640} -- Fix `elasticsearch.gc` fileset to not collect _all_ logs when Elasticsearch is running in Docker. {issue}13164[13164] {issue}16583[16583] {pull}17164[17164] -- Fixed a mapping exception when ingesting CEF logs that used the spriv or dpriv extensions. {issue}17216[17216] {pull}17220[17220] -- Remove migrationVersion map 7.7.0 reference from Kibana dashboard file to fix backward compatibility issues. {pull}17425[17425] -- Fix issue 17734 to retry on rate-limit error in the Filebeat httpjson input. {issue}17734[17734] {pull}17735[17735] -- Fixed `cloudfoundry.access` to have the correct `cloudfoundry.app.id` contents. {pull}17847[17847] -- Fixing `ingress_controller.` fields to be of type keyword instead of text. {issue}17834[17834] -- Fixed typo in log message. {pull}17897[17897] -- Fix `o365` module ignoring `var.api` settings. {pull}18948[18948] -- Fix `netflow` module to support 7 bytepad for IPFIX template. {issue}18098[18098] -- Update container name for the azure filesets. {pull}19899[19899] -- Fix `o365` module ignoring `var.api` settings. {pull}18948[18948] -- Fix S3 input to trim delimiter /n from each log line. {pull}19972[19972] -- Fix s3 input parsing json file without expand_event_list_from_field. {issue}19902[19902] {pull}19962[19962] {pull}20370[20370] -- Fix millisecond timestamp normalization issues in CrowdStrike module {issue}20035[20035], {pull}20138[20138] -- Fix support for message code 106100 in Cisco ASA and FTD. {issue}19350[19350] {pull}20245[20245] -- Fix `fortinet` setting `event.timezone` to the system one when no `tz` field present {pull}20273[20273] -- Fix `okta` geoip lookup in pipeline for `destination.ip` {pull}20454[20454] -- Fix mapping exception in the `googlecloud/audit` dataset pipeline. {issue}18465[18465] {pull}20465[20465] -- Fix `cisco` asa and ftd parsing of messages 106102 and 106103. 
{pull}20469[20469] -- Resolve issue with @timestamp for defender_atp. {pull}28272[28272] -- Fix `threatintel.misp` filters configuration. {issue}27970[27970] -- Fix handling of escaped newlines in the `decode_cef` processor. {issue}16995[16995] {pull}29268[29268] -- Fix `panw` module ingest errors for GLOBALPROTECT logs {pull}29154[29154] - aws-s3: Stop trying to increase SQS message visibility after ReceiptHandleIsInvalid errors. {pull}29480[29480] - Fix handling of IPv6 addresses in netflow flow events. {issue}19210[19210] {pull}29383[29383] - Undo deletion of endpoint config from cloudtrail fileset in {pull}29415[29415]. {pull}29450[29450] *Heartbeat* -- Remove accidentally included cups library in docker images. {pull}28853[pull] - Fix broken monitors with newer versions of image relying on dup3. {pull}28938[pull] *Metricbeat* -- Fix checking tagsFilter using length in cloudwatch metricset. {pull}14525[14525] -- Log bulk failures from bulk API requests to monitoring cluster. {issue}14303[14303] {pull}14356[14356] -- Fix skipping protocol scheme by light modules. {pull}16205[pull] -- Revert changes in `docker` module: add size flag to docker.container. {pull}16600[16600] -- Fix detection and logging of some error cases with light modules. {pull}14706[14706] -- Fix imports after PR was merged before rebase. {pull}16756[16756] -- Reduce memory usage in `elasticsearch/index` metricset. {issue}16503[16503] {pull}16538[16538] -- Fix issue in Jolokia module when mbean contains multiple quoted properties. {issue}17375[17375] {pull}17374[17374] -- Fix issue in Jolokia module when mbean contains multiple quoted properties. {issue}17375[17375] {pull}17374[17374] -- Fix azure storage dashboards. {pull}17590[17590] -- Metricbeat no longer needs to be started strictly after Logstash for `logstash-xpack` module to report correct data. {issue}17261[17261] {pull}17497[17497] -- Fix pubsub metricset to collect all GA stage metrics from gcp stackdriver. 
{issue}17154[17154] {pull}17600[17600] -- Add privileged option so as mb to access data dir in Openshift. {pull}17606[17606] -- Fix "ID" event generator of Google Cloud module {issue}17160[17160] {pull}17608[17608] -- Add privileged option for Auditbeat in Openshift {pull}17637[17637] -- Fix storage metricset to allow config without region/zone. {issue}17623[17623] {pull}17624[17624] -- Fix overflow on Prometheus rates when new buckets are added on the go. {pull}17753[17753] -- Remove specific win32 api errors from events in perfmon. {issue}18292[18292] {pull}18361[18361] -- Fix application_pool metricset after pdh changes. {pull}18477[18477] -- Fix panic on `metricbeat test modules` when modules are configured in `metricbeat.modules`. {issue}18789[18789] {pull}18797[18797] -- Fix getting gcp compute instance metadata with partial zone/region in config. {pull}18757[18757] -- Add missing network.sent_packets_count metric into compute metricset in googlecloud module. {pull}18802[18802] -- Fix compute and pubsub dashboard for googlecloud module. {issue}18962[18962] {pull}18980[18980] -- Fix crash on vsphere module when Host information is not available. {issue}18996[18996] {pull}19078[19078] -- Modify doc for app_insights metricset to contain example of config. {pull}20185[20185] -- Add required option for `metrics` in app_insights. {pull}20406[20406] -- Groups same timestamp metric values to one event in the app_insights metricset. {pull}20403[20403] - Use xpack.enabled on SM modules to write into .monitoring indices when using Metricbeat standalone {pull}28365[28365] - Fix in rename processor to ingest metrics for `write.iops` to proper field instead of `write_iops` in rds metricset. {pull}28960[28960] - Enhance filter check in kubernetes event metricset. {pull}29470[29470] @@ -195,7 +104,6 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d *Winlogbeat* -- Add source.ip validation for event ID 4778 in the Security module. 
{issue}19627[19627] *Functionbeat* @@ -207,25 +115,10 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d *Affecting all Beats* -- Decouple Debug logging from fail_on_error logic for rename, copy, truncate processors {pull}12451[12451] -- Fingerprint processor adds a new xxhash hashing algorithm {pull}15418[15418] -- Update RPM packages contained in Beat Docker images. {issue}17035[17035] -- Update documentation for system.process.memory fields to include clarification on Windows os's. {pull}17268[17268] -- When using the `decode_json_fields` processor, decoded fields are now deep-merged into existing event. {pull}17958[17958] -- Update documentation for system.process.memory fields to include clarification on Windows os's. {pull}17268[17268] -- Add keystore support for autodiscover static configurations. {pull]16306[16306] -- When using the `decode_json_fields` processor, decoded fields are now deep-merged into existing event. {pull}17958[17958] -- Add keystore support for autodiscover static configurations. {pull]16306[16306] -- Add TLS support to Kerberos authentication in Elasticsearch. {pull}18607[18607] - Add config option `rotate_on_startup` to file output {issue}19150[19150] {pull}19347[19347] -- Set index.max_docvalue_fields_search in index template to increase value to 200 fields. {issue}20215[20215] -- Upgrade prometheus library. {pull}28716[28716] - Name all k8s workqueue. {pull}28085[28085] -- Add options to configure k8s client qps/burst. {pull}28151[28151] - Update to ECS 8.0 fields. {pull}28620[28620] -- Add http.pprof.enabled option to libbeat to allow http/pprof endpoints on the socket that libbeat creates for metrics. {issue}21965[21965] - Support custom analyzers in fields.yml. {issue}28540[28540] {pull}28926[28926] -- SASL/SCRAM in the Kafka output is no longer beta. {pull}29126[29126] - Discover changes in Kubernetes nodes metadata as soon as they happen. 
{pull}23139[23139] - Support self signed certificates on outputs {pull}29229[29229] - Update k8s library {pull}29394[29394] @@ -234,55 +127,21 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d *Auditbeat* -- Reference kubernetes manifests include configuration for auditd and enrichment with kubernetes metadata. {pull}17431[17431] *Filebeat* -- `container` and `docker` inputs now support reading of labels and env vars written by docker JSON file logging driver. {issue}8358[8358] -- Add `index` option to all inputs to directly set a per-input index value. {pull}14010[14010] -- move create-[module,fileset,fields] to mage and enable in x-pack/filebeat {pull}15836[15836] -- Work on e2e ACK's for the azure-eventhub input {issue}15671[15671] {pull}16215[16215] -- Add a TLS test and more debug output to httpjson input {pull}16315[16315] -- Add an SSL config example in config.yml for filebeat MISP module. {pull}16320[16320] -- Update filebeat httpjson input to support pagination via Header and Okta module. {pull}16354[16354] -- Add a TLS test and more debug output to httpjson input {pull}16315[16315] -- Add an SSL config example in config.yml for filebeat MISP module. {pull}16320[16320] -- Added documentation for running Filebeat in Cloud Foundry. {pull}17275[17275] -- Release Google Cloud module as GA. {pull}17511[17511] -- Improve ECS categorization field mappings for nats module. {issue}16173[16173] {pull}17550[17550] -- Enhance `elasticsearch/slowlog` fileset to handle ECS-compatible logs emitted by Elasticsearch. {issue}17715[17715] {pull}17729[17729] -- Added documentation for running Filebeat in Cloud Foundry. {pull}17275[17275] -- Release Google Cloud module as GA. {pull}17511[17511] -- Update filebeat httpjson input to support pagination via Header and Okta module. 
{pull}16354[16354] -- Change the `json.*` input settings implementation to merge parsed json objects with existing objects in the event instead of fully replacing them. {pull}17958[17958] -- Add support for array parsing in azure-eventhub input. {pull}18585[18585] -- Add support for array parsing in azure-eventhub input. {pull}18585[18585] -- Improved performance of PANW sample dashboards. {issue}19031[19031] {pull}19032[19032] -- Add event.ingested for CrowdStrike module {pull}20138[20138] -- Add support for additional fields and FirewallMatchEvent type events in CrowdStrike module {pull}20138[20138] -- Azure signinlogs - Add support for ManagedIdentitySignInLogs, NonInteractiveUserSignInLogs, and ServicePrincipalSignInLogs. {issue}23653[23653] - Add `text/csv` decoder to `httpjson` input {pull}28564[28564] - Update `aws-s3` input to connect to non AWS S3 buckets {issue}28222[28222] {pull}28234[28234] - Add support for '/var/log/pods/' path for add_kubernetes_metadata processor with `resource_type: pod`. {pull}28868[28868] - Add documentation for add_kubernetes_metadata processors `log_path` matcher. {pull}28868[28868] - Add support for parsers on journald input {pull}29070[29070] - Add support in httpjson input for oAuth2ProviderDefault of password grant_type. {pull}29087[29087] -- Update Cisco module to enable TCP input. {issue}26118[26118] {issue}28821[28821] {pull}26159[26159] *Heartbeat* *Metricbeat* -- Move the windows pdh implementation from perfmon to a shared location in order for future modules/metricsets to make use of. {pull}15503[15503] -- Add database_account azure metricset. {issue}15758[15758] -- Add database_account azure metricset. {issue}15758[15758] -- Release Zookeeper/connection module as GA. {issue}14281[14281] {pull}17043[17043] -- Add dashboard for pubsub metricset in googlecloud module. {pull}17161[17161] -- Added documentation for running Metricbeat in Cloud Foundry. 
{pull}17275[17275] -- Added documentation for running Metricbeat in Cloud Foundry. {pull}17275[17275] -- Remove required for region/zone and make stackdriver a metricset in googlecloud. {issue}16785[16785] {pull}18398[18398] -- Add memory metrics into compute googlecloud. {pull}18802[18802] - Preliminary AIX support {pull}27954[27954] - Add option to skip older k8s events {pull}29396[29396] - Add `add_resource_metadata` configuration to Kubernetes module. {pull}29133[29133] @@ -294,8 +153,6 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d *Winlogbeat* -- Add more DNS error codes to the Sysmon module. {issue}15685[15685] -- Add configuration option for registry file flush timeout {issue}29001[29001] {pull}29053[29053] - Add support for custom XML queries {issue}1054[1054] {pull}29330[29330] *Elastic Log Driver* @@ -324,3 +181,9 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d ==== Known Issue *Journalbeat* + + + + + + diff --git a/libbeat/docs/release.asciidoc b/libbeat/docs/release.asciidoc index 1529ebf2872..26e1bd6b14d 100644 --- a/libbeat/docs/release.asciidoc +++ b/libbeat/docs/release.asciidoc @@ -11,6 +11,7 @@ upgrade. 
* <> * <> * <> +* <> * <> * <> * <> From fc01723bd352e712d2787b4759dadc2c221b0270 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Mon, 20 Dec 2021 07:38:13 +0000 Subject: [PATCH 34/57] [mergify]: keep the house clean for conflicts in automated bumps (#29455) --- .mergify.yml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/.mergify.yml b/.mergify.yml index fcf643d21ac..7815f7579d3 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -21,6 +21,7 @@ pull_request_rules: - -merged - -closed - conflict + - -author=apmmachine actions: comment: message: | @@ -32,6 +33,18 @@ pull_request_rules: git merge upstream/{{base}} git push upstream {{head}} ``` + - name: close automated pull requests with bump updates if any conflict + conditions: + - -merged + - -closed + - conflict + - author=apmmachine + - label=automation + actions: + close: + message: | + This pull request has been automatically closed by Mergify. + There are some other up-to-date pull requests. - name: automatic approval for automated pull requests with bump updates conditions: - author=apmmachine From ab8b97ecdb36a36a879733e2c0433ed9a5815893 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Mon, 20 Dec 2021 09:15:24 +0000 Subject: [PATCH 35/57] jjbb: enable 7.17 (and 7.18/19) (#29522) --- .ci/jobs/beats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.ci/jobs/beats.yml b/.ci/jobs/beats.yml index e9a26c87a0d..0500b2ba8f2 100644 --- a/.ci/jobs/beats.yml +++ b/.ci/jobs/beats.yml @@ -17,7 +17,7 @@ discover-pr-forks-strategy: 'merge-current' discover-pr-forks-trust: 'permission' discover-pr-origin: 'merge-current' - head-filter-regex: '(master|6\.[89]|7\.16|8\.\d+|PR-.*|v\d+\.\d+\.\d+)' + head-filter-regex: '(master|6\.[89]|7\.1[6789]|8\.\d+|PR-.*|v\d+\.\d+\.\d+)' discover-tags: true notification-context: "beats-ci" repo: 'beats' From 00de284f4f169591f4c1d993dfd6301551fd8ffb Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Mon, 20 Dec 2021 10:55:59 +0000 Subject: 
[PATCH 36/57] update e2e-testing main branch (#29527) --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index 19978442bf8..04423d77b91 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -554,7 +554,7 @@ def e2e_with_entrypoint(Map args = [:]) { def dockerLogFile = "docker_logs_${entrypoint}.log" dir("${env.WORKSPACE}/src/github.com/elastic/e2e-testing") { // TBC with the target branch if running on a PR basis. - git(branch: 'master', credentialsId: '2a9602aa-ab9f-4e52-baf3-b71ca88469c7-UserAndToken', url: 'https://github.com/elastic/e2e-testing.git') + git(branch: 'main', credentialsId: '2a9602aa-ab9f-4e52-baf3-b71ca88469c7-UserAndToken', url: 'https://github.com/elastic/e2e-testing.git') if(isDockerInstalled()) { dockerLogin(secret: "${DOCKER_ELASTIC_SECRET}", registry: "${DOCKER_REGISTRY}") } From e6e65aa92fe355c95789691ebf5a3bcecaf5b4ea Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Mon, 20 Dec 2021 10:59:04 +0000 Subject: [PATCH 37/57] update ingest-dev main branch (#29525) --- .ci/jobs/beats-release-changelog.yml | 2 +- .ci/jobs/beats-release-minor-major.yml | 2 +- .ci/jobs/beats-release-patch.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.ci/jobs/beats-release-changelog.yml b/.ci/jobs/beats-release-changelog.yml index 1cbd94c7168..6a331022e8f 100644 --- a/.ci/jobs/beats-release-changelog.yml +++ b/.ci/jobs/beats-release-changelog.yml @@ -17,4 +17,4 @@ credentials-id: f6c7695a-671e-4f4f-a331-acdce44ff9ba reference-repo: /var/lib/jenkins/.git-references/ingest-dev.git branches: - - master + - main diff --git a/.ci/jobs/beats-release-minor-major.yml b/.ci/jobs/beats-release-minor-major.yml index 91c7d105fb8..04a2e07046d 100644 --- a/.ci/jobs/beats-release-minor-major.yml +++ b/.ci/jobs/beats-release-minor-major.yml @@ -17,4 +17,4 @@ credentials-id: f6c7695a-671e-4f4f-a331-acdce44ff9ba reference-repo: /var/lib/jenkins/.git-references/ingest-dev.git 
branches: - - master + - main diff --git a/.ci/jobs/beats-release-patch.yml b/.ci/jobs/beats-release-patch.yml index 4d205f79647..80c401e6c20 100644 --- a/.ci/jobs/beats-release-patch.yml +++ b/.ci/jobs/beats-release-patch.yml @@ -17,4 +17,4 @@ credentials-id: f6c7695a-671e-4f4f-a331-acdce44ff9ba reference-repo: /var/lib/jenkins/.git-references/ingest-dev.git branches: - - master + - main From 7941c1c936511407ed70460b5657579c9ae1628c Mon Sep 17 00:00:00 2001 From: Michael Katsoulis Date: Tue, 21 Dec 2021 14:17:03 +0200 Subject: [PATCH 38/57] Calculate memory.working_set.limit.pct for pod and container metricset (#29547) * Calculate memory.working_set.limit.pct for pod and container metricset --- CHANGELOG.next.asciidoc | 1 + metricbeat/docs/fields.asciidoc | 24 +++++++++++++++++++ .../kubernetes/container/_meta/fields.yml | 5 ++++ .../kubernetes/container/container_test.go | 9 +++---- .../module/kubernetes/container/data.go | 1 + metricbeat/module/kubernetes/fields.go | 2 +- .../module/kubernetes/pod/_meta/fields.yml | 5 ++++ metricbeat/module/kubernetes/pod/data.go | 3 +++ metricbeat/module/kubernetes/pod/pod_test.go | 7 +++--- 9 files changed, 49 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 1ceabcb9c57..9c736fd6c64 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -145,6 +145,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Preliminary AIX support {pull}27954[27954] - Add option to skip older k8s events {pull}29396[29396] - Add `add_resource_metadata` configuration to Kubernetes module. {pull}29133[29133] +- Add `memory.workingset.limit.pct` field in Kubernetes container/pod metricset. 
{pull}29547[29547] *Packetbeat* diff --git a/metricbeat/docs/fields.asciidoc b/metricbeat/docs/fields.asciidoc index c1e7087125c..2553dc415ed 100644 --- a/metricbeat/docs/fields.asciidoc +++ b/metricbeat/docs/fields.asciidoc @@ -41959,6 +41959,18 @@ format: bytes -- +*`kubernetes.container.memory.workingset.limit.pct`*:: ++ +-- +Working set memory usage as a percentage of the defined limit for the container (or total node allocatable memory if unlimited) + + +type: scaled_float + +format: percent + +-- + *`kubernetes.container.memory.pagefaults`*:: + -- @@ -43023,6 +43035,18 @@ format: bytes -- +*`kubernetes.pod.memory.working_set.limit.pct`*:: ++ +-- +Working set memory usage as a percentage of the defined limit for the pod containers (or total node allocatable memory if unlimited) + + +type: scaled_float + +format: percent + +-- + *`kubernetes.pod.memory.rss.bytes`*:: + diff --git a/metricbeat/module/kubernetes/container/_meta/fields.yml b/metricbeat/module/kubernetes/container/_meta/fields.yml index 8bd8bd6449a..81b99f75c0c 100644 --- a/metricbeat/module/kubernetes/container/_meta/fields.yml +++ b/metricbeat/module/kubernetes/container/_meta/fields.yml @@ -126,6 +126,11 @@ format: bytes description: > Working set memory usage + - name: limit.pct + type: scaled_float + format: percent + description: > + Working set memory usage as a percentage of the defined limit for the container (or total node allocatable memory if unlimited) - name: pagefaults type: double description: > diff --git a/metricbeat/module/kubernetes/container/container_test.go b/metricbeat/module/kubernetes/container/container_test.go index 30009846169..816464d7b9d 100644 --- a/metricbeat/module/kubernetes/container/container_test.go +++ b/metricbeat/module/kubernetes/container/container_test.go @@ -69,10 +69,11 @@ func TestEventMapping(t *testing.T) { "memory.majorpagefaults": 0, // calculated pct fields: - "cpu.usage.node.pct": 0.005631997, - "cpu.usage.limit.pct": 0.005631997, - 
"memory.usage.node.pct": 0.01, - "memory.usage.limit.pct": 0.1, + "cpu.usage.node.pct": 0.005631997, + "cpu.usage.limit.pct": 0.005631997, + "memory.usage.node.pct": 0.01, + "memory.usage.limit.pct": 0.1, + "memory.workingset.limit.pct": 0.09943977591036414, "name": "nginx", diff --git a/metricbeat/module/kubernetes/container/data.go b/metricbeat/module/kubernetes/container/data.go index 53a795624d9..ad2e1243339 100644 --- a/metricbeat/module/kubernetes/container/data.go +++ b/metricbeat/module/kubernetes/container/data.go @@ -135,6 +135,7 @@ func eventMapping(content []byte, perfMetrics *util.PerfMetricsCache) ([]common. if memLimit > 0 { containerEvent.Put("memory.usage.limit.pct", float64(container.Memory.UsageBytes)/memLimit) + containerEvent.Put("memory.workingset.limit.pct", float64(container.Memory.WorkingSetBytes)/memLimit) } events = append(events, containerEvent) diff --git a/metricbeat/module/kubernetes/fields.go b/metricbeat/module/kubernetes/fields.go index 73b71223fe1..e634e74a200 100644 --- a/metricbeat/module/kubernetes/fields.go +++ b/metricbeat/module/kubernetes/fields.go @@ -32,5 +32,5 @@ func init() { // AssetKubernetes returns asset data. // This is the base64 encoded zlib format compressed contents of module/kubernetes. 
func AssetKubernetes() string { - return "eJzsfc9z47aS/33+CpRPzrccHb7Hqa1XlXhe9nmTzHjtmcxha0uByJaEmAIYALRH76/fAsAfEAmApAjKHls6pDK21f1BdwPobjQaP6IH2L9HD8UKOAUJ4h1CksgM3qOLX+sfXrxDKAWRcJJLwuh79I93CCHU/AHageQkUd/mkAEW8B5t8DuEBEhJ6Ea8R/9zIUR2cYUutlLmF/+rfrdlXC4TRtdk8x6tcSbgHUJrAlkq3msGPyKKd9CCpz5ynysOnBV5+RMHPPW5oWvGd1j9GGGaIiGxJEKSRCC2RjlLBdphijeQotXe4rMoKdhobEQ4JwL4I/D6Ny5QAWAt+f10e4MMQUuU1edQpNWnDc2Gx+HvAoRcJBkBKg/+pML5APsnxtPW7wJo1eda00PwDZJC6bViJIIoOAhW8ATi4bgzlCFFTtptAKJYzYnBR74DI2F5fABIk0WXSVYICfxKMxU5TuCqls4PQVyPwFfxYP3r8+db1CHZsUyWRhSF5tkh2eVJJVC5VIziq6HEoFmgDos2lpTvl7yg8WB8BbkFjuQWKh6oECBQyveozagN5oHQNrcJSH4lNFWra0m9RyW7nNG4a1RFEm0xTTO1SllCCaJpr90TkahFXZNEa1ZpZsAy8QhcEBbRNEqCNYruMNsQtOQONreJEKpJ4iLcZr4DuWUR7VFPTAfRzqCZiGiG9YjbVCu2OWcJCOHk6DJE135v00vyYiEg6fy+opmyYpW1173OQK5vvyABCaNpG1nDaQc7xvdqWycpULlY7RvPrMs3Y3Tj+KXxy94j35cPUP2s/ggRiiqeJYY+iI+EywJnp0RYsuwDuE7FguVAFwkrOqtfL7QD1h+L3Qq4WnEVQbQmGdR/wLhfjUJiLiGNYDT3xmCQIDQBvcSUxl3xcE4AFQhEs/56Xy249vYXhVjkwBOgkmSw+H/eEbLVX5C4FGB+sRwjh2rOVyDQjiScldMJNXD8OnENQxS7ifoJ40qKXZFhSR4BuViFoE033gqapqR3qIp+LxBB/g1mZsfU9BjQCsEotVqQQ1qNsSAdYBypYgvmHBpW5AMYRM6ogGdVr4EwRr9d0PMr2EY5WMNdoDFUXEJxk+o6/fFtqhqYc6cxaZBFiL+Xt2errRIfCAvkyLK0hhzPyYvoLThzNzazDEugyf4YS3ZpS1QEr5SJKgTm38Q4Tvae1AspngnVmOh4wayK5AHkSbeckjXaEiHZhuMdMiD8YIe6EmNQVDSNJocqbx7PocFCbUfY/HAYmGfQY4N6uCaTgnO1jk2X3Q1dZ2SzlQNMndENLygldBM1VGnWz0RvWurbqGQUziqDTNKFkXuUlbxJ+pfaFAhLzcXJHhcpkQt49CliLHtND2l67vEahhwUNEgj8qxItpk3ew2VmNBpZxyWdGt6UY44dGS5lGTnTuWmWLZ/0ZOwuVcEUYeglV4ZvIv3ZShvv6BC4A04BOEbtg1Ff9c7D12AQlQPBsm4i3A/8T4GNhPHotxm4w1rq0+PhO3PdW12Su7XjEMpfIqpd8s6wIspU4LxwR4AeSBcYxiQ9rCsgbEUFrlzX2pwiQRnkC7XGcO+P6zCjjLSiTEGJV8sEK5oqn+ztU4NSSZxprEjnGUswRKvMlDfCw42Izsiv7/RprAmFFIDv87AN0vhpfqJVyKIrFFB9XchdR/iZWwzPIfcM6rf2Ea54ms2ckHCj5hk2G3+0xclXzSMhs29vqAaDde2lk89WJTgHCdE7pUD7KZer6vlX74F+RhrHi4bteC9BbnohX24WIhaD/xnFtN2ebcfj6JuZp+1HTSzxTsg61CEQ9j9iIdLsRoCyWOdc0DSBuKAdHimFS2V9FYW7bYd9hzMzedcvzSRGEF4B/zC/czfLfQjXU2PBaAX720OGfMEh7M0CL/PaUuId0oX0CubJXf39+E5UkF+YvyB0I0Af3LsdUjkqxkoEiCHSSbHG1
jjInMkGMedYLsxNRktxQh5ONX7J/6L8ZMh0ty8uOpZxJhcR6wBehtxxh1jUte5iL2QsBsdcrwV18ctJ9slP8dmbhmVvvjzxWgniTu+OCIOO/vPWZYBNxckJp0CXNfEyusWcc4AnqVM9ZSV66cuhT1xCaz6bzx2H/EOhlVa/5vRiHxv6JpjIXmRyIJDl/i54NcM51zwey74PRf8DhjGueDXDeRc8DsY47ng91zwey74nV7w6/Ayx5YAPzH+8HcBhdvjPGbrU6BBOZymLG/6dv6bIVjX35WbeciXKOiaUCK2UdyJLzWxIaxxmsaw4a+VXhTBHkNOIZfbqDw1xd7pIzmJMl8bvnaVs6buDsxYCotEBeyJZO74+hjDhUeSaE8ipg+sjzEqyiGD3QLO5DZG7XjDvKaK3KmgOer2w5wMHs/R1XB2twcHS/5B1msS4BT4gojlDgvpycmsGMsAtx29vovt2+Zmu9Y1EajF410bja5ofddmPyJh9XkLdnsOUyFb5axA7UN6btS/kVssEeaANkCBY2n6iVT1xOW6esCBUBXYKuH+2u5ugkYkw/wG5tF1UNrXZntVXBCHhPFUGLnXxifJDszPcswlSYoMcyMEtMUCsUQXqacOhPqbEu9yB8ruYhJK+60JF3JZsqKenh7jC4A/VwDVODUP1PBQP2tblX0hZHZAikUPniYXIjqncgaDhG9yuDX8buiUlgBp00CAPAJ1iCNh+X4pmQtBs6dh0Qr1/Km3ILo7TWkouNoK2405juT+eZ/XR+5hjo48pM/owxz1MX7V24JDzrg0zS2IcOgiNIFm7bqx5myHnrYk2WrhmLWBiGZldOeGomaeP6p9QhFGjA7FYuXccYolnq6x30tKCAvBEqJ3hScit8E5FNKbewkd75HVdsChoxAUWrAGnCwdLFqaAWE0PFMaQJVelnFPBv6zJFuaxLoxBrf3G/9YYhBP3bQpLmNNEpFqEpgJ8IT7ZmN1erKM3ovmj7IXjS2Q8GFNQSIegH2h5O8CkD5SIGui3EpmAXGklOplHLL1MiP0ISKYu9/UOs5BKDRlnyLfNkLoI8seIV06MM61OlU8XXIJrVM4J/Et56fbm7qTUWk9AXXFbWmleD+Uba16GMddPOwFK8B0vvlaUR4h+rgT9svNhx7edtJiSsxnXVXUceb5luL5lqLnE/+WovZYv/cLiuebCu6/Od9U6Hzi3VQ4F6R3IJ8L0n3QzwXpPQXpFKSynmhrN//2yk3wDhIgjzrf76NVn0pw7jrXHIx6KKJvPk51Hum1K+Uzx1TsiJQvSS+fnXqpDzXOt0Cqz0B5/nK+ADJaROe7H/anI563ce3DKl/wXDlvwzpNx4AG10vpFdAg8vULqP2cgnqzPMes4WSn/MKZ+j/494d+Bn1M0MCZjoanUYbMeDQu3XKz037w+B0EDdxF0NsW5IB9Bo1Z9t6kEN27UR3KHtzhmpLtzln6XSa7z7Fq9TnHqvbn+1LKdxirvolTphdzqtIB9hLb8Yxp+/imWj2qzbXuviPa7XfKHo+MAmIc7RgH+49LwooE5tDXCTLyqdv5eKkD/EXOvHMjrHjT8ehuWG8loXgwYfyDbh1ELl//SaQRzFPnPNIfbLz642ojkrqBg5KJvrnaI5gcb2A544mogTX4fHZ5Gjz+01mrfci3/ZSI37rPpGlNf3C4Lt53tMs5+laHrwNPk4tOo9zgcHXese4AtFvkTOHSIee99TBVaof0LNPpdJ4Zc/HqIPJz3vcNToOB/WYO/di+bjOBq4zhNeyYPjMtaOEuM5GRBfvL1Kch4e4yAUgTOssc5P08F8GHG8aYnjL1/Gl3lDnOqkf3kgk2oBjSRyZKF5kQfFdPiFiIgq0nQqCmGefgzjFtCEMbiwzX6nCwPR1FAlD9Gpy6tAzvFBOAF1uXjh4x1u2GcR1i4ipyWG+YENg5VTm4J0wI4FRlBrrBtE0oot24nMO+xi/H3Iof2PKl3g73NBm0KQWZPhQrMG
566azvaeLMkfdsbUUGYuDO0C/++z1NbhWcO0W29QwgW9c/6HvQ0Y9umnl48Q14GtCPyfs8YMx1xgu9733A1mlozvUf7wjdRFP7R0MaWbRHPQE5EOJE3zUIcoQB9KA8iTWEB+M3iU7WQCRbSItsWvteK3NQ0zunDbo8XlnaoHOV9Ug2fY15Lc+kyKIM7L60UoSlhF0uu6QrnvVqEJGtmqwuuud0zDkd0wfpnI45p2NGIjqnY87pmHM65pyOOadjnBiCnSkNf1dfyiCEMT0pO7FYuxPkcZsk/H84fVj6T5oiyRDQ1BqMe1saCHtKWmIEmsAEbCOaNiPcmEIzMWfpIuegwhSFQDey3fXqsx/JLUtRQxeVdMeBmKIdN/+AIjwYpunDg6JPIWXcGIN7RSrIszbA0l5P6e/eO+ZO/8baQTzNxXWBGLR/dnBMzCD7Ju27NuO6ivBdm8txt3CaJzpj3MU5ugdXRzzXNS7ibhYoJJZFvNvr+RYLfxmlewDtQYTKuOvhaEbosmzOfIWeMJH6fyTwHaE4/Pwp4NR/wd7d6HogygahZuKW74EDqQJyf3EaoRI2nY7cR4AxfHqb1nca/NpgJunvq9EQuqxRXesGo0pp1xyL7W+M5T/j5IGt11fon5zri3W3RZZdofp/y993Vas+jNfaVyvQ5TXb5RlISK8aSVxjSpm8K6hmwfgV+vTp919JlkH6Qzn8hXOijLk20/sGhK7L9l0WMXR95dij1H59+0U3XBOGZUDvlY9/EkglO0jRydqQGZHP/7JY3/DL6mtT197brKDSy/PjblRWldb7rrYmnNG/2CrWxmqoRdlWO0ciwzdWdF3i6NBon9VNZeCkY7ktZQ9598wYwqchgXKWkRal+k5Eoly3CY/vNIG+IaU8c9E8ad4xEsv7EUtRiBxo2rlXHtqgD7jbMX5lQkTFLy66jeXqjtqO3HvAFT4MIHOWbJHoZN8rCE9YOPt216sUFnJZWUA0HEro+gWCCgYvqHuCwLeZ2CvKvexTwGlGqJ9zn819KAnUrPFaAq+nlEaSMP12B1euyBqTzNLEkP8J/9MfcKQYdoweXqWZcpT/QdO711dVTrwyNptTnpEEDw8eejYc5+hKJkdeMO6/2TXkOR3P1Y+Dp1WanhmVWFQY3gzECzEFQXigWdM0gCX1g5ejRsELh0qxpGdipZHQCno69Vq8+mA2is0ztt9NfMvIcoUaglHmfI4djVsGb7DB6WshNVxcYfAJlhELR600Qtds5CrSN0UnxegfGoyNuVXTtkZ9KXJIptxgjYWxOw0mzc9YsFzz0w8sT50vp0QHZfh0AVVAhoRJ7ok9zElwB05zhUiON7C2h6+2aOcv9FyIihFirQO3TWOkOuxohxwDPAgdvMywvzjjIy+Mlqc6BwzDIgxDFEkC0I3L4iLRXIRYF1kXTYVkVL++4TuGstD6oaax3qbvRaY6cHK9yTTYGVGwEJbWi1ihiPIAV5UHPSkyN9cWJkcldyw1NhyQACkJ3YzV57yuecLommwKrrOgNdSmiKeS4+V9Z+tvXDiOswwyItoHbLGEaHF48VK0sVobTkB+7Il6HjudLjlNW3ubfOcqrOsTm2MnRnEOYtq7cv1KkH4HlT1RYZ4l7G6WDTrHC00R0VWvNQWQoUvYLNDFNWf0v9jqwu8aE7FMGJWcZe3bCtEgfzKqNqfAhhG6vJC8gIsrdLHGmVD/wzi6+A/KKPzjwm2NI49ix5mjIT7BHqvlfB4R2vnag83DI8iCPlD2RAN67/GYoqItXaehUOsp/jLfvIpZDxBOH01Sgn50yT5kR5dK/FdIC1+JvpS830YKWqaGgzHqpHoAjfKAT19JgDlfW+YchCicb2XFEp5pxHVbMjpaiikRD6eA+4GIh8lgWSGXbL1UmGeE+qmQn9YK79E4c5KeQqa3Nx+OEukcVRFW/7b5ChHqp9rsbnFubvUYy77iPaAmRL4VprqD+eleZ7Nk/lxFBh99zfv6qiNqxT
wr8hJtuPu8leCaRXe6DeJcpmnrJpgeqhUyKxzdBbL9CkItYOCCCAlUPrKs2MXyrxqyyNBtUouc7fRf/qjWT/jxuQtV/jDwFAlPIVto1gw7Zy95OJ+iCFWCjh2EKfLEScJ4qp9YZpZOPO4r43gDyyTDnRaNg7nfGyJIE6lzMR17QkMKAXx2mWSY7GYzziTDL9hEb/+4DtinGcKkR+R/JjSFtBKGn1VZ1LYsrWbCjLhrKhqr6RV/Vii5aQJu2lhnzZe7dq+EERx+0iSQIuHmMeP8uv3jeuGbTu7tc9KcifRsxpYJuSTtrbs8Rzw+ZaPgKdLo5jZGmD6OcRkIHFdyM+MNgBbM8grAXXUF4Bao2iUWi8Wxlf8x0U3LTFQlazMlsNoKr7i58F510bZLN6ZVtVmzsqr2EpHq2masLbGh+otcXlKJ2kHd0p35x/NVph2P69lK0gZgYyvdl3kuoW2Alp1pak5otdfbZwMOBc47mkLjFcxTUHAoxXWRZfuKW680rVtR+tzn74JJHG1psWhGWVzmKxe/K7H+t8baVzTeltIYBIaDORWCFF1uMU/1BiUg/SHU9ilOJHA4UO/dCkXvWBb2CM3MUV+9Qn+qof6pxvqnGuyfnv3DMfAjxmfOW5UojfnhPM8ICCRZN2IM/9MfYarlgCSxEh4ltWe/d3Nf4gjkM7JCSOA+J3wAjxsqgVOcoZvb2uTL8btZwjfzhUlBajWyihj68PHePwVqlscPs8PQE1tkDKfLFc4wTSaJ9TeGU/RzSac2KA/TKVO8GliHRl0DQDdchcZTTERT8KGvGKiQbYpNVGz+5aITLjPzFnI5RaVpqMXw4At2dAnrIovn2FcUo3n2ISH0JWvchaq1SOr6P3QJaoM2++B9OYK293eCUONAeLUPdVS0MbN/ajVqrdzTA5/PJ0T0DGFHp8Z+KMBniz/6TLCJDuY2QisOOb6iaGZbrC3QAvsybLCyvAHAWnnXdtp12pJsZ2Gf3c87QOP39nLOHokgzFe4OeJwqaHUeH02Ct+ZgT66WTruTo8KDDSV8ga2acC4p3hHEqwC5nJ3K08w3Edd5TnJiuis56S0/+8sNZdjU9DvVzWyIXSDME1RySW+P3Kg9h6vRD+vHcv6zVvd1muCUbwSR1uhUZpwvLFbt2DwOoSnfrr7TbwgnDBn4VU/8T4GNpPO5Ycum94n7Qe2TkKltK8Zh1LkFFNPw78WypfymPJMRVDnx3IPQL/+xy3v7u+HiaJ8EvT1v4D6tfP2aY9kcryBGd/VbC4DDn7r82SI+l/7jFp6dlhv9mwOelcqVqWZk9s63jUCZ6VZH9EQYfQiZ+EvJIPSMTXvWoeLStGoY+RXKqLm3n+vjBz9LtBrF4/2prySsQfx0p6Jvz14FL4ssNDRolfBhLI0UAg+RcUe20FR3d8vRlmeQVgrK4dwbBIDzC8cYAgYd0fR2GiM/1rC+b8AAAD//xhYgOU=" + return 
"eJzsfc9z47aS/33+CpRPzrccHb7Hqa1XlXhe9nmTmfHaM8lha0uByJaEmAIYALRH76/fAsAfEAmApAjKHls6pDK21f1BdwPobjQaP6IH2L9HD8UKOAUJ4h1CksgM3qOLX+sfXrxDKAWRcJJLwuh79I93CCHU/AHageQkUd/mkAEW8B5t8DuEBEhJ6Ea8R/9zIUR2cYUutlLmF/+rfrdlXC4TRtdk8x6tcSbgHUJrAlkq3msGPyKKd9CCpz5ynysOnBV5+RMHPPW5oWvGd1j9GGGaIiGxJEKSRCC2RjlLBdphijeQotXe4rMoKdhobEQ4JwL4I/D6Ny5QAWAt+f10e4MMQUuU1edQpNWnDc2Gx+HvAoRcJBkBKg/+pML5APsnxtPW7wJo1eda00PwDZJC6bViJIIoOAhW8ATi4bgzlCFFTtptAKJYzYnBR74DI2F5fABIk0WXSVYICfxKMxU5TuCqls4PQVyPwFfxYP3ry5db1CHZsUyWRhSF5tkh2eVJJVC5VIziq6HEoFmgDos2lpTvl7yg8WD8AXILHMktVDxQIUCglO9Rm1EbzAOhbW4TkPxKaKpW15J6j0p2OaNx16iKJNpimmZqlbKEEkTTXrsnIlGLuiaJ1qzSzIBl4hG4ICyiaZQEaxTdYbYhaMkdbG4TIVSTxEW4zXwHcssi2qOemA6inUEzEdEM6xG3qVZsc84SEMLJ0WWIrv3eppfkxUJA0vl9RTNlxSprr3udgVzffkUCEkbTNrKG0w52jO/Vtk5SoHKx2jeeWZdvxujG8Uvjl71Hvi8foPpZ/REiFFU8Swx9EB8JlwXOTomwZNkHcJ2KBcuBLhJWdFa/XmgHrD8VuxVwteIqgmhNMqj/gHG/GoXEXEIawWjujcEgQWgCeokpjbvi4ZwAKhCIZv31vlpw7e0vCrHIgSdAJclg8f+8I2SrvyBxKcD8YjlGDtWcr0CgHUk4K6cTauD4deIahih2E/UTxpUUuyLDkjwCcrEKQZtuvBU0TUnvUBX9XiCC/BvMzI6p6TGgFYJRarUgh7QaY0E6wDhSxRbMOTSsyAcwiJxRAc+qXgNhjH67oOdXsI1ysIa7QGOouITiJtV1+uPbVDUw505j0iCLEH8vb89WWyU+EBbIkWVpDTmekxfRW3DmbmxmGZZAk/0xluzSlqgIXikTVQjMv4lxnOw9qRdSPBOqMdHxglkVyQPIk245JWu0JUKyDcc7ZED4wQ51JcagqGgaTQ5V3jyeQ4OF2o6w+eEwMM+gxwb1cE0mBedqHZsuuxu6zshmKweYOqMbXlBK6CZqqNKsn4netNS3UckonFUGmaQLI/coK3mT9C+1KRCWmouTPS5SIhfw6FPEWPaaHtL03OM1DDkoaJBG5FmRbDNv9hoqMaHTzjgs6db0ohxx6MhyKcnOncpNsWz/oidhc68Iog5BK70yeBfvy1DefkWFwBtwCMI3bBuK/q53HroAhageDJJxF+F+4n0MbCaORbnNxhvWVp8eCduf69rslNyvGYdS+BRT75Z1gBdTpgTjgz0A8kC4xjAg7WFZA2MpLHLnvtTgEgnOIF2uM4Z9f1iFHWWkE2MMSr5YIFzRVP9ma50akkziTGNHOMtYgiVeZaC+FxxsRnZEfn+jTWFNKKQGfp2Bb5bCS/UTr0QQWaOC6u9C6j7Ey9hmeA65Z1S/sY1yxdds5IKEHzHJsNv8py9KvmgYDZt7fUE1Gq5tLZ96sCjBOU6I3CsH2E29XlfLv3wL8jHWPFw2asF7C3LRC/twsRC1HvjPLKbt8m4/HkXdzL5oO2hmi3dA1qEIh7D7EQ+XYjUEksc654CkDcQB6fBMK1oq6a0s2m077DmYm8+5fmkiMYLwDviF+5kfLfQjXU2PBaAX720OGfMEh7M0CL/PaUuId0oX0CubJXf39+E5UkF+YvyB0I0Af3LsdUjkDzNQJEAOXz1e5lTyDeW5p1WON7DGReZIyY4783cPvckBKkbIw6n2OPB
fjJ8MkebmxVWvO4zJdcSqqbcRmd0xJnVlkNgLCbvRQdpbcRbdcrKDmHM065ZRGb08X1R7kkjtqyNGs89LOMsy4OZKyaRzk+uaWHlBJc6pybMU9p6y1v/UxcMnLhpW/43H7hPewbDa9H8zGpHvDV1zLCQvEllw6BI/l0ib4ZxLpM8l0ucS6QHDOJdIu4GcS6QHYzyXSJ9LpM8l0tNLpB1e5tii6SfGH/4uoHB7nMdsfQo0KIfTFDJO385/MwTrisVyMw/5EgVdE0rENoo78bUmNoQ1TtMYNvxHpRdFsMeQU8jlNipPTbF3+khOoszXhq9dF66puwMzlsIiUQF7Ipk7vj7GcOGRJNqTiOkD6wx1RTlksFvAmdzGqLZvmNdUkTsVNMdNhzAng8dzQjGc3e3BmYF/kPWaBDgFviBiucNCenIyK8YywG1Hr68VwLbpBaB1TQRq8XjXRqNrgN+12Y9IWH3Zgt3QxNQUVzkrUPuQnhv1b+QWS4Q5oA1Q4FiaDixVBXa5rh5wIFQFtkq4v7b7waARyTC/gXl0HZT2tdleFRfEIWE8FUbutfFJsgPzsxxzSZIiw9wIAW2xQCzRZf2pA6H+psS73IGyu5iE0n5rwoVclqyopwvK+JLpLxVANU7NAzU81M/aVmVfoZkdkGLRg6fJhYjOOabBIOGbHG4NHw2d0hIgbVoukEegDnEkLN8vJXMhaPY0LFqhnj/1FkR3pykNBVdbYbuVyZHcv+zz+jQ1zNGRh/QZfZijPqGtuoFwyBmXph0IEQ5dhCbQrH1K1pzt0NOWJFstHLM2ENGsjO7cUNTM8ye1TyjCiNGhWKycO06xxNM19rGkhLAQLCF6V3gichucQyG9uZfQ8R5ZbQccOgpBoQVrwMnSwaKlGRBGwzOlAVTpZRn3ZOA/S7KlSawbY3B7v/GPJQbx1G2u4jLWJBGpJoGZAE+4bzZWpyfL6N17fi+799gCCR/WFCTiAdhXSv4uAOkjBbImyq1kFhBHSqlexiFbLzNCHyKCuftNreMchEJTdnbybSOEPrLsEdKlA+Ncq1PF0yWX0DqFcxLfcn66val7P5XWE1BX3CZgivdD2Qish3HcxcNesAJM55uvFeURoo87Yb/efOjhbSctpsR81uVOHWee73We73V6PvHvdWqP9Xu/0nm+2+H+m/Pdjs4n3t2Ocwl/B/K5hN8H/VyQ3lOQTkEq64m2dvNvr9wE7yAB8qjz/T5a9akE565zzcGohyL65uNU55Feu1K+cEzFjkj5kvTyxamX+lDjfAuk+gyU5y/nCyCjRXS++2F/OuJ5G9c+rPIFzyX9NqzT9FhocL2U7goNIl+HhdrPKag3y3PMGk52yi+cqWOGf3/oZ9DHBA2c6Wh4GmXIjEfj0i03O+0Hj99B0MBdBL1tQQ7YZ9CYZe9NCtG9G9Wh7MEdrinZ7pyl32Wy+xyrVp9zrGp/vi+lfIex6ps4ZXoxpyodYC+xgdGYRplvqjmm2lzrxiqi3Vml7IrJKCDG0Y5xsP+4JKxIYA59vTMjn7qdj5c6wF/kzDu3Dos3HY9udPRWEooHE8Y/6NZB5PL1n0QawTx1ziO/w8kVp5nYTBPt9Z/yG0uq+14oLegLvz3VIDnewHLGg2QDa/Cx9vI0ePyH2lbXlW/7KYkS6xqYpjX9Zev6zoOjy9DRl2F8jYuaFH4a5eKLq2GRdXWi3VloCpcOOe9lkalSO6RnmU6nYc+Y+2oHAbPzmnRwGgxs03Po/vc16QncAA2vYce052lBCzfniYws2JanPkQKN+UJQJrQkOcgXeq5Pz/cMMa04qnnT7sRz3FWPboFjwz17RjSfidK850QfFcrjViIgh07QqCmGefghjttCEP7sQzX6nCwPY1YAlD9Gpy6tAxvsBOAF1uXjtY61qWQcY114ipyWEudENg5VTm4lU4I4FRlBprotE0oot24nMO+fjnHNBMY2Cmn3g73NBm0KQWZPhQrMG566azvaeI8WujZ2oo
MxMCdoV/893ua3Co4d4ps671Jtq5/0PdyqB/dNPPw4hvwBqUfk/cdypjrjBd630OUrUPknOs/3hG6iab2T4Y0smiPemt0IMSJvmsQ5AgD6EF5EmsID8ZvEp2sgUi2kBbZtK7HVuagpndOG3R5vLK0QecG8JFs+voZW55JkUUZ2H1ppQhLCbtcdklXPOvVICJbNVlddM/pmHM6pg/SOR1zTseMRHROx5zTMed0zDkdc07HODEEG3oa/q52nkEIY1p5dmKxdgPN4zZJ+P9w+rD0nzRFkiGgqTUY97Y0EPaUtMQINIEJ2EY0bUa4MYVmYs7SRc5BhSkKge7/u+vVZz+SW5aihi4q6Y4DMUU7bv4BRXgwTNOHB0WfQsq4MQb3ilSQZ22Apb2e0t+9d8yd/o21g3iai+sCMWj/7OCYmEH2Tdp3bcZ1Tdi7NpfjLi81j1bGuMJ0dOuyjniua1zE3WNRSCyLeJf+8y0W/upT9wDagwhVv9fD0YzQZdnT+go9YSL1/0jgO0JxuHwPcOrvS+DuDz4QZYNQM3HL98CBVAG5vziNUAmbTiPzI8AYPr29/jt9kW0wk/T3h9EQuqxRXeu+rEpp1xyL7W+M5T/j5IGt11fon5zr+4i3RZZdofp/y993Vas+jNfaVyvQ5TXb5RlISK8aSVxjSpm8K6hmwfgV+vz5468kyyD9oRz+wjlRxtw26n06Q1fc+u7YGLq+QttRar++/ar71AnDMqD3ysc/CaSSHaToZN3bjMjnf5Ctb/hlmbGpUu7t8VDp5flxNyqrCqV9N4ITzuhfbBVrYzXUomyrnSOR4Rsrui5xdGi0z+qmMnDSsdyWsvW+e2YM4dOQQDnLSItSfZUkUa7bhDeLmkDfkFKeuWge+e4YieX9iKUoRA407VzHD23QB9ztGL8yIaLiFxfdxnJ1I3JH7j3gCh8GkDlLtkh0su8VhCcsnO3O61UKC7msLCAaDiV0/XBDBYMX1D1B4NtM7BXlXvYp4DQj1M+5z+Y+lARq1ngtgddTSiNJmH7yhCtXZI1JZmliyP+E/+kPOFIMO0YPbyBNOcr/oOnd68sxJ14Zm80pz0iChwcPPRuOc3QlkyPvZfdfiBvyCpHn6sfBizRNq5FKLCoMbwbihZiCIDzQ42oawJL6wYNbo+CFQ6VY0jOx0khoBT2dei1efTAbxeYZ2+8mPgFluUINwShzPseOfjeDN9jg9LWQGi6uMPgEy4iFo1YaoWs2chXpm6KTYvQPDcbG3KppW6O+FDkkUy7+xsLYnQaT5mcsWK756QeWp84HZ6KDMny6gCogQ8Ik98Qe5iS4A6e5QiTH02Hbw8dutPMXemVFxQix1oHbpp9UHXa0Q44BHoQOXmbYX5zxkRdGy1OdA4ZhEYYhiiQB6MZlcZFoLkKsi6yLpkIyqs3h8B1DWWj9vtVYb9P3kFUdOLmeshrsjChYCEvrIbFQRHmAq8qDnhSZm2sLk6OSO5YaGw5IgJSEbsbqc17XPGF0TTYF11nQGmpTxFPJ8fK+s/U3LhzHWQYZEe0DtlhCtDi8eCnaWK0NJyA/9kQ9b8ROl5ymrb1NvnMV1vWJzbETozgHMe1duX5cST8fy56oMK85djfLBp3jYauI6KpHrgLI0CVsFujimjP6X2x14XeNiVgmjErOsvZthWiQPxtVm1NgwwhdXkhewMUVuljjTKj/YRxd/AdlFP5x4bbGkUex48zREJ9gj9VyPo8I7XztwebhEWRBHyh7ogG993hMUdGWrtNQqPUUf5lPhcWsBwinjyYpQb9VZR+yo0sl/iukha9EX0rebyMFLVPDwRh1Uj2ARnnAp68kwJyvLXMOQhTOJ8ZiCc/0L7stGR0txZSIh1PA/UDEw2SwrJBLtl4qzDNC/VzIz2uF92icOUlPIdPbmw9HiXSOqgirG9d8hQj1C3d27y83t3qMZTv2HlATIt8KU934/XSP2lkyf64ig0++Vmx91RG1Yp4VeYk23LTfSnDNojv
d1G4u07R1E0wP1QqZFY7u6dd+PKIWMHBBhAQqH1lW7GL5Vw1ZZOg2qUXOdvovf1TrJ/z43IUqvxt4ioSnkC00a4ads5c8nC94hCpBxw7CFHniJGE81S9TM0snHveVcbyBZZLhTovGwdzvDRGkidS5mI49oSGFAD67TDJMdrMZZ5LhF2yit79fB+zTDGHS2/s/E5pCWgnDz6osaluWVjNhRtw1FY3V9Io/K5TcNAE3bayz5stdu1fCCA4/aRJIkXDzmHF+3f5+vfBNJ/f2OWnORHptZMuEXJL21l2eIx6fslHwFGl0cxsjTB/HuAwEjiu5mfEGQAtmeQXgrroCcAtU7RKLxeLYyv+Y6KZlJqqStZkSWG2FV9xceK+6aNulG9Oq2qxZWVV7iUh1bTPWlthQ/UUuL6lE7aBu6c784/kq047H9WwlaQOwsZXuyzyX0DZAy840NSe02uvtswGHAucdTaHxCuYpKDiU4rrIsn3FrVea1q0ofe7zd8Ekjra0WDSjLC7zlYvflVj/W2PtKxpvS2kMAsPBnApBii63mKd6gxKQ/hBq+xQnEjgcqPduhaJ3LAt7hGbmqK9eoT/VUP9UY/1TDfZPz/7hGPgR4zPnrUqUxvxwnmcEBJKsGzGG/+mPMNVyQJJYCY+S2rPfu7kvcQTyGVkhJHCfEz6Axw2VwCnO0M1tbfLl+N0s4Zv5wqQgtRpZRQx9+HTvnwI1y+OH2WHoiS0yhtPlCmeYJpPE+hvDKfq5pFMblIfplCleDaxDo64BoBuuQuMpJqIp+NBXDFTINsUmKjb/ctEJl5l5C7mcotI01GJ48AU7uoR1kcVz7CuK0Tz7kBD6kjXuQtVaJHX9H7oEtUGbffC+HEHb+ztBqHEgvNqHOiramNk/tRq1Vu7pgc/nEyJ6hrCjU2M/FOCzxR99JthEB3MboRWHHF9RNLMt1hZogX0ZNlhZ3gBgrbxrO+06bUm2s7DP7ucdoPF7ezlnj0QQ5ivcHHG41FBqvD4bhe/MQB/dLB13p0cFBppKeQPbNGDcU7wjCVYBc7m7lScY7qOu8pxkRXTWc1La/yNLzeXYFPT7VY1sCN0gTFNUconvjxyovccr0a+Sx7J+88S59TZcFK/E0VZolCYcTxPXLRi8DuGpXzx/Ew8vJ8xZeNVPvI+BzaRz+aHLJvjgHxreOgmV0r5mHEqRU0w9Df9aKF/KG9QzFUGd3xg+AP36H7e8u78fJoryJdXX/3Cs77VVr2RyvIEZ39VsLgMOfuvzZIj6X/uMWnp2WG/2bA56VypWpZmT2zreNQJnpVkf0RBh9CJn4S8kg9IxNa8Uh4tK0ahj5Fcqoubef6+MHP0u0GsXj/amvJKxB/HSHgC/PXjiuyyw0NGiV8GEsjRQCD5FxR7bQVHd369GWZ5BWCsrh3BsEgPMLxxgCBh3R9HYaIz/WsL5vwAAAP//cSUrwA==" } diff --git a/metricbeat/module/kubernetes/pod/_meta/fields.yml b/metricbeat/module/kubernetes/pod/_meta/fields.yml index 0537faab52a..0b46d290839 100644 --- a/metricbeat/module/kubernetes/pod/_meta/fields.yml +++ b/metricbeat/module/kubernetes/pod/_meta/fields.yml @@ -94,6 +94,11 @@ format: bytes description: > Total working set memory + - name: limit.pct + type: scaled_float + format: percent + description: > + Working set memory usage as a percentage of the defined limit for the pod containers (or total node allocatable memory if 
unlimited) - name: rss type: group fields: diff --git a/metricbeat/module/kubernetes/pod/data.go b/metricbeat/module/kubernetes/pod/data.go index bb24d31905c..fdb604d4e17 100644 --- a/metricbeat/module/kubernetes/pod/data.go +++ b/metricbeat/module/kubernetes/pod/data.go @@ -128,6 +128,8 @@ func eventMapping(content []byte, perfMetrics *util.PerfMetricsCache) ([]common. } if memLimit > 0 { podEvent.Put("memory.usage.limit.pct", float64(usageMem)/memLimit) + podEvent.Put("memory.working_set.limit.pct", float64(workingSet)/memLimit) + } } @@ -137,6 +139,7 @@ func eventMapping(content []byte, perfMetrics *util.PerfMetricsCache) ([]common. } if memLimit > 0 { podEvent.Put("memory.usage.limit.pct", float64(workingSet)/memLimit) + podEvent.Put("memory.working_set.limit.pct", float64(workingSet)/memLimit) } } diff --git a/metricbeat/module/kubernetes/pod/pod_test.go b/metricbeat/module/kubernetes/pod/pod_test.go index 41340e5dede..37486ad58e8 100644 --- a/metricbeat/module/kubernetes/pod/pod_test.go +++ b/metricbeat/module/kubernetes/pod/pod_test.go @@ -64,9 +64,10 @@ func TestEventMapping(t *testing.T) { "cpu.usage.node.pct": 0.005631997, "cpu.usage.limit.pct": 0.005631997, - "memory.usage.bytes": 1462272, - "memory.usage.node.pct": 0.01, - "memory.usage.limit.pct": 0.1, + "memory.usage.bytes": 1462272, + "memory.usage.node.pct": 0.01, + "memory.usage.limit.pct": 0.1, + "memory.working_set.limit.pct": 0.09943977591036414, } for k, v := range testCases { From aa248af1ba19d4c4979e810402562a5711642c9b Mon Sep 17 00:00:00 2001 From: Andrea Spacca Date: Tue, 21 Dec 2021 13:24:23 +0100 Subject: [PATCH 39/57] add/use blocking wait for test data in statsd (#29543) * add/use blocking wait for test data in statsd * refactor to avoid race condition in TestData * fix serverStarted flag --- metricbeat/mb/testing/modules.go | 35 ++++++++++++++----- .../module/airflow/statsd/data_test.go | 22 +++++++++--- .../metricbeat/module/statsd/server/server.go | 30 +++++++++++++--- 3 files 
changed, 69 insertions(+), 18 deletions(-) diff --git a/metricbeat/mb/testing/modules.go b/metricbeat/mb/testing/modules.go index 605e44a4d32..c5dd3ecbcdd 100644 --- a/metricbeat/mb/testing/modules.go +++ b/metricbeat/mb/testing/modules.go @@ -344,20 +344,20 @@ func NewPushMetricSetV2WithContext(t testing.TB, config interface{}) mb.PushMetr return pushMetricSet } -// capturingPushReporterV2 stores all the events and errors from a metricset's +// CapturingPushReporterV2 stores all the events and errors from a metricset's // Run method. -type capturingPushReporterV2 struct { +type CapturingPushReporterV2 struct { context.Context eventsC chan mb.Event } -func newCapturingPushReporterV2(ctx context.Context) *capturingPushReporterV2 { - return &capturingPushReporterV2{Context: ctx, eventsC: make(chan mb.Event)} +func newCapturingPushReporterV2(ctx context.Context) *CapturingPushReporterV2 { + return &CapturingPushReporterV2{Context: ctx, eventsC: make(chan mb.Event)} } // report writes an event to the output channel and returns true. If the output // is closed it returns false. -func (r *capturingPushReporterV2) report(event mb.Event) bool { +func (r *CapturingPushReporterV2) report(event mb.Event) bool { select { case <-r.Done(): // Publisher is stopped. @@ -368,16 +368,16 @@ func (r *capturingPushReporterV2) report(event mb.Event) bool { } // Event stores the passed-in event into the events array -func (r *capturingPushReporterV2) Event(event mb.Event) bool { +func (r *CapturingPushReporterV2) Event(event mb.Event) bool { return r.report(event) } // Error stores the given error into the errors array. 
-func (r *capturingPushReporterV2) Error(err error) bool { +func (r *CapturingPushReporterV2) Error(err error) bool { return r.report(mb.Event{Error: err}) } -func (r *capturingPushReporterV2) capture(waitEvents int) []mb.Event { +func (r *CapturingPushReporterV2) capture(waitEvents int) []mb.Event { var events []mb.Event for { select { @@ -393,6 +393,20 @@ func (r *capturingPushReporterV2) capture(waitEvents int) []mb.Event { } } +// BlockingCapture blocks until waitEvents n of events are captured +func (r *CapturingPushReporterV2) BlockingCapture(waitEvents int) []mb.Event { + var events []mb.Event + for { + select { + case e := <-r.eventsC: + events = append(events, e) + if waitEvents > 0 && len(events) >= waitEvents { + return events + } + } + } +} + // RunPushMetricSetV2 run the given push metricset for the specific amount of // time and returns all of the events and errors that occur during that period. func RunPushMetricSetV2(timeout time.Duration, waitEvents int, metricSet mb.PushMetricSetV2) []mb.Event { @@ -405,6 +419,11 @@ func RunPushMetricSetV2(timeout time.Duration, waitEvents int, metricSet mb.Push return r.capture(waitEvents) } +// GetCapturingPushReporterV2 is a factory for a capturing push metricset +func GetCapturingPushReporterV2() mb.PushReporterV2 { + return newCapturingPushReporterV2(context.Background()) +} + // RunPushMetricSetV2WithContext run the given push metricset for the specific amount of // time and returns all of the events that occur during that period. 
func RunPushMetricSetV2WithContext(timeout time.Duration, waitEvents int, metricSet mb.PushMetricSetV2WithContext) []mb.Event { diff --git a/x-pack/metricbeat/module/airflow/statsd/data_test.go b/x-pack/metricbeat/module/airflow/statsd/data_test.go index b8cc1ab4718..c2c07d32f34 100644 --- a/x-pack/metricbeat/module/airflow/statsd/data_test.go +++ b/x-pack/metricbeat/module/airflow/statsd/data_test.go @@ -8,8 +8,10 @@ import ( "fmt" "net" "runtime" + "sync" "testing" - "time" + + "github.com/elastic/beats/v7/x-pack/metricbeat/module/statsd/server" "github.com/stretchr/testify/require" @@ -36,7 +38,7 @@ func getConfig() map[string]interface{} { "host": STATSD_HOST, "port": STATSD_PORT, "period": "100ms", - "ttl": "0s", + "ttl": "1ms", } } @@ -58,12 +60,22 @@ func TestData(t *testing.T) { ms := mbtest.NewPushMetricSetV2(t, getConfig()) var events []mb.Event + var reporter mb.PushReporterV2 done := make(chan interface{}) - go func() { - events = mbtest.RunPushMetricSetV2(30*time.Second, 1, ms) + wg := new(sync.WaitGroup) + wg.Add(1) + go func(wg *sync.WaitGroup) { + reporter = mbtest.GetCapturingPushReporterV2() + ms.(*server.MetricSet).ServerStart() + wg.Done() + + go ms.Run(reporter) + events = reporter.(*mbtest.CapturingPushReporterV2).BlockingCapture(1) + close(done) - }() + }(wg) + wg.Wait() createEvent(t) <-done diff --git a/x-pack/metricbeat/module/statsd/server/server.go b/x-pack/metricbeat/module/statsd/server/server.go index b9df691b6b8..a32129910fe 100644 --- a/x-pack/metricbeat/module/statsd/server/server.go +++ b/x-pack/metricbeat/module/statsd/server/server.go @@ -71,9 +71,10 @@ func defaultConfig() Config { // multiple fetch calls. 
type MetricSet struct { mb.BaseMetricSet - server serverhelper.Server - processor *metricProcessor - mappings map[string]StatsdMapping + server serverhelper.Server + serverStarted bool + processor *metricProcessor + mappings map[string]StatsdMapping } // New create a new instance of the MetricSet @@ -191,13 +192,32 @@ func (m *MetricSet) getEvents() []*mb.Event { return events } +// ServerStart starts the underlying m.server +func (m *MetricSet) ServerStart() { + if m.serverStarted { + return + } + m.server.Start() + m.serverStarted = true +} + +// ServerStop stops the underlying m.server +func (m *MetricSet) ServerStop() { + if !m.serverStarted { + return + } + + m.server.Stop() + m.serverStarted = false +} + // Run method provides the module with a reporter with which events can be reported. func (m *MetricSet) Run(reporter mb.PushReporterV2) { period := m.Module().Config().Period // Start event watcher - m.server.Start() - defer m.server.Stop() + m.ServerStart() + defer m.ServerStop() reportPeriod := time.NewTicker(period) for { From 5648bc10edd5ce45ed49c072ad2ba3ebbe3b6c8c Mon Sep 17 00:00:00 2001 From: Michael Katsoulis Date: Tue, 21 Dec 2021 16:55:48 +0200 Subject: [PATCH 40/57] Populate container.id field in container metricset (#29560) * Populate container.id field in container metricset --- CHANGELOG.next.asciidoc | 1 + metricbeat/module/kubernetes/util/kubernetes.go | 17 +++++++++++++++++ 2 files changed, 18 insertions(+) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 9c736fd6c64..9e28195c4ab 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -145,6 +145,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Preliminary AIX support {pull}27954[27954] - Add option to skip older k8s events {pull}29396[29396] - Add `add_resource_metadata` configuration to Kubernetes module. {pull}29133[29133] +- Add `container.id` and `container.runtime` ECS fields in container metricset. 
{pull}29560[29560] - Add `memory.workingset.limit.pct` field in Kubernetes container/pod metricset. {pull}29547[29547] *Packetbeat* diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go index b173c6de360..56e9684eee3 100644 --- a/metricbeat/module/kubernetes/util/kubernetes.go +++ b/metricbeat/module/kubernetes/util/kubernetes.go @@ -198,6 +198,14 @@ func NewContainerMetadataEnricher( pod := r.(*kubernetes.Pod) meta := metaGen.Generate(pod) + statuses := make(map[string]*kubernetes.PodContainerStatus) + mapStatuses := func(s []kubernetes.PodContainerStatus) { + for i := range s { + statuses[s[i].Name] = &s[i] + } + } + mapStatuses(pod.Status.ContainerStatuses) + mapStatuses(pod.Status.InitContainerStatuses) for _, container := range append(pod.Spec.Containers, pod.Spec.InitContainers...) { cuid := ContainerUID(pod.GetObjectMeta().GetNamespace(), pod.GetObjectMeta().GetName(), container.Name) @@ -213,6 +221,15 @@ func NewContainerMetadataEnricher( } } + if s, ok := statuses[container.Name]; ok { + // Extracting id and runtime ECS fields from ContainerID + // which is in the form of :// + split := strings.Index(s.ContainerID, "://") + if split != -1 { + meta.Put("container.id", s.ContainerID[split+3:]) + meta.Put("container.runtime", s.ContainerID[:split]) + } + } id := join(pod.GetObjectMeta().GetNamespace(), pod.GetObjectMeta().GetName(), container.Name) m[id] = meta } From 76bf18b0c2be4c8adf1f2a0f9168aadcd04e8593 Mon Sep 17 00:00:00 2001 From: DeDe Morton Date: Tue, 21 Dec 2021 10:50:42 -0800 Subject: [PATCH 41/57] Update monitoring-internal-collection.asciidoc (#27624) (#29551) just to add a clear description regarding cluster_uuid Co-authored-by: Daisuke Harada <1519063+dharada@users.noreply.github.com> --- libbeat/docs/monitoring/monitoring-internal-collection.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libbeat/docs/monitoring/monitoring-internal-collection.asciidoc 
b/libbeat/docs/monitoring/monitoring-internal-collection.asciidoc index 3263777c9f4..f626ed93a3c 100644 --- a/libbeat/docs/monitoring/monitoring-internal-collection.asciidoc +++ b/libbeat/docs/monitoring/monitoring-internal-collection.asciidoc @@ -85,7 +85,7 @@ monitoring: <1> This setting identifies the {es} cluster under which the monitoring data for this {beatname_uc} instance will appear in the Stack Monitoring UI. To get a cluster's `cluster_uuid`, -call the `GET /` API against that cluster. +call the `GET /` API against that production cluster. <2> This setting identifies the hosts and port numbers of {es} nodes that are part of the monitoring cluster. <3> Specify one of `api_key` or `username`/`password`. From 391a948ec15e55735367429bec81c22c539a444a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?No=C3=A9mi=20V=C3=A1nyi?= Date: Wed, 22 Dec 2021 12:06:50 +0100 Subject: [PATCH 42/57] Follow up changes in documentation after #28927 (#29501) * Follow up changes in the docs for logging * add reference configuration --- auditbeat/auditbeat.reference.yml | 3 ++- filebeat/filebeat.reference.yml | 3 ++- heartbeat/heartbeat.reference.yml | 3 ++- libbeat/_meta/config/output-file.reference.yml.tmpl | 4 ++-- libbeat/outputs/fileout/docs/fileout.asciidoc | 3 ++- metricbeat/metricbeat.reference.yml | 3 ++- packetbeat/packetbeat.reference.yml | 3 ++- winlogbeat/winlogbeat.reference.yml | 3 ++- x-pack/auditbeat/auditbeat.reference.yml | 3 ++- x-pack/filebeat/filebeat.reference.yml | 3 ++- x-pack/heartbeat/heartbeat.reference.yml | 3 ++- x-pack/metricbeat/metricbeat.reference.yml | 3 ++- x-pack/packetbeat/packetbeat.reference.yml | 3 ++- x-pack/winlogbeat/winlogbeat.reference.yml | 3 ++- 14 files changed, 28 insertions(+), 15 deletions(-) diff --git a/auditbeat/auditbeat.reference.yml b/auditbeat/auditbeat.reference.yml index 544cd80ade2..27be08dd194 100644 --- a/auditbeat/auditbeat.reference.yml +++ b/auditbeat/auditbeat.reference.yml @@ -1057,7 +1057,7 @@ output.elasticsearch: 
#path: "/tmp/auditbeat" # Name of the generated files. The default is `auditbeat` and it generates - # files: `auditbeat`, `auditbeat.1`, `auditbeat.2`, etc. + # files: `auditbeat-{datetime}.ndjson`, `auditbeat-{datetime}-1.ndjson`, etc. #filename: auditbeat # Maximum size in kilobytes of each file. When this size is reached, and on @@ -1075,6 +1075,7 @@ output.elasticsearch: # Configure automatic file rotation on every startup. The default is true. #rotate_on_startup: true + # ------------------------------- Console Output ------------------------------- #output.console: # Boolean flag to enable or disable the output module. diff --git a/filebeat/filebeat.reference.yml b/filebeat/filebeat.reference.yml index b63760c63dd..912a8f9bcb6 100644 --- a/filebeat/filebeat.reference.yml +++ b/filebeat/filebeat.reference.yml @@ -1990,7 +1990,7 @@ output.elasticsearch: #path: "/tmp/filebeat" # Name of the generated files. The default is `filebeat` and it generates - # files: `filebeat`, `filebeat.1`, `filebeat.2`, etc. + # files: `filebeat-{datetime}.ndjson`, `filebeat-{datetime}-1.ndjson`, etc. #filename: filebeat # Maximum size in kilobytes of each file. When this size is reached, and on @@ -2008,6 +2008,7 @@ output.elasticsearch: # Configure automatic file rotation on every startup. The default is true. #rotate_on_startup: true + # ------------------------------- Console Output ------------------------------- #output.console: # Boolean flag to enable or disable the output module. diff --git a/heartbeat/heartbeat.reference.yml b/heartbeat/heartbeat.reference.yml index 76dcc7758b9..aca74cd9e63 100644 --- a/heartbeat/heartbeat.reference.yml +++ b/heartbeat/heartbeat.reference.yml @@ -1203,7 +1203,7 @@ output.elasticsearch: #path: "/tmp/heartbeat" # Name of the generated files. The default is `heartbeat` and it generates - # files: `heartbeat`, `heartbeat.1`, `heartbeat.2`, etc. + # files: `heartbeat-{datetime}.ndjson`, `heartbeat-{datetime}-1.ndjson`, etc. 
#filename: heartbeat # Maximum size in kilobytes of each file. When this size is reached, and on @@ -1221,6 +1221,7 @@ output.elasticsearch: # Configure automatic file rotation on every startup. The default is true. #rotate_on_startup: true + # ------------------------------- Console Output ------------------------------- #output.console: # Boolean flag to enable or disable the output module. diff --git a/libbeat/_meta/config/output-file.reference.yml.tmpl b/libbeat/_meta/config/output-file.reference.yml.tmpl index 7f7ff9998b3..665b8f3a7c1 100644 --- a/libbeat/_meta/config/output-file.reference.yml.tmpl +++ b/libbeat/_meta/config/output-file.reference.yml.tmpl @@ -16,7 +16,7 @@ #path: "/tmp/{{.BeatName}}" # Name of the generated files. The default is `{{.BeatName}}` and it generates - # files: `{{.BeatName}}`, `{{.BeatName}}.1`, `{{.BeatName}}.2`, etc. + # files: `{{.BeatName}}-{datetime}.ndjson`, `{{.BeatName}}-{datetime}-1.ndjson`, etc. #filename: {{.BeatName}} # Maximum size in kilobytes of each file. When this size is reached, and on @@ -33,4 +33,4 @@ #permissions: 0600 # Configure automatic file rotation on every startup. The default is true. - #rotate_on_startup: true \ No newline at end of file + #rotate_on_startup: true diff --git a/libbeat/outputs/fileout/docs/fileout.asciidoc b/libbeat/outputs/fileout/docs/fileout.asciidoc index a0979f92ef5..f4dc7c5f95d 100644 --- a/libbeat/outputs/fileout/docs/fileout.asciidoc +++ b/libbeat/outputs/fileout/docs/fileout.asciidoc @@ -45,7 +45,8 @@ mandatory. ===== `filename` The name of the generated files. The default is set to the Beat name. For example, the files -generated by default for {beatname_uc} would be "{beatname_lc}", "{beatname_lc}.1", "{beatname_lc}.2", and so on. +generated by default for {beatname_uc} would be "{beatname_lc}-{{datetime}}.ndjson", "{beatname_lc}-{{datetime}}-1.ndjson", +"{beatname_lc}-{{datetime}}-2.ndjson", and so on. 
===== `rotate_every_kb` diff --git a/metricbeat/metricbeat.reference.yml b/metricbeat/metricbeat.reference.yml index d0ef9b6f6d3..2d4c51aea50 100644 --- a/metricbeat/metricbeat.reference.yml +++ b/metricbeat/metricbeat.reference.yml @@ -1920,7 +1920,7 @@ output.elasticsearch: #path: "/tmp/metricbeat" # Name of the generated files. The default is `metricbeat` and it generates - # files: `metricbeat`, `metricbeat.1`, `metricbeat.2`, etc. + # files: `metricbeat-{datetime}.ndjson`, `metricbeat-{datetime}-1.ndjson`, etc. #filename: metricbeat # Maximum size in kilobytes of each file. When this size is reached, and on @@ -1938,6 +1938,7 @@ output.elasticsearch: # Configure automatic file rotation on every startup. The default is true. #rotate_on_startup: true + # ------------------------------- Console Output ------------------------------- #output.console: # Boolean flag to enable or disable the output module. diff --git a/packetbeat/packetbeat.reference.yml b/packetbeat/packetbeat.reference.yml index 3d9d0f604bf..739fe5c5332 100644 --- a/packetbeat/packetbeat.reference.yml +++ b/packetbeat/packetbeat.reference.yml @@ -1552,7 +1552,7 @@ output.elasticsearch: #path: "/tmp/packetbeat" # Name of the generated files. The default is `packetbeat` and it generates - # files: `packetbeat`, `packetbeat.1`, `packetbeat.2`, etc. + # files: `packetbeat-{datetime}.ndjson`, `packetbeat-{datetime}-1.ndjson`, etc. #filename: packetbeat # Maximum size in kilobytes of each file. When this size is reached, and on @@ -1570,6 +1570,7 @@ output.elasticsearch: # Configure automatic file rotation on every startup. The default is true. #rotate_on_startup: true + # ------------------------------- Console Output ------------------------------- #output.console: # Boolean flag to enable or disable the output module. 
diff --git a/winlogbeat/winlogbeat.reference.yml b/winlogbeat/winlogbeat.reference.yml index 11a5ac82351..51ca51897f8 100644 --- a/winlogbeat/winlogbeat.reference.yml +++ b/winlogbeat/winlogbeat.reference.yml @@ -988,7 +988,7 @@ output.elasticsearch: #path: "/tmp/winlogbeat" # Name of the generated files. The default is `winlogbeat` and it generates - # files: `winlogbeat`, `winlogbeat.1`, `winlogbeat.2`, etc. + # files: `winlogbeat-{datetime}.ndjson`, `winlogbeat-{datetime}-1.ndjson`, etc. #filename: winlogbeat # Maximum size in kilobytes of each file. When this size is reached, and on @@ -1006,6 +1006,7 @@ output.elasticsearch: # Configure automatic file rotation on every startup. The default is true. #rotate_on_startup: true + # ------------------------------- Console Output ------------------------------- #output.console: # Boolean flag to enable or disable the output module. diff --git a/x-pack/auditbeat/auditbeat.reference.yml b/x-pack/auditbeat/auditbeat.reference.yml index 5219627ac63..b058641150c 100644 --- a/x-pack/auditbeat/auditbeat.reference.yml +++ b/x-pack/auditbeat/auditbeat.reference.yml @@ -1113,7 +1113,7 @@ output.elasticsearch: #path: "/tmp/auditbeat" # Name of the generated files. The default is `auditbeat` and it generates - # files: `auditbeat`, `auditbeat.1`, `auditbeat.2`, etc. + # files: `auditbeat-{datetime}.ndjson`, `auditbeat-{datetime}-1.ndjson`, etc. #filename: auditbeat # Maximum size in kilobytes of each file. When this size is reached, and on @@ -1131,6 +1131,7 @@ output.elasticsearch: # Configure automatic file rotation on every startup. The default is true. #rotate_on_startup: true + # ------------------------------- Console Output ------------------------------- #output.console: # Boolean flag to enable or disable the output module. 
diff --git a/x-pack/filebeat/filebeat.reference.yml b/x-pack/filebeat/filebeat.reference.yml index 796d6934046..e65e6990c62 100644 --- a/x-pack/filebeat/filebeat.reference.yml +++ b/x-pack/filebeat/filebeat.reference.yml @@ -4220,7 +4220,7 @@ output.elasticsearch: #path: "/tmp/filebeat" # Name of the generated files. The default is `filebeat` and it generates - # files: `filebeat`, `filebeat.1`, `filebeat.2`, etc. + # files: `filebeat-{datetime}.ndjson`, `filebeat-{datetime}-1.ndjson`, etc. #filename: filebeat # Maximum size in kilobytes of each file. When this size is reached, and on @@ -4238,6 +4238,7 @@ output.elasticsearch: # Configure automatic file rotation on every startup. The default is true. #rotate_on_startup: true + # ------------------------------- Console Output ------------------------------- #output.console: # Boolean flag to enable or disable the output module. diff --git a/x-pack/heartbeat/heartbeat.reference.yml b/x-pack/heartbeat/heartbeat.reference.yml index 76dcc7758b9..aca74cd9e63 100644 --- a/x-pack/heartbeat/heartbeat.reference.yml +++ b/x-pack/heartbeat/heartbeat.reference.yml @@ -1203,7 +1203,7 @@ output.elasticsearch: #path: "/tmp/heartbeat" # Name of the generated files. The default is `heartbeat` and it generates - # files: `heartbeat`, `heartbeat.1`, `heartbeat.2`, etc. + # files: `heartbeat-{datetime}.ndjson`, `heartbeat-{datetime}-1.ndjson`, etc. #filename: heartbeat # Maximum size in kilobytes of each file. When this size is reached, and on @@ -1221,6 +1221,7 @@ output.elasticsearch: # Configure automatic file rotation on every startup. The default is true. #rotate_on_startup: true + # ------------------------------- Console Output ------------------------------- #output.console: # Boolean flag to enable or disable the output module. 
diff --git a/x-pack/metricbeat/metricbeat.reference.yml b/x-pack/metricbeat/metricbeat.reference.yml index e09b1c7989b..2c3a35d801b 100644 --- a/x-pack/metricbeat/metricbeat.reference.yml +++ b/x-pack/metricbeat/metricbeat.reference.yml @@ -2441,7 +2441,7 @@ output.elasticsearch: #path: "/tmp/metricbeat" # Name of the generated files. The default is `metricbeat` and it generates - # files: `metricbeat`, `metricbeat.1`, `metricbeat.2`, etc. + # files: `metricbeat-{datetime}.ndjson`, `metricbeat-{datetime}-1.ndjson`, etc. #filename: metricbeat # Maximum size in kilobytes of each file. When this size is reached, and on @@ -2459,6 +2459,7 @@ output.elasticsearch: # Configure automatic file rotation on every startup. The default is true. #rotate_on_startup: true + # ------------------------------- Console Output ------------------------------- #output.console: # Boolean flag to enable or disable the output module. diff --git a/x-pack/packetbeat/packetbeat.reference.yml b/x-pack/packetbeat/packetbeat.reference.yml index 3d9d0f604bf..739fe5c5332 100644 --- a/x-pack/packetbeat/packetbeat.reference.yml +++ b/x-pack/packetbeat/packetbeat.reference.yml @@ -1552,7 +1552,7 @@ output.elasticsearch: #path: "/tmp/packetbeat" # Name of the generated files. The default is `packetbeat` and it generates - # files: `packetbeat`, `packetbeat.1`, `packetbeat.2`, etc. + # files: `packetbeat-{datetime}.ndjson`, `packetbeat-{datetime}-1.ndjson`, etc. #filename: packetbeat # Maximum size in kilobytes of each file. When this size is reached, and on @@ -1570,6 +1570,7 @@ output.elasticsearch: # Configure automatic file rotation on every startup. The default is true. #rotate_on_startup: true + # ------------------------------- Console Output ------------------------------- #output.console: # Boolean flag to enable or disable the output module. 
diff --git a/x-pack/winlogbeat/winlogbeat.reference.yml b/x-pack/winlogbeat/winlogbeat.reference.yml index 940cd6125a0..d74e971638a 100644 --- a/x-pack/winlogbeat/winlogbeat.reference.yml +++ b/x-pack/winlogbeat/winlogbeat.reference.yml @@ -1031,7 +1031,7 @@ output.elasticsearch: #path: "/tmp/winlogbeat" # Name of the generated files. The default is `winlogbeat` and it generates - # files: `winlogbeat`, `winlogbeat.1`, `winlogbeat.2`, etc. + # files: `winlogbeat-{datetime}.ndjson`, `winlogbeat-{datetime}-1.ndjson`, etc. #filename: winlogbeat # Maximum size in kilobytes of each file. When this size is reached, and on @@ -1049,6 +1049,7 @@ output.elasticsearch: # Configure automatic file rotation on every startup. The default is true. #rotate_on_startup: true + # ------------------------------- Console Output ------------------------------- #output.console: # Boolean flag to enable or disable the output module. From 3a6bd25c4df97fdf6109df2e99cc157e4bd27a24 Mon Sep 17 00:00:00 2001 From: Craig MacKenzie Date: Wed, 22 Dec 2021 09:17:14 -0500 Subject: [PATCH 43/57] Save docker-compose logs from integration tests. (#29504) Save docker-compose logs from system and integration tests. Allows auditing service logs for issues, in particular it enabled auditing the elasticsearch logs for deprecation warnings. The docker log files are now saved under build/system-tests/docker-logs and preserved as artifacts in Jenkins. 
Co-authored-by: Victor Martinez --- Jenkinsfile | 1 + dev-tools/mage/integtest_docker.go | 40 ++++++++++++++++++++++++- dev-tools/mage/target/common/package.go | 8 +++-- 3 files changed, 45 insertions(+), 4 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 04423d77b91..0f8fbcf2596 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -702,6 +702,7 @@ def withBeatsEnv(Map args = [:], Closure body) { error("Error '${err.toString()}'") } finally { if (archive) { + archiveArtifacts(allowEmptyArchive: true, artifacts: "${directory}/build/system-tests/docker-logs/TEST-docker-compose-*.log") archiveTestOutput(directory: directory, testResults: testResults, artifacts: artifacts, id: args.id, upload: upload) } tearDown() diff --git a/dev-tools/mage/integtest_docker.go b/dev-tools/mage/integtest_docker.go index e9767f68436..94d9288d1fa 100644 --- a/dev-tools/mage/integtest_docker.go +++ b/dev-tools/mage/integtest_docker.go @@ -81,7 +81,7 @@ func (d *DockerIntegrationTester) StepRequirements() IntegrationTestSteps { } // Test performs the tests with docker-compose. -func (d *DockerIntegrationTester) Test(_ string, mageTarget string, env map[string]string) error { +func (d *DockerIntegrationTester) Test(dir string, mageTarget string, env map[string]string) error { var err error d.buildImagesOnce.Do(func() { err = dockerComposeBuildImages() }) if err != nil { @@ -139,6 +139,12 @@ func (d *DockerIntegrationTester) Test(_ string, mageTarget string, env map[stri args..., ) + err = saveDockerComposeLogs(dir, mageTarget, composeEnv) + if err != nil && testErr == nil { + // saving docker-compose logs failed but the test didn't. + return err + } + // Docker-compose rm is noisy. So only pass through stderr when in verbose. 
out := ioutil.Discard if mg.Verbose() { @@ -160,6 +166,38 @@ func (d *DockerIntegrationTester) Test(_ string, mageTarget string, env map[stri return testErr } +func saveDockerComposeLogs(rootDir string, mageTarget string, composeEnv map[string]string) error { + var ( + composeLogDir = filepath.Join(rootDir, "build", "system-tests", "docker-logs") + composeLogFileName = filepath.Join(composeLogDir, "TEST-docker-compose-"+mageTarget+".log") + ) + + if err := os.MkdirAll(composeLogDir, os.ModeDir|os.ModePerm); err != nil { + return fmt.Errorf("creating docker log dir: %w", err) + } + + composeLogFile, err := os.Create(composeLogFileName) + if err != nil { + return fmt.Errorf("creating docker log file: %w", err) + } + defer composeLogFile.Close() + + _, err = sh.Exec( + composeEnv, + composeLogFile, // stdout + composeLogFile, // stderr + "docker-compose", + "-p", dockerComposeProjectName(), + "logs", + "--no-color", + ) + if err != nil { + return fmt.Errorf("executing docker-compose logs: %w", err) + } + + return nil +} + // InsideTest performs the tests inside of environment. func (d *DockerIntegrationTester) InsideTest(test func() error) error { // Fix file permissions after test is done writing files as root. 
diff --git a/dev-tools/mage/target/common/package.go b/dev-tools/mage/target/common/package.go index e8849adb26d..2054538df1f 100644 --- a/dev-tools/mage/target/common/package.go +++ b/dev-tools/mage/target/common/package.go @@ -30,8 +30,10 @@ import ( func PackageSystemTests() error { excludeds := []string{".ci", ".git", ".github", "vendor", "dev-tools"} - // include run as it's the directory we want to compress - systemTestsDir := filepath.Join("build", "system-tests", "run") + // include run and docker-logs as they are the directories we want to compress + systemTestsDir := filepath.Join("build", "system-tests") + systemTestsRunDir := filepath.Join(systemTestsDir, "run") + systemTestsLogDir := filepath.Join(systemTestsDir, "docker-logs") files, err := devtools.FindFilesRecursive(func(path string, _ os.FileInfo) bool { base := filepath.Base(path) for _, excluded := range excludeds { @@ -40,7 +42,7 @@ func PackageSystemTests() error { } } - return strings.HasPrefix(path, systemTestsDir) + return strings.HasPrefix(path, systemTestsRunDir) || strings.HasPrefix(path, systemTestsLogDir) }) if err != nil { return err From 78e1c58aa905dd8122d4495948b27da3076bd46c Mon Sep 17 00:00:00 2001 From: Chris Mark Date: Thu, 23 Dec 2021 13:55:28 +0000 Subject: [PATCH 44/57] Add support for latest k8s versions (#29575) --- .ci/scripts/kind-setup.sh | 20 +------------------ CHANGELOG.next.asciidoc | 1 + deploy/kubernetes/Jenkinsfile.yml | 2 +- metricbeat/docs/modules/kubernetes.asciidoc | 2 +- .../module/kubernetes/_meta/docs.asciidoc | 2 +- .../module/kubernetes/test/integration.go | 20 +++++++++++-------- 6 files changed, 17 insertions(+), 30 deletions(-) diff --git a/.ci/scripts/kind-setup.sh b/.ci/scripts/kind-setup.sh index d2c9eeeb771..fa4f66dd6e6 100755 --- a/.ci/scripts/kind-setup.sh +++ b/.ci/scripts/kind-setup.sh @@ -1,23 +1,5 @@ #!/usr/bin/env bash set -exuo pipefail -kind create cluster --image kindest/node:${K8S_VERSION} --config - < Date: Thu, 23 Dec 2021 14:59:31 
+0100 Subject: [PATCH 45/57] elasticsearch module - xpack enabled parity (#29548) * elasticsearch module - xpack enabled parity * flip ActiveOnly default * add ml_job's node * lint * update shard cluster's state path * add back state.id to prevent breaking changes --- .../module/elasticsearch/index_recovery/index_recovery.go | 2 +- metricbeat/module/elasticsearch/ml_job/_meta/data.json | 5 +++-- metricbeat/module/elasticsearch/ml_job/data.go | 7 ++++++- metricbeat/module/elasticsearch/shard/_meta/data.json | 7 ++++++- metricbeat/module/elasticsearch/shard/data.go | 2 ++ 5 files changed, 18 insertions(+), 5 deletions(-) diff --git a/metricbeat/module/elasticsearch/index_recovery/index_recovery.go b/metricbeat/module/elasticsearch/index_recovery/index_recovery.go index eb2eaf93bf6..5b5243de9f9 100644 --- a/metricbeat/module/elasticsearch/index_recovery/index_recovery.go +++ b/metricbeat/module/elasticsearch/index_recovery/index_recovery.go @@ -43,7 +43,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { config := struct { ActiveOnly bool `config:"index_recovery.active_only"` }{ - ActiveOnly: true, + ActiveOnly: false, } if err := base.Module().UnpackConfig(&config); err != nil { return nil, err diff --git a/metricbeat/module/elasticsearch/ml_job/_meta/data.json b/metricbeat/module/elasticsearch/ml_job/_meta/data.json index 0e79d5bad91..6a5e73f48f3 100644 --- a/metricbeat/module/elasticsearch/ml_job/_meta/data.json +++ b/metricbeat/module/elasticsearch/ml_job/_meta/data.json @@ -22,7 +22,8 @@ } }, "node": { - "id": "27fb1c2fd783" + "id": "27fb1c2fd783", + "name": "node-1" } }, "event": { @@ -39,4 +40,4 @@ "name": "elasticsearch", "type": "elasticsearch" } -} \ No newline at end of file +} diff --git a/metricbeat/module/elasticsearch/ml_job/data.go b/metricbeat/module/elasticsearch/ml_job/data.go index 28c296e1e5f..a23c2677f1b 100644 --- a/metricbeat/module/elasticsearch/ml_job/data.go +++ b/metricbeat/module/elasticsearch/ml_job/data.go @@ -81,7 +81,12 @@ 
func eventsMapping(r mb.ReporterV2, info elasticsearch.Info, content []byte, isX event.ModuleFields = common.MapStr{} event.ModuleFields.Put("cluster.name", info.ClusterName) event.ModuleFields.Put("cluster.id", info.ClusterID) - event.ModuleFields.Put("node.id", info.Name) + + if node, exists := job["node"]; exists { + nodeHash := node.(map[string]interface{}) + event.ModuleFields.Put("node.id", nodeHash["id"]) + event.ModuleFields.Put("node.name", nodeHash["name"]) + } event.MetricSetFields, _ = schema.Apply(job) diff --git a/metricbeat/module/elasticsearch/shard/_meta/data.json b/metricbeat/module/elasticsearch/shard/_meta/data.json index b6278e225f5..e56708307e7 100644 --- a/metricbeat/module/elasticsearch/shard/_meta/data.json +++ b/metricbeat/module/elasticsearch/shard/_meta/data.json @@ -6,6 +6,11 @@ "name": "docker-cluster", "state": { "id": "XNIdeSZxQwyItvGfR5fHSw" + }, + "stats": { + "state": { + "state_uuid": "XNIdeSZxQwyItvGfR5fHSw" + } } }, "index": { @@ -41,4 +46,4 @@ "address": "172.19.0.2:9200", "type": "elasticsearch" } -} \ No newline at end of file +} diff --git a/metricbeat/module/elasticsearch/shard/data.go b/metricbeat/module/elasticsearch/shard/data.go index 2dd2f93c023..575be1d7184 100644 --- a/metricbeat/module/elasticsearch/shard/data.go +++ b/metricbeat/module/elasticsearch/shard/data.go @@ -73,6 +73,7 @@ func eventsMapping(r mb.ReporterV2, content []byte, isXpack bool) error { } event.ModuleFields.Put("cluster.state.id", stateData.StateID) + event.ModuleFields.Put("cluster.stats.state.state_uuid", stateData.StateID) event.ModuleFields.Put("cluster.id", stateData.ClusterID) event.ModuleFields.Put("cluster.name", stateData.ClusterName) @@ -117,6 +118,7 @@ func eventsMapping(r mb.ReporterV2, content []byte, isXpack bool) error { errs = append(errs, errors.Wrap(err, "failure getting source node information")) continue } + event.ModuleFields.Put("node.name", sourceNode["name"]) event.MetricSetFields.Put("source_node", sourceNode) } From 
9b19aaeaf0d03521c701c4f498ba3dacdfd94970 Mon Sep 17 00:00:00 2001 From: Tetiana Kravchenko Date: Thu, 23 Dec 2021 15:00:32 +0100 Subject: [PATCH 46/57] Align elastic-agent-standalone manifest with the kubernetes package changes (#29595) * align elastic-agent-standalone manifest with the managed version Signed-off-by: Tetiana Kravchenko * revetn docker image version Signed-off-by: Tetiana Kravchenko * remove ES_HOST used to run test locally Signed-off-by: Tetiana Kravchenko * set default values for container parser implicitly Signed-off-by: Tetiana Kravchenko * remove skip_older as it is a default value anyway Signed-off-by: Tetiana Kravchenko --- .../elastic-agent-standalone-kubernetes.yaml | 76 +++++++++++++++++-- ...-agent-standalone-daemonset-configmap.yaml | 76 +++++++++++++++++-- 2 files changed, 138 insertions(+), 14 deletions(-) diff --git a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml index 6b306051eb2..9f58ec9c4f3 100644 --- a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml +++ b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml @@ -32,7 +32,7 @@ data: meta: package: name: kubernetes - version: 0.2.8 + version: 1.9.0 data_stream: namespace: default streams: @@ -72,6 +72,15 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + - data_stream: + dataset: kubernetes.state_daemonset + type: metrics + metricsets: + - state_daemonset + add_metadata: true + hosts: + - 'kube-state-metrics:8080' + period: 10s - data_stream: dataset: kubernetes.state_deployment type: metrics @@ -214,20 +223,73 @@ data: fields: ecs.version: 1.12.0 - name: container-log - type: logfile + type: filestream use_output: default meta: package: - name: log - version: 0.4.6 + name: kubernetes + version: 1.9.0 data_stream: namespace: default streams: - data_stream: - dataset: generic - symlinks: true + dataset: kubernetes.container_logs + type: logs + prospector.scanner.symlinks: true + parsers: 
+ - container: ~ + # - ndjson: + # target: json + # - multiline: + # type: pattern + # pattern: '^\[' + # negate: true + # match: after paths: - /var/log/containers/*${kubernetes.container.id}.log + - name: audit-log + type: filestream + use_output: default + meta: + package: + name: kubernetes + version: 1.9.0 + data_stream: + namespace: default + streams: + - data_stream: + dataset: kubernetes.audit_logs + type: logs + exclude_files: + - .gz$ + parsers: + - ndjson: + add_error_key: true + target: kubernetes_audit + paths: + - /var/log/kubernetes/kube-apiserver-audit.log + processors: + - rename: + fields: + - from: kubernetes_audit + to: kubernetes.audit + - script: + id: dedot_annotations + lang: javascript + source: | + function process(event) { + var audit = event.Get("kubernetes.audit"); + for (var annotation in audit["annotations"]) { + var annotation_dedoted = annotation.replace(/\./g,'_') + event.Rename("kubernetes.audit.annotations."+annotation, "kubernetes.audit.annotations."+annotation_dedoted) + } + return event; + } function test() { + var event = process(new Event({ "kubernetes": { "audit": { "annotations": { "authorization.k8s.io/decision": "allow", "authorization.k8s.io/reason": "RBAC: allowed by ClusterRoleBinding \"system:kube-scheduler\" of ClusterRole \"system:kube-scheduler\" to User \"system:kube-scheduler\"" } } } })); + if (event.Get("kubernetes.audit.annotations.authorization_k8s_io/decision") !== "allow") { + throw "expected kubernetes.audit.annotations.authorization_k8s_io/decision === allow"; + } + } - name: system-metrics type: system/metrics use_output: default @@ -332,7 +394,7 @@ data: meta: package: name: kubernetes - version: 0.2.8 + version: 1.9.0 data_stream: namespace: default streams: diff --git a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml index d3f290b2aab..0ad4f609883 100644 --- 
a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml +++ b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml @@ -32,7 +32,7 @@ data: meta: package: name: kubernetes - version: 0.2.8 + version: 1.9.0 data_stream: namespace: default streams: @@ -72,6 +72,15 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + - data_stream: + dataset: kubernetes.state_daemonset + type: metrics + metricsets: + - state_daemonset + add_metadata: true + hosts: + - 'kube-state-metrics:8080' + period: 10s - data_stream: dataset: kubernetes.state_deployment type: metrics @@ -214,20 +223,73 @@ data: fields: ecs.version: 1.12.0 - name: container-log - type: logfile + type: filestream use_output: default meta: package: - name: log - version: 0.4.6 + name: kubernetes + version: 1.9.0 data_stream: namespace: default streams: - data_stream: - dataset: generic - symlinks: true + dataset: kubernetes.container_logs + type: logs + prospector.scanner.symlinks: true + parsers: + - container: ~ + # - ndjson: + # target: json + # - multiline: + # type: pattern + # pattern: '^\[' + # negate: true + # match: after paths: - /var/log/containers/*${kubernetes.container.id}.log + - name: audit-log + type: filestream + use_output: default + meta: + package: + name: kubernetes + version: 1.9.0 + data_stream: + namespace: default + streams: + - data_stream: + dataset: kubernetes.audit_logs + type: logs + exclude_files: + - .gz$ + parsers: + - ndjson: + add_error_key: true + target: kubernetes_audit + paths: + - /var/log/kubernetes/kube-apiserver-audit.log + processors: + - rename: + fields: + - from: kubernetes_audit + to: kubernetes.audit + - script: + id: dedot_annotations + lang: javascript + source: | + function process(event) { + var audit = event.Get("kubernetes.audit"); + for (var annotation in audit["annotations"]) { + var annotation_dedoted = annotation.replace(/\./g,'_') + 
event.Rename("kubernetes.audit.annotations."+annotation, "kubernetes.audit.annotations."+annotation_dedoted) + } + return event; + } function test() { + var event = process(new Event({ "kubernetes": { "audit": { "annotations": { "authorization.k8s.io/decision": "allow", "authorization.k8s.io/reason": "RBAC: allowed by ClusterRoleBinding \"system:kube-scheduler\" of ClusterRole \"system:kube-scheduler\" to User \"system:kube-scheduler\"" } } } })); + if (event.Get("kubernetes.audit.annotations.authorization_k8s_io/decision") !== "allow") { + throw "expected kubernetes.audit.annotations.authorization_k8s_io/decision === allow"; + } + } - name: system-metrics type: system/metrics use_output: default @@ -332,7 +394,7 @@ data: meta: package: name: kubernetes - version: 0.2.8 + version: 1.9.0 data_stream: namespace: default streams: From bea8e454ea289e28ea260a8c88e7997364da2a75 Mon Sep 17 00:00:00 2001 From: Michel Laterman <82832767+michel-laterman@users.noreply.github.com> Date: Thu, 23 Dec 2021 09:22:28 -0800 Subject: [PATCH 47/57] Remove references to username/password (#29458) * Remove references to username/password * restore ouput username/password * Update CHANGELOG --- x-pack/elastic-agent/CHANGELOG.next.asciidoc | 1 + .../_meta/config/common.p2.yml.tmpl | 8 +-- .../_meta/config/common.reference.p2.yml.tmpl | 8 +-- .../config/elastic-agent.docker.yml.tmpl | 3 +- x-pack/elastic-agent/_meta/elastic-agent.yml | 3 +- x-pack/elastic-agent/elastic-agent.docker.yml | 3 +- .../elastic-agent/elastic-agent.reference.yml | 8 +-- x-pack/elastic-agent/elastic-agent.yml | 8 +-- .../elastic-agent/pkg/agent/cmd/container.go | 17 +---- .../pkg/agent/cmd/enroll_cmd_test.go | 6 -- .../pkg/agent/cmd/setup_config.go | 8 --- .../pkg/agent/configuration/fleet_server.go | 17 +---- x-pack/elastic-agent/pkg/remote/client.go | 14 ---- .../elastic-agent/pkg/remote/client_test.go | 65 ------------------- x-pack/elastic-agent/pkg/remote/config.go | 9 --- 
.../elastic-agent/pkg/remote/config_test.go | 2 - .../pkg/remote/round_trippers.go | 30 --------- 17 files changed, 24 insertions(+), 186 deletions(-) diff --git a/x-pack/elastic-agent/CHANGELOG.next.asciidoc b/x-pack/elastic-agent/CHANGELOG.next.asciidoc index 374c63acdb0..8164397c26e 100644 --- a/x-pack/elastic-agent/CHANGELOG.next.asciidoc +++ b/x-pack/elastic-agent/CHANGELOG.next.asciidoc @@ -14,6 +14,7 @@ - Default to port 80 and 443 for Kibana and Fleet Server connections. {pull}25723[25723] - Remove deprecated/undocumented IncludeCreatorMetadata setting from kubernetes metadata config options {pull}28006[28006] - The `/processes/` endpoint proxies to the subprocess's monitoring endpoint, instead of querying its `/stats` endpoint {pull}28165[28165] +- Remove username/password for fleet-server authentication. {pull}29458[29458] ==== Bugfixes - Fix rename *ConfigChange to *PolicyChange to align on changes in the UI. {pull}20779[20779] diff --git a/x-pack/elastic-agent/_meta/config/common.p2.yml.tmpl b/x-pack/elastic-agent/_meta/config/common.p2.yml.tmpl index e8f4c31e8e1..bbadcdc1055 100644 --- a/x-pack/elastic-agent/_meta/config/common.p2.yml.tmpl +++ b/x-pack/elastic-agent/_meta/config/common.p2.yml.tmpl @@ -5,8 +5,9 @@ outputs: default: type: elasticsearch hosts: [127.0.0.1:9200] - username: elastic - password: changeme + api-key: "example-key" + # username: "elastic" + # password: "changeme" inputs: - type: system/metrics @@ -74,8 +75,7 @@ inputs: # # optional values # #protocol: "https" -# #username: "elastic" -# #password: "changeme" +# #service_token: "example-token" # #path: "" # #ssl.verification_mode: full # #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] diff --git a/x-pack/elastic-agent/_meta/config/common.reference.p2.yml.tmpl b/x-pack/elastic-agent/_meta/config/common.reference.p2.yml.tmpl index 8a3ef077357..bfb84102e3c 100644 --- a/x-pack/elastic-agent/_meta/config/common.reference.p2.yml.tmpl +++ 
b/x-pack/elastic-agent/_meta/config/common.reference.p2.yml.tmpl @@ -5,8 +5,9 @@ outputs: default: type: elasticsearch hosts: [127.0.0.1:9200] - username: elastic - password: changeme + api-key: "example-key" + # username: "elastic" + # password: "changeme" inputs: - type: system/metrics @@ -43,8 +44,7 @@ inputs: # # optional values # #protocol: "https" -# #username: "elastic" -# #password: "changeme" +# #service_token: "example-token" # #path: "" # #ssl.verification_mode: full # #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] diff --git a/x-pack/elastic-agent/_meta/config/elastic-agent.docker.yml.tmpl b/x-pack/elastic-agent/_meta/config/elastic-agent.docker.yml.tmpl index 17201aa6dce..b039db33091 100644 --- a/x-pack/elastic-agent/_meta/config/elastic-agent.docker.yml.tmpl +++ b/x-pack/elastic-agent/_meta/config/elastic-agent.docker.yml.tmpl @@ -43,8 +43,7 @@ inputs: # # optional values # #protocol: "https" -# #username: "elastic" -# #password: "changeme" +# #service_token: "${FLEET_SERVER_SERVICE_TOKEN}" # #path: "" # #ssl.verification_mode: full # #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] diff --git a/x-pack/elastic-agent/_meta/elastic-agent.yml b/x-pack/elastic-agent/_meta/elastic-agent.yml index 49388718013..7c24af477d2 100644 --- a/x-pack/elastic-agent/_meta/elastic-agent.yml +++ b/x-pack/elastic-agent/_meta/elastic-agent.yml @@ -43,8 +43,7 @@ inputs: # # optional values # #protocol: "https" -# #username: "elastic" -# #password: "changeme" +# #service_token: "example-token" # #path: "" # #ssl.verification_mode: full # #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] diff --git a/x-pack/elastic-agent/elastic-agent.docker.yml b/x-pack/elastic-agent/elastic-agent.docker.yml index b7d5ff2017e..91148cee08e 100644 --- a/x-pack/elastic-agent/elastic-agent.docker.yml +++ b/x-pack/elastic-agent/elastic-agent.docker.yml @@ -43,8 +43,7 @@ inputs: # # optional values # #protocol: "https" -# #username: "elastic" -# #password: "changeme" +# 
#service_token: "${FLEET_SERVER_SERVICE_TOKEN}" # #path: "" # #ssl.verification_mode: full # #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] diff --git a/x-pack/elastic-agent/elastic-agent.reference.yml b/x-pack/elastic-agent/elastic-agent.reference.yml index da04df95ea8..7770b036dba 100644 --- a/x-pack/elastic-agent/elastic-agent.reference.yml +++ b/x-pack/elastic-agent/elastic-agent.reference.yml @@ -11,8 +11,9 @@ outputs: default: type: elasticsearch hosts: [127.0.0.1:9200] - username: elastic - password: changeme + api-key: "example-key" + # username: "elastic" + # password: "changeme" inputs: - type: system/metrics @@ -49,8 +50,7 @@ inputs: # # optional values # #protocol: "https" -# #username: "elastic" -# #password: "changeme" +# #service_token: "example-token" # #path: "" # #ssl.verification_mode: full # #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] diff --git a/x-pack/elastic-agent/elastic-agent.yml b/x-pack/elastic-agent/elastic-agent.yml index 802df992ba7..d2cfa19d384 100644 --- a/x-pack/elastic-agent/elastic-agent.yml +++ b/x-pack/elastic-agent/elastic-agent.yml @@ -11,8 +11,9 @@ outputs: default: type: elasticsearch hosts: [127.0.0.1:9200] - username: elastic - password: changeme + api-key: "example-key" + # username: "elastic" + # password: "changeme" inputs: - type: system/metrics @@ -80,8 +81,7 @@ inputs: # # optional values # #protocol: "https" -# #username: "elastic" -# #password: "changeme" +# #service_token: "example-token" # #path: "" # #ssl.verification_mode: full # #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] diff --git a/x-pack/elastic-agent/pkg/agent/cmd/container.go b/x-pack/elastic-agent/pkg/agent/cmd/container.go index d72b0128430..f127d972bf1 100644 --- a/x-pack/elastic-agent/pkg/agent/cmd/container.go +++ b/x-pack/elastic-agent/pkg/agent/cmd/container.go @@ -79,8 +79,6 @@ The following actions are possible and grouped based on the actions. 
The following vars are need in the scenario that Elastic Agent should automatically fetch its own token. KIBANA_FLEET_HOST - kibana host to enable create enrollment token on [$KIBANA_HOST] - KIBANA_FLEET_USERNAME - kibana username to create enrollment token [$KIBANA_USERNAME] - KIBANA_FLEET_PASSWORD - kibana password to create enrollment token [$KIBANA_PASSWORD] FLEET_TOKEN_NAME - token name to use for fetching token from Kibana. This requires Kibana configs to be set. FLEET_TOKEN_POLICY_NAME - token policy name to use for fetching token from Kibana. This requires Kibana configs to be set. @@ -93,8 +91,6 @@ The following actions are possible and grouped based on the actions. FLEET_SERVER_ENABLE - set to 1 enables bootstrapping of Fleet Server inside Elastic Agent (forces FLEET_ENROLL enabled) FLEET_SERVER_ELASTICSEARCH_HOST - elasticsearch host for Fleet Server to communicate with [$ELASTICSEARCH_HOST] - FLEET_SERVER_ELASTICSEARCH_USERNAME - elasticsearch username for Fleet Server [$ELASTICSEARCH_USERNAME] - FLEET_SERVER_ELASTICSEARCH_PASSWORD - elasticsearch password for Fleet Server [$ELASTICSEARCH_PASSWORD] FLEET_SERVER_ELASTICSEARCH_CA - path to certificate authority to use with communicate with elasticsearch [$ELASTICSEARCH_CA] FLEET_SERVER_ELASTICSEARCH_CA_TRUSTED_FINGERPRINT - The sha-256 fingerprint value of the certificate authority to trust FLEET_SERVER_ELASTICSEARCH_INSECURE - disables cert validation for communication with Elasticsearch @@ -113,8 +109,6 @@ The following actions are possible and grouped based on the actions. KIBANA_FLEET_SETUP - set to 1 enables the setup of Fleet in Kibana by Elastic Agent. This was previously FLEET_SETUP. KIBANA_FLEET_HOST - Kibana host accessible from fleet-server. 
[$KIBANA_HOST] - KIBANA_FLEET_USERNAME - kibana username to enable Fleet [$KIBANA_USERNAME] - KIBANA_FLEET_PASSWORD - kibana password to enable Fleet [$KIBANA_PASSWORD] KIBANA_FLEET_CA - path to certificate authority to use with communicate with Kibana [$KIBANA_CA] KIBANA_REQUEST_RETRY_SLEEP - specifies sleep duration taken when agent performs a request to kibana [default 1s] KIBANA_REQUEST_RETRY_COUNT - specifies number of retries agent performs when executing a request to kibana [default 30] @@ -123,12 +117,8 @@ The following environment variables are provided as a convenience to prevent a l be used when the same credentials will be used across all the possible actions above. ELASTICSEARCH_HOST - elasticsearch host [http://elasticsearch:9200] - ELASTICSEARCH_USERNAME - elasticsearch username [elastic] - ELASTICSEARCH_PASSWORD - elasticsearch password [changeme] ELASTICSEARCH_CA - path to certificate authority to use with communicate with elasticsearch KIBANA_HOST - kibana host [http://kibana:5601] - KIBANA_USERNAME - kibana username [$ELASTICSEARCH_USERNAME] - KIBANA_PASSWORD - kibana password [$ELASTICSEARCH_PASSWORD] KIBANA_CA - path to certificate authority to use with communicate with Kibana [$ELASTICSEARCH_CA] @@ -427,10 +417,7 @@ func buildFleetServerConnStr(cfg fleetServerConfig) (string, error) { if u.Path != "" { path += "/" + strings.TrimLeft(u.Path, "/") } - if cfg.Elasticsearch.ServiceToken != "" { - return fmt.Sprintf("%s://%s%s", u.Scheme, u.Host, path), nil - } - return fmt.Sprintf("%s://%s:%s@%s%s", u.Scheme, cfg.Elasticsearch.Username, cfg.Elasticsearch.Password, u.Host, path), nil + return fmt.Sprintf("%s://%s%s", u.Scheme, u.Host, path), nil } func kibanaSetup(cfg setupConfig, client *kibana.Client, streams *cli.IOStreams) error { @@ -485,8 +472,6 @@ func kibanaClient(cfg kibanaConfig, headers map[string]string) (*kibana.Client, return kibana.NewClientWithConfigDefault(&kibana.ClientConfig{ Host: cfg.Fleet.Host, - Username: cfg.Fleet.Username, 
- Password: cfg.Fleet.Password, ServiceToken: cfg.Fleet.ServiceToken, IgnoreVersion: true, Transport: transport, diff --git a/x-pack/elastic-agent/pkg/agent/cmd/enroll_cmd_test.go b/x-pack/elastic-agent/pkg/agent/cmd/enroll_cmd_test.go index d5820275371..17bcbcedd25 100644 --- a/x-pack/elastic-agent/pkg/agent/cmd/enroll_cmd_test.go +++ b/x-pack/elastic-agent/pkg/agent/cmd/enroll_cmd_test.go @@ -157,8 +157,6 @@ func TestEnroll(t *testing.T) { require.NoError(t, err) require.Equal(t, "my-access-api-key", config.AccessAPIKey) require.Equal(t, host, config.Client.Host) - require.Equal(t, "", config.Client.Username) - require.Equal(t, "", config.Client.Password) }, )) @@ -217,8 +215,6 @@ func TestEnroll(t *testing.T) { require.NoError(t, err) require.Equal(t, "my-access-api-key", config.AccessAPIKey) require.Equal(t, host, config.Client.Host) - require.Equal(t, "", config.Client.Username) - require.Equal(t, "", config.Client.Password) }, )) @@ -277,8 +273,6 @@ func TestEnroll(t *testing.T) { require.NoError(t, err) require.Equal(t, "my-access-api-key", config.AccessAPIKey) require.Equal(t, host, config.Client.Host) - require.Equal(t, "", config.Client.Username) - require.Equal(t, "", config.Client.Password) }, )) diff --git a/x-pack/elastic-agent/pkg/agent/cmd/setup_config.go b/x-pack/elastic-agent/pkg/agent/cmd/setup_config.go index b33c0f8fa8e..39cf43de28c 100644 --- a/x-pack/elastic-agent/pkg/agent/cmd/setup_config.go +++ b/x-pack/elastic-agent/pkg/agent/cmd/setup_config.go @@ -43,8 +43,6 @@ type elasticsearchConfig struct { CA string `config:"ca"` CATrustedFingerprint string `config:"ca_trusted_fingerprint"` Host string `config:"host"` - Username string `config:"username"` - Password string `config:"password"` ServiceToken string `config:"service_token"` Insecure bool `config:"insecure"` } @@ -59,9 +57,7 @@ type kibanaConfig struct { type kibanaFleetConfig struct { CA string `config:"ca"` Host string `config:"host"` - Password string `config:"password"` Setup bool 
`config:"setup"` - Username string `config:"username"` ServiceToken string `config:"service_token"` } @@ -93,8 +89,6 @@ func defaultAccessConfig() (setupConfig, error) { CertKey: envWithDefault("", "FLEET_SERVER_CERT_KEY"), Elasticsearch: elasticsearchConfig{ Host: envWithDefault("http://elasticsearch:9200", "FLEET_SERVER_ELASTICSEARCH_HOST", "ELASTICSEARCH_HOST"), - Username: envWithDefault("elastic", "FLEET_SERVER_ELASTICSEARCH_USERNAME", "ELASTICSEARCH_USERNAME"), - Password: envWithDefault("changeme", "FLEET_SERVER_ELASTICSEARCH_PASSWORD", "ELASTICSEARCH_PASSWORD"), ServiceToken: envWithDefault("", "FLEET_SERVER_SERVICE_TOKEN"), CA: envWithDefault("", "FLEET_SERVER_ELASTICSEARCH_CA", "ELASTICSEARCH_CA"), CATrustedFingerprint: envWithDefault("", "FLEET_SERVER_ELASTICSEARCH_CA_TRUSTED_FINGERPRINT"), @@ -115,8 +109,6 @@ func defaultAccessConfig() (setupConfig, error) { // reflect that its setting up Fleet in Kibana versus setting up Fleet Server. Setup: envBool("KIBANA_FLEET_SETUP", "FLEET_SETUP"), Host: envWithDefault("http://kibana:5601", "KIBANA_FLEET_HOST", "KIBANA_HOST"), - Username: envWithDefault("elastic", "KIBANA_FLEET_USERNAME", "KIBANA_USERNAME", "ELASTICSEARCH_USERNAME"), - Password: envWithDefault("changeme", "KIBANA_FLEET_PASSWORD", "KIBANA_PASSWORD", "ELASTICSEARCH_PASSWORD"), ServiceToken: envWithDefault("", "KIBANA_FLEET_SERVICE_TOKEN", "FLEET_SERVER_SERVICE_TOKEN"), CA: envWithDefault("", "KIBANA_FLEET_CA", "KIBANA_CA", "ELASTICSEARCH_CA"), }, diff --git a/x-pack/elastic-agent/pkg/agent/configuration/fleet_server.go b/x-pack/elastic-agent/pkg/agent/configuration/fleet_server.go index 425d899a55b..5a4e135afae 100644 --- a/x-pack/elastic-agent/pkg/agent/configuration/fleet_server.go +++ b/x-pack/elastic-agent/pkg/agent/configuration/fleet_server.go @@ -37,8 +37,6 @@ type Elasticsearch struct { Protocol string `config:"protocol" yaml:"protocol"` Hosts []string `config:"hosts" yaml:"hosts"` Path string `config:"path" yaml:"path,omitempty"` - Username 
string `config:"username" yaml:"username,omitempty"` - Password string `config:"password" yaml:"password,omitempty"` ServiceToken string `config:"service_token" yaml:"service_token,omitempty"` TLS *tlscommon.Config `config:"ssl" yaml:"ssl,omitempty"` Headers map[string]string `config:"headers" yaml:"headers,omitempty"` @@ -70,18 +68,9 @@ func ElasticsearchFromConnStr(conn string, serviceToken string, insecure bool) ( VerificationMode: tlscommon.VerifyNone, } } - if serviceToken != "" { - cfg.ServiceToken = serviceToken - return cfg, nil + if serviceToken == "" { + return Elasticsearch{}, errors.New("invalid connection string: must include a service token") } - if u.User == nil || u.User.Username() == "" { - return Elasticsearch{}, errors.New("invalid connection string: must include a username unless a service token is provided") - } - password, ok := u.User.Password() - if !ok { - return Elasticsearch{}, errors.New("invalid connection string: must include a password unless a service token is provided") - } - cfg.Username = u.User.Username() - cfg.Password = password + cfg.ServiceToken = serviceToken return cfg, nil } diff --git a/x-pack/elastic-agent/pkg/remote/client.go b/x-pack/elastic-agent/pkg/remote/client.go index 19e1da1dbb8..23f6162c08e 100644 --- a/x-pack/elastic-agent/pkg/remote/client.go +++ b/x-pack/elastic-agent/pkg/remote/client.go @@ -60,19 +60,10 @@ func NewConfigFromURL(kURL string) (Config, error) { return Config{}, errors.Wrap(err, "could not parse url") } - var username, password string - if u.User != nil { - username = u.User.Username() - // _ is true when password is set. 
- password, _ = u.User.Password() - } - c := DefaultClientConfig() c.Protocol = Protocol(u.Scheme) c.Host = u.Host c.Path = u.Path - c.Username = username - c.Password = password return c, nil } @@ -126,11 +117,6 @@ func NewWithConfig(log *logger.Logger, cfg Config, wrapper wrapperFunc) (*Client return nil, err } - if cfg.IsBasicAuth() { - // Pass basic auth credentials to all the underlying calls. - transport = NewBasicAuthRoundTripper(transport, cfg.Username, cfg.Password) - } - if wrapper != nil { transport, err = wrapper(transport) if err != nil { diff --git a/x-pack/elastic-agent/pkg/remote/client_test.go b/x-pack/elastic-agent/pkg/remote/client_test.go index a48ebe82daf..ef8a5f0d626 100644 --- a/x-pack/elastic-agent/pkg/remote/client_test.go +++ b/x-pack/elastic-agent/pkg/remote/client_test.go @@ -160,58 +160,6 @@ func TestHTTPClient(t *testing.T) { }, )) - t.Run("Basic auth when credentials are valid", withServer( - func(t *testing.T) *http.ServeMux { - msg := `{ message: "hello" }` - mux := http.NewServeMux() - mux.HandleFunc("/echo-hello", basicAuthHandler(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - fmt.Fprint(w, msg) - }, "hello", "world", "testing")) - return mux - }, func(t *testing.T, host string) { - cfg := config.MustNewConfigFrom(map[string]interface{}{ - "username": "hello", - "password": "world", - "host": host, - }) - - client, err := NewWithRawConfig(nil, cfg, nil) - require.NoError(t, err) - resp, err := client.Send(ctx, "GET", "/echo-hello", nil, nil, nil) - require.NoError(t, err) - - body, err := ioutil.ReadAll(resp.Body) - require.NoError(t, err) - defer resp.Body.Close() - assert.Equal(t, `{ message: "hello" }`, string(body)) - }, - )) - - t.Run("Basic auth when credentials are invalid", withServer( - func(t *testing.T) *http.ServeMux { - msg := `{ message: "hello" }` - mux := http.NewServeMux() - mux.HandleFunc("/echo-hello", basicAuthHandler(func(w http.ResponseWriter, r *http.Request) { - 
w.WriteHeader(http.StatusOK) - fmt.Fprint(w, msg) - }, "hello", "world", "testing")) - return mux - }, func(t *testing.T, host string) { - cfg := config.MustNewConfigFrom(map[string]interface{}{ - "username": "bye", - "password": "world", - "host": host, - }) - - client, err := NewWithRawConfig(nil, cfg, nil) - require.NoError(t, err) - resp, err := client.Send(ctx, "GET", "/echo-hello", nil, nil, nil) - require.NoError(t, err) - assert.Equal(t, http.StatusUnauthorized, resp.StatusCode) - }, - )) - t.Run("Custom user agent", withServer( func(t *testing.T) *http.ServeMux { msg := `{ message: "hello" }` @@ -400,19 +348,6 @@ func withServer(m func(t *testing.T) *http.ServeMux, test func(t *testing.T, hos } } -func basicAuthHandler(handler http.HandlerFunc, username, password, realm string) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - u, p, ok := r.BasicAuth() - - if !ok || u != username || p != password { - w.Header().Set("WWW-Authenticate", `Basic realm="`+realm+`"`) - http.Error(w, "Unauthorized", http.StatusUnauthorized) - return - } - handler(w, r) - } -} - type debugStack struct { sync.Mutex messages []string diff --git a/x-pack/elastic-agent/pkg/remote/config.go b/x-pack/elastic-agent/pkg/remote/config.go index 31ae29f70ba..495f850f5bc 100644 --- a/x-pack/elastic-agent/pkg/remote/config.go +++ b/x-pack/elastic-agent/pkg/remote/config.go @@ -15,8 +15,6 @@ import ( type Config struct { Protocol Protocol `config:"protocol" yaml:"protocol"` SpaceID string `config:"space.id" yaml:"space.id,omitempty"` - Username string `config:"username" yaml:"username,omitempty"` - Password string `config:"password" yaml:"password,omitempty"` Path string `config:"path" yaml:"path,omitempty"` Host string `config:"host" yaml:"host,omitempty"` Hosts []string `config:"hosts" yaml:"hosts,omitempty"` @@ -55,17 +53,10 @@ func DefaultClientConfig() Config { Host: "localhost:5601", Path: "", SpaceID: "", - Username: "", - Password: "", Transport: transport, } 
} -// IsBasicAuth returns true if the username and password are both defined. -func (c *Config) IsBasicAuth() bool { - return len(c.Username) > 0 && len(c.Password) > 0 -} - // GetHosts returns the hosts to connect. // // This looks first at `Hosts` and then at `Host` when `Hosts` is not defined. diff --git a/x-pack/elastic-agent/pkg/remote/config_test.go b/x-pack/elastic-agent/pkg/remote/config_test.go index 403609735dd..5a71cb9b6cd 100644 --- a/x-pack/elastic-agent/pkg/remote/config_test.go +++ b/x-pack/elastic-agent/pkg/remote/config_test.go @@ -21,8 +21,6 @@ func TestPackUnpack(t *testing.T) { c := Config{ Protocol: Protocol("https"), SpaceID: "123", - Username: "foo", - Password: "bar", Path: "/ok", Transport: httpcommon.HTTPTransportSettings{ Timeout: 10 * time.Second, diff --git a/x-pack/elastic-agent/pkg/remote/round_trippers.go b/x-pack/elastic-agent/pkg/remote/round_trippers.go index 8c5b86f45ca..e6583af57a8 100644 --- a/x-pack/elastic-agent/pkg/remote/round_trippers.go +++ b/x-pack/elastic-agent/pkg/remote/round_trippers.go @@ -123,36 +123,6 @@ func NewDebugRoundTripper(wrapped http.RoundTripper, log debugLogger) http.Round return &DebugRoundTripper{rt: wrapped, log: log} } -// BasicAuthRoundTripper wraps any request using a basic auth. -type BasicAuthRoundTripper struct { - rt http.RoundTripper - username string - password string -} - -// RoundTrip add username and password on every request send to the remove service. -func (r *BasicAuthRoundTripper) RoundTrip( - req *http.Request, -) (*http.Response, error) { - // if we already have authorization set on the request we do not force our username, password. - const key = "Authorization" - - if len(req.Header.Get(key)) > 0 { - return r.rt.RoundTrip(req) - } - - req.SetBasicAuth(r.username, r.password) - return r.rt.RoundTrip(req) -} - -// NewBasicAuthRoundTripper returns a Basic Auth round tripper. 
-func NewBasicAuthRoundTripper( - wrapped http.RoundTripper, - username, password string, -) http.RoundTripper { - return &BasicAuthRoundTripper{rt: wrapped, username: username, password: password} -} - func prettyBody(data []byte) []byte { var pretty bytes.Buffer From 77d59694597024d959024d928396be103deb5ad7 Mon Sep 17 00:00:00 2001 From: Alex Resnick Date: Sun, 26 Dec 2021 16:03:36 -0600 Subject: [PATCH 48/57] [Filebeat] httpjson - Add function to return User-Agent #29453 (#29528) Adds a new value template function for the httpjson input to generate/update the User Agent. --- CHANGELOG.next.asciidoc | 1 + libbeat/common/useragent/useragent.go | 25 +++++-- libbeat/common/useragent/useragent_test.go | 3 + .../docs/inputs/input-httpjson.asciidoc | 8 ++- x-pack/filebeat/input/httpjson/value_tpl.go | 19 ++++++ .../filebeat/input/httpjson/value_tpl_test.go | 66 +++++++++++++++++++ 6 files changed, 113 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 6183bdb2d8d..0a7be0c25fa 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -137,6 +137,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Add documentation for add_kubernetes_metadata processors `log_path` matcher. {pull}28868[28868] - Add support for parsers on journald input {pull}29070[29070] - Add support in httpjson input for oAuth2ProviderDefault of password grant_type. 
{pull}29087[29087] +- Add new `userAgent` and `beatInfo` template functions for httpjson input {pull}29528[29528] *Heartbeat* diff --git a/libbeat/common/useragent/useragent.go b/libbeat/common/useragent/useragent.go index fdff31c2dc1..a60533982ee 100644 --- a/libbeat/common/useragent/useragent.go +++ b/libbeat/common/useragent/useragent.go @@ -18,17 +18,30 @@ package useragent import ( - "fmt" "runtime" + "strings" "github.com/elastic/beats/v7/libbeat/version" ) // UserAgent takes the capitalized name of the current beat and returns // an RFC compliant user agent string for that beat. -func UserAgent(beatNameCapitalized string) string { - return fmt.Sprintf("Elastic-%s/%s (%s; %s; %s; %s)", - beatNameCapitalized, - version.GetDefaultVersion(), runtime.GOOS, runtime.GOARCH, - version.Commit(), version.BuildTime()) +func UserAgent(beatNameCapitalized string, additionalComments ...string) string { + var builder strings.Builder + builder.WriteString("Elastic-" + beatNameCapitalized + "/" + version.GetDefaultVersion() + " ") + uaValues := []string{ + runtime.GOOS, + runtime.GOARCH, + version.Commit(), + version.BuildTime().String(), + } + for _, val := range additionalComments { + if val != "" { + uaValues = append(uaValues, val) + } + } + builder.WriteByte('(') + builder.WriteString(strings.Join(uaValues, "; ")) + builder.WriteByte(')') + return builder.String() } diff --git a/libbeat/common/useragent/useragent_test.go b/libbeat/common/useragent/useragent_test.go index d55f20dde4b..9ca1951059f 100644 --- a/libbeat/common/useragent/useragent_test.go +++ b/libbeat/common/useragent/useragent_test.go @@ -27,4 +27,7 @@ import ( func TestUserAgent(t *testing.T) { ua := UserAgent("FakeBeat") assert.Regexp(t, regexp.MustCompile("^Elastic-FakeBeat"), ua) + + ua2 := UserAgent("FakeBeat", "integration_name/1.2.3") + assert.Regexp(t, regexp.MustCompile("; integration_name\\/1\\.2\\.3\\)$"), ua2) } diff --git a/x-pack/filebeat/docs/inputs/input-httpjson.asciidoc 
b/x-pack/filebeat/docs/inputs/input-httpjson.asciidoc index cbde7b8b831..15860c45825 100644 --- a/x-pack/filebeat/docs/inputs/input-httpjson.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-httpjson.asciidoc @@ -219,7 +219,9 @@ Some built-in helper functions are provided to work with the input state inside - `join`: joins a list using the specified separator. Example: `[[join .body.arr ","]]` - `sprintf`: formats according to a format specifier and returns the resulting string. Refer to https://pkg.go.dev/fmt#Sprintf[the Go docs] for usage. Example: `[[sprintf "%d:%q" 34 "quote this"]]` - `hmacBase64`: calculates the hmac signature of a list of strings concatenated together. Returns a base64 encoded signature. Supports sha1 or sha256. Example `[[hmac "sha256" "secret" "string1" "string2" (formatDate (now) "RFC1123")]]` -- `uuid`: returns a random UUID such as `a11e8780-e3e7-46d0-8e76-f66e75acf019` Example: `[[ uuid ]]` +- `uuid`: returns a random UUID such as `a11e8780-e3e7-46d0-8e76-f66e75acf019`. Example: `[[ uuid ]]` +- `userAgent`: generates the User Agent with optional additional values. If no arguments are provided, it will generate the default User Agent that is added to all requests by default. It is recommended to delete the existing User-Agent header before setting a new one. Example: `[[ userAgent "integration/1.2.3" ]]` would generate `Elastic-Filebeat/8.1.0 (darwin; amd64; 9b893e88cfe109e64638d65c58fd75c2ff695402; 2021-12-15 13:20:00 +0000 UTC; integration_name/1.2.3)` +- `beatInfo`: returns a map containing information about the Beat. Available keys in the map are `goos` (running operating system), `goarch` (running system architecture), `commit` (git commit of current build), `buildtime` (compile time of current build), `version` (version of current build). Example: `[[ beatInfo.version ]]` returns `{version}`. 
In addition to the provided functions, any of the native functions for https://golang.org/pkg/time/#Time[`time.Time`], https://golang.org/pkg/net/http/#Header[`http.Header`], and https://golang.org/pkg/net/url/#Values[`url.Values`] types can be used on the corresponding objects. Examples: `[[(now).Day]]`, `[[.last_response.header.Get "key"]]` @@ -290,8 +292,8 @@ The user used as part of the authentication flow. It is required for authenticat The password used as part of the authentication flow. It is required for authentication - grant type password. It is only available for provider `default`. -NOTE: user and password are required for grant_type password. If user and -password is not used then it will automatically use the `token_url` and +NOTE: user and password are required for grant_type password. If user and +password is not used then it will automatically use the `token_url` and `client credential` method. [float] diff --git a/x-pack/filebeat/input/httpjson/value_tpl.go b/x-pack/filebeat/input/httpjson/value_tpl.go index 95c95093e5f..d2786b4ce76 100644 --- a/x-pack/filebeat/input/httpjson/value_tpl.go +++ b/x-pack/filebeat/input/httpjson/value_tpl.go @@ -16,6 +16,7 @@ import ( "hash" "reflect" "regexp" + "runtime" "strconv" "strings" "text/template" @@ -23,7 +24,9 @@ import ( "github.com/google/uuid" + "github.com/elastic/beats/v7/libbeat/common/useragent" "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/libbeat/version" ) // we define custom delimiters to prevent issues when using template values as part of other Go templates. @@ -66,6 +69,8 @@ func (t *valueTpl) Unpack(in string) error { "sprintf": fmt.Sprintf, "hmacBase64": hmacStringBase64, "uuid": uuidString, + "userAgent": userAgentString, + "beatInfo": beatInfo, }). Delims(leftDelim, rightDelim). 
Parse(in) @@ -360,3 +365,17 @@ func join(v interface{}, sep string) string { // return the stringified single value return fmt.Sprint(v) } + +func userAgentString(values ...string) string { + return useragent.UserAgent("Filebeat", values...) +} + +func beatInfo() map[string]string { + return map[string]string{ + "goos": runtime.GOOS, + "goarch": runtime.GOARCH, + "commit": version.Commit(), + "buildtime": version.BuildTime().String(), + "version": version.GetDefaultVersion(), + } +} diff --git a/x-pack/filebeat/input/httpjson/value_tpl_test.go b/x-pack/filebeat/input/httpjson/value_tpl_test.go index ad6aab449de..8c111ee78b6 100644 --- a/x-pack/filebeat/input/httpjson/value_tpl_test.go +++ b/x-pack/filebeat/input/httpjson/value_tpl_test.go @@ -6,13 +6,16 @@ package httpjson import ( "net/http" + "runtime" "testing" "time" "github.com/stretchr/testify/assert" "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/libbeat/common/useragent" "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/libbeat/version" ) func TestValueTpl(t *testing.T) { @@ -394,6 +397,69 @@ func TestValueTpl(t *testing.T) { expectedVal: "", expectedError: errEmptyTemplateResult.Error(), }, + { + name: "func userAgent no values", + value: `[[userAgent]]`, + paramCtx: emptyTransformContext(), + paramTr: transformable{}, + expectedVal: useragent.UserAgent("Filebeat"), + }, + { + name: "func userAgent blank value", + value: `[[userAgent ""]]`, + paramCtx: emptyTransformContext(), + paramTr: transformable{}, + expectedVal: useragent.UserAgent("Filebeat"), + }, + { + name: "func userAgent 1 value", + value: `[[userAgent "integration_name/1.2.3"]]`, + paramCtx: emptyTransformContext(), + paramTr: transformable{}, + expectedVal: useragent.UserAgent("Filebeat", "integration_name/1.2.3"), + }, + { + name: "func userAgent 2 value", + value: `[[userAgent "integration_name/1.2.3" "test"]]`, + paramCtx: emptyTransformContext(), 
+ paramTr: transformable{}, + expectedVal: useragent.UserAgent("Filebeat", "integration_name/1.2.3", "test"), + }, + { + name: "func beatInfo GOOS", + value: `[[beatInfo.goos]]`, + paramCtx: emptyTransformContext(), + paramTr: transformable{}, + expectedVal: runtime.GOOS, + }, + { + name: "func beatInfo Arch", + value: `[[beatInfo.goarch]]`, + paramCtx: emptyTransformContext(), + paramTr: transformable{}, + expectedVal: runtime.GOARCH, + }, + { + name: "func beatInfo Commit", + value: `[[beatInfo.commit]]`, + paramCtx: emptyTransformContext(), + paramTr: transformable{}, + expectedVal: version.Commit(), + }, + { + name: "func beatInfo Build Time", + value: `[[beatInfo.buildtime]]`, + paramCtx: emptyTransformContext(), + paramTr: transformable{}, + expectedVal: version.BuildTime().String(), + }, + { + name: "func beatInfo Version", + value: `[[beatInfo.version]]`, + paramCtx: emptyTransformContext(), + paramTr: transformable{}, + expectedVal: version.GetDefaultVersion(), + }, } for _, tc := range cases { From 90dd16973b1d0e90c377de5b73a0967686691ced Mon Sep 17 00:00:00 2001 From: Kevin Lacabane Date: Mon, 27 Dec 2021 00:35:23 +0100 Subject: [PATCH 49/57] beat/kibana module - add {module}.elasticsearch.cluster.id property (#29577) * add elasticsearch.cluster.id property to beats state * add elasticsearch.cluster.id property to beats stats * fix test * lint * add missing reloads property * dont break earlier assumptions * remove trailing comma * update beats.stats fields * add updated fields doc * beat: update cluster.id path * kibana: update cluster.id path * mage fmt update * fix fields path * add missing map initialization * add changelog entry * fix pr number --- CHANGELOG.next.asciidoc | 1 + metricbeat/docs/fields.asciidoc | 21 +++++++++++ metricbeat/module/beat/_meta/fields.yml | 2 ++ metricbeat/module/beat/fields.go | 2 +- metricbeat/module/beat/state/_meta/data.json | 7 +++- metricbeat/module/beat/state/data.go | 5 ++- 
metricbeat/module/beat/stats/_meta/data.json | 10 ++++-- metricbeat/module/beat/stats/_meta/fields.yml | 2 ++ metricbeat/module/beat/stats/data.go | 16 +++++---- metricbeat/module/beat/stats/data_test.go | 4 ++- metricbeat/module/beat/stats/stats.go | 35 ++++++++++++++++++- metricbeat/module/kibana/_meta/fields.yml | 2 ++ metricbeat/module/kibana/fields.go | 2 +- metricbeat/module/kibana/stats/data.go | 2 +- 14 files changed, 96 insertions(+), 15 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 0a7be0c25fa..419b02cb29b 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -149,6 +149,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Add `add_resource_metadata` configuration to Kubernetes module. {pull}29133[29133] - Add `container.id` and `container.runtime` ECS fields in container metricset. {pull}29560[29560] - Add `memory.workingset.limit.pct` field in Kubernetes container/pod metricset. {pull}29547[29547] +- Add `elasticsearch.cluster.id` field to Beat and Kibana modules. {pull}29577[29577] *Packetbeat* diff --git a/metricbeat/docs/fields.asciidoc b/metricbeat/docs/fields.asciidoc index 2553dc415ed..e1e256d0dc3 100644 --- a/metricbeat/docs/fields.asciidoc +++ b/metricbeat/docs/fields.asciidoc @@ -7535,6 +7535,13 @@ type: keyword -- +*`beat.elasticsearch.cluster.id`*:: ++ +-- +type: keyword + +-- + [float] === state @@ -8707,6 +8714,13 @@ type: short -- +*`beat.stats.libbeat.config.reloads`*:: ++ +-- +type: short + +-- + [float] === output @@ -40648,6 +40662,13 @@ alias to: service.id -- +*`kibana.elasticsearch.cluster.id`*:: ++ +-- +type: keyword + +-- + [float] === settings diff --git a/metricbeat/module/beat/_meta/fields.yml b/metricbeat/module/beat/_meta/fields.yml index b29bffb2cfa..8cb1de91166 100644 --- a/metricbeat/module/beat/_meta/fields.yml +++ b/metricbeat/module/beat/_meta/fields.yml @@ -634,3 +634,5 @@ type: keyword description: > Beat type. 
+ - name: elasticsearch.cluster.id + type: keyword diff --git a/metricbeat/module/beat/fields.go b/metricbeat/module/beat/fields.go index 03105135aaa..4948b3e3346 100644 --- a/metricbeat/module/beat/fields.go +++ b/metricbeat/module/beat/fields.go @@ -32,5 +32,5 @@ func init() { // AssetBeat returns asset data. // This is the base64 encoded zlib format compressed contents of module/beat. func AssetBeat() string { - return "eJzsXU1v5LgRvftXED5nBOxhLz4kQZAE2EsCBAvkEAQGrWZ3E5ZIDUl54v31gaRutT5YrJJEqnt3x5cZjEfvPRbJYvGzvrB38fnC3gR3T4w56Qrxwp7/Irh7fmLsIGxuZOWkVi/sj0+MMdb8ipX6UBfiiTEjCsGteGEn/sSYFc5JdbIv7D/P1hbPf2DPZ+eq5/82vztr415zrY7y9MKOvLDN90cpioN9aZG/MMVL0Wmxr9ZxZ9t/Z8x9Vg2F0XV1+Zfhd8NveVV+scJ8CNP/yve5D2IIUxmdC2u1Gf0WQoLQhoi24ipzhit71KbkjUXt7D9fCXghue+3FXfnzj5Za56MV+VrV9ysl5yRqK66hDGzUoZLGirttMS+QuDFpBe1VZ/BTL0Wx/N3Z3gudlKE8F11HQ0vd5IUoLqqwZtnfFkUzqu+g8j1QapT9+k++iicM325rpXbVx5MeVX3wQt5aK28p/1orB6N+9kQJR31D543//NX6zEHZXgovznS9UjecyiM6kO7/76nMpT2Pj5+JvEhPf1Q5cP6e6/Ix/P6Q5kP7fsBoeERwOra5KLkPg+/3f+33MIXjccseF+GLMR3n5540/aw/dAj8fF64U3kQ/dBr8xwDyyFMzJP0v32HRq7gqwaFYPKIAtgVmDTRQGoeDQjLDYEzdFMnOWdJC7p8L/JyqL6s7tWV9CnjPrUbAxcv853EMeCO5EoPlBOKPelEOrkzkn808Uc2aUYGYGSUsOxRYVGiNMvMlV4tpv5mzI8lu0vikKGr1Wuy8oIa8Xh114Bw7I8VkVMlIUqxAg+d20szlKV/MXn4+IVs9OegTz7GfyiBAlLpbVSnb6MmwoYKKzf5rmqIhP2NTbdFWObxjkjvtbCugQlvPwRJhi4hLw2RiiXfePSZWXcTbXLHwSWm11spZVNFQCkauy90Tv11Ag3eXCr35PGjNNit4XKAqT9PnOei8p5x7nU6lDq/UJuv0C87RDWQGI0nv0N0BUsuHYyWG/QuuDmBI2jSRWi3JOJ7F1Eoty90+VOFLKUd6lsnPwq82st6rsYMkzcd5dC+yP35PoQ5n6/T5s3eTgIaE0lqUacfBaT5J/3cT44/W3Kxmt31kb+cp+KJ/FfxUrlhFG8uIdQlHu8nnQPiQjzYAH9rO9S2R7mWzhVBgN+WFNIRxh1zO2N2h9xejSUS5wk1cqKhCL88FgQFok8XHJokrJ9Kqa0K/VBHiXouLZNyEaF7OJqjDL9LNEjCg/2wdlUdC0AE3HOFl0OyIdOg7Y3TyR0jlhWSqxOHMHjqwoSUsLy+JJgtp37LzpXJYU08QUF6Igz5/iagoSDYPqDy4K/FfsJwzipU6f4ysKMpGg0vqgA3WBgP+pa7acpSHjznK2D/VoLA83qUnhPhJS6/hJfWpiRtKKRwJXCdAsm3EmcxJx0eNvqacq+5K7UWdsN0zWpjjqbQfSdkZf+ORkdewbRDyKf1VbsGURfzfVs2rEUewbRh3bC2Pm5oqXwU5Tx+Tq7qUXYT+tEGW0unVd1lmvjPXi/cBbZKctgxCtnofkh+yEaIQA3YvsxLtscbly2y
HQevBGf0mbaIliUmc7zD8/xHOmwRI1ibxMYcv+YlNxXRwP2pOQ/ert2Id8mAwbb1LcrWYlCqkTb5YUUykU8yHspfXZVnYUYRtPLjOfvMUf8mRKMpV9z+BDI5DLGzivPnfxIsAo+K3ZbHJshfP1M1uiqSrHTAenCCPvpGZfFnroQvl6WLJwwuwrDGHvHUb8V0p731IZT3lb/HThNSyAsTHdbNHEp9s4gUX66flG4dlUNbwts8fod/w4uDnbobLs5OwPd/FuIbC+vO9VE8rlv3OVn8JZudFUY3W5DwVgWdSA41FUhc57kZBmg7MqI2yz1KDUWRhyjnNYlVwm97VgVRreTt52KgsiGR8Gzt0+4ljfEoBctCMVISexLkD4phOuOniWgeCJA8Cv7NyOdSFwpGMdYS+JqCZIMTk8dpe+K4/bIoHu1KjO1UtgtylXF7KQH8SdSrOMmxQT1oiQAPxOiq5Q6pujA6jPbtJjQLmyK6ixKYXjxGjj5QV7iwiFH5HXlZCmi3EJA8PrVd64ORWBpdEt/aTdYsjM3ERcuLnozBHsswepjxK2lsQQQu5+qVDF3Ta/kXtTbInw5fP5uTrmlVk/5qxL/i2jPq9oshDwomjafr7wotO8W/1YJHXwGw090hMK0CDoIgZmN6HZ7eh9qP8BCjSfCgmtVJ593g5eO2PqoujNJuy0UgO9LebRZJYzUh6xOMK8diKExDXV9rbXje8hCifrNQMCRjeWEWgYjtI4hY2exECcjm4Ehpuj+gcLYzznORjtXiAPpq7g6l3HPFYOPoqQVizyLklc1z3MHd90VnvSmJgQ9VdC6e9hK23UECcZDWwpzhJEH/JfYKvLcdqyCQjOUVFt+ij7dnksK0YTHyQi38rtNXCfz95hlbLokhjyTAE2DYoiAsUfLb9kHL2Ke7G00YMBjAQlqAgOeCkhQDzh0fw7KCpPCCAjuhD6BCUbIT2yyotG9Xn5tIPN+Db1eftezeCJr/5rkLN4FO8lZvAt2krN4F+xUZ/Eu8NBZvKZ9WcfLatYmptgd7vOf+y+eZ2DDFslWNS5/FUZsBWDrXbMYyE1+lk7krjbw6aLwalyjJwviDJV7LLOICMQIdMBFBEFwnWaxqSq4O/qP/uHama8JtUXRNgsih/tnBHYf8G0luKo3XmMVGQQyJlGe96gXk8xBJpsDG4sCokxoNhYGRBmfOdnowSCQ3s0K8yHzeBeJt29iiOyiyTdvi+BbPERBTwP3yZVsQ0BPJDW3vSerDRyTjYzWYb2Lz296tHHhRex+2nw5P/01m4/u44BlLXTz3RycNtpj2FOUcbaf60/wKsB+3WFuuvGnwSaOfYw1Wt/3I1cdzQLh+/SF9mw8Dy0AD/OhIkTdpvWEeWEV0zKs/hg4kkH72DOPoH+8pf3kRW2dMPNZCOXjqDF1rpXjUgno+hrNFolCzXdhlAjv5UHKGN7AloGQwl4KECWADVV/yRU/iVIolwnF3+YH6jqIN60Lwac0geGh+fnJslwoZ3gxoGEXmj+tCyilcuI0e6YSEfKPunwThunjBd8yX1nJ0WbInlgM6a9TTD8vRaO+A2e1FQf29tkOwV4R3V2PNBpabPYmpDr5hUD7eusjDLspwrjdfY3m5qDsf2HUEPLIcLRMgCw8pg8RoRx+uF5M81Q3vpELimXjtoOklVoIGMwEtRCLVjULQanZDdaB0h66JGHSX/ZfC7tR7KiawGRs7HvTX4FFSF62BvF7Z3r0zhTKa8WidqVwjiuCZPa9ISRsCGB6JRa1FSR1CyhazMNmaHUSC8EISzwLIeet5PdqGTDS92f/YVHifDgTEI6PcbDJogyaFYUtazKRvAmQiof9XsqPZMRhvxc7gIloWNwwHU5KQxDL4pd7YVYYBHm4p5LIZ2HPFi8oOykzywK8QGIVvOxY+Vm8umfTkXfXQTeYL4WofohHyHOyAjVygEMMqWPaOVIJ2OjAJpoOZAUqIX/HClRKwo0VsFiCjBWQaFKLF
ZiUPBRrpJLyRqwAJmZ5WIFMSMmwAhXNobACE017gGDeHGI4U8EimISpwqKO5JshoAwECyD2yTeGP+tPkMzSRBOEXGgkHHLuMtrGUziui1Ep5BxUpOKT08iQ0Gg5nfZuKsQsNLQlfGqaMhIa/kj6QkB6UigSHDE9DgkLeUp8IRrhCfCFiPS0abT2S8sfRm0m9MRUM8SbyY76yffFmlMBm06ebTrz9us8thb1zOB349O+79eI9n6COxjtFpqnycKEPf580DWWGQN9w5mCsR0CeB+bRQ2kYHORtDKa2ZZjrYMaNM5oDR2550xZncUuKRMwwpeMyQAbSoFd8SVABO/o0r9fqCDJ0E96UwwdC9C3wcDCeJ/s2eTNk3hjynM11DUH/IkZIhKyaEA5hB16wiaec8afZFkyaVvw0ssq2LWHNgZtEH5ZhdahCI+iEIR0T3ck6Q8Rmt6St1QIh3vor6CAvgh4m269N4JfjSPXHvjeGx0BeqmNgOB/wg0xo//dxvVWBF4ppATnoZcYyd8DzygiVoDGRJAWucXQ3inoQL18plYt4UkbXTupgApYTny7gnKDZpeHYJlU8F2OpQlVEB1/b5sJy3VZasWcZrwoWvJpQR8+MQvqzah5VfBV7L0SBpBe56et7xJfr6etoNKedadhUTOGkNDoST5o652ktBzERXHK8+7hCCjdQ9eUF67tWZvwU62Ex6kpINjD0n6MflxblygEcZPNzz+723ZQcE+xM7A2yEghHklk8/PzZzW4HpjSlREF/a1huZ7kh9ji5lFB9vmJwtlopO6M1QhQ+lshDqd4KVZSyO2ow1U8WH0npl1JZlhUQdSxbFsTWJyd5Y52tZQcMTGH9m1iI2dxSaHUac0acmrX2jD2r1L6c8PH1MSy4QBAeDdcWMxR4F+CH4JLY9QRILQYwZLWfMscMtbiM6YpVDb6QvxXiW16ldS1/u+G5DdR7Y25XODYyAPUfFujUwGhv/w/AAD//1Cnw20=" + return 
"eJzsXU1v5LgRvftXED5nBOxhLz4kQZAE2EsCBAvkEAQGrWZ3E5ZIDUl54v31gaRutT5YrJJEqnt3x5cZjEfvPRbJYvGzvrB38fnC3gR3T4w56Qrxwp7/Irh7fmLsIGxuZOWkVi/sj0+MMdb8ipX6UBfiiTEjCsGteGEn/sSYFc5JdbIv7D/P1hbPf2DPZ+eq5/82vztr415zrY7y9MKOvLDN90cpioN9aZG/MMVL0Wmxr9ZxZ9t/Z8x9Vg2F0XV1+Zfhd8NveVV+scJ8CNP/yve5D2IIUxmdC2u1Gf0WQoLQhoi24ipzhit71KbkjUXt7D9fCXghue+3FXfnzj5Za56MV+VrV9ysl5yRqK66hDGzUoZLGirttMS+QuDFpBe1VZ/BTL0Wx/N3Z3gudlKE8F11HQ0vd5IUoLqqwZtnfFkUzqu+g8j1QapT9+k++iicM325rpXbVx5MeVX3wQt5aK28p/1orB6N+9kQJR31D543//NX6zEHZXgovznS9UjecyiM6kO7/76nMpT2Pj5+JvEhPf1Q5cP6e6/Ix/P6Q5kP7fsBoeERwOra5KLkPg+/3f+33MIXjccseF+GLMR3n5540/aw/dAj8fF64U3kQ/dBr8xwDyyFMzJP0v32HRq7gqwaFYPKIAtgVmDTRQGoeDQjLDYEzdFMnOWdJC7p8L/JyqL6s7tWV9CnjPrUbAxcv853EMeCO5EoPlBOKPelEOrkzkn808Uc2aUYGYGSUsOxRYVGiNMvMlV4tpv5mzI8lu0vikKGr1Wuy8oIa8Xh114Bw7I8VkVMlIUqxAg+d20szlKV/MXn4+IVs9OegTz7GfyiBAlLpbVSnb6MmwoYKKzf5rmqIhP2NTbdFWObxjkjvtbCugQlvPwRJhi4hLw2RiiXfePSZWXcTbXLHwSWm11spZVNFQCkauy90Tv11Ag3eXCr35PGjNNit4XKAqT9PnOei8p5x7nU6lDq/UJuv0C87RDWQGI0nv0N0BUsuHYyWG/QuuDmBI2jSRWi3JOJ7F1Eoty90+VOFLKUd6lsnPwq82st6rsYMkzcd5dC+yP35PoQ5n6/T5s3eTgIaE0lqUacfBaT5J/3cT44/W3Kxmt31kb+cp+KJ/FfxUrlhFG8uIdQlHu8nnQPiQjzYAH9rO9S2R7mWzhVBgN+WFNIRxh1zO2N2h9xejSUS5wk1cqKhCL88FgQFok8XHJokrJ9Kqa0K/VBHiXouLZNyEaF7OJqjDL9LNEjCg/2wdlUdC0AE3HOFl0OyIdOg7Y3TyR0jlhWSqxOHMHjqwoSUsLy+JJgtp37LzpXJYU08QUF6Igz5/iagoSDYPqDy4K/FfsJwzipU6f4ysKMpGg0vqgA3WBgP+pa7acpSHjznK2D/VoLA83qUnhPhJS6/hJfWpiRtKKRwJXCdAsm3EmcxJx0eNvqacq+5K7UWdsN0zWpjjqbQfSdkZf+ORkdewbRDyKf1VbsGURfzfVs2rEUewbRh3bC2Pm5oqXwU5Tx+Tq7qUXYT+tEGW0unVd1lmvjPXi/cBbZKctgxCtnofkh+yEaIQA3YvsxLtscbly2yHQevBGf0mbaIliUmc7zD8/xHOmwRI1ibxMYcv+YlNxXRwP2pOQ/ert2Id8mAwbb1LcrWYlCqkTb5YUUykU8yHspfXZVnYUYRtPLjOfvMUf8mRKMpV9z+BDI5DLGzivPnfxIsAo+K3ZbHJshfP1M1uiqSrHTAenCCPvpGZfFnroQvl6WLJwwuwrDGHvHUb8V0p731IZT3lb/HThNSyAsTHdbNHEp9s4gUX66flG4dlUNbwts8fod/w4uDnbobLs5OwPd/FuIbC+vO9VE8rlv3OVn8JZudFUY3W5DwVgWdSA41FUhc57kZBmg7MqI2yz1KDUWRhyjnNYlVwm97VgVRreTt52KgsiGR8Gzt0+4ljfEoBctCMVISexLkD4phOuOniWgeCJA8Cv7NyOdSFwpGMdYS+JqCZIMTk8dpe+K4/b
IoHu1KjO1UtgtylXF7KQH8SdSrOMmxQT1oiQAPxOiq5Q6pujA6jPbtJjQLmyK6ixKYXjxGjj5QV7iwiFH5HXlZCmi3EJA8PrVd64ORWBpdEt/aTdYsjM3ERcuLnozBHsswepjxK2lsQQQu5+qVDF3Ta/kXtTbInw5fP5uTrmlVk/5qxL/i2jPq9oshDwomjafr7wotO8W/1YJHXwGw090hMK0CDoIgZmN6HZ7eh9qP8BCjSfCgmtVJ593g5eO2PqoujNJuy0UgO9LebRZJYzUh6xOMK8diKExDXV9rbXje8hCifrNQMCRjeWEWgYjtI4hY2exECcjm4Ehpuj+gcLYzznORjtXiAPpq7g6l3HPFYOPoqQVizyLklc1z3MHd90VnvSmJgQ9VdC6e9hK23UECcZDWwpzhJEH/JfYKvLcdqyCQjOUVFt+ij7dnksK0YTHyQi38rtNXCfz95hlbLokhjyTAE2DYoiAsUfLb9kHL2Ke7G00YMBjAQlqAgOeCkhQDzh0fw7KCpPCCAjuhD6BCUbIT2yyotG9Xn5tIPN+Db1eftezeCJr/5rkLN4FO8lZvAt2krN4F+xUZ/Eu8NBZvKZ9WcfLatYmptgd7vOf+y+eZ2DDFslWNS5/FUZsBWDrXbMYyE1+lk7krjbw6aLwalyjJwviDJV7LLOICMQIdMBFBEFwnWaxqSq4O/qP/uHama8JtUXRNgsih/tnBHYf8G0luKo3XmMVGQQyJlGe96gXk8xBJpsDG4sCokxoNhYGRBmfOdnowSCQ3s0K8yHzeBeJt29iiOyiyTdvi+BbPERBTwP3yZVsQ0BPJDW3vSerDRyTjYzWYb2Lz296tHHhRex+2nw5P/01m4/u44BlLXTz3RxcFNw6mVvRDF9ZXtTWCZMRyrIsXMDETVHG6YKuP8G7BPv1p7ntx58G+wj2Mdbqfd+PfH00C4Qv5Bfas3M9tAAcJ4SKEHWf1xMnhlVMy7D6Y+BMB+1jz0SE/vGW9nPt/uBMKPRx1KA818pxqQR0/41mi0Sx6rswSoQ3AyFlDG9gy0BIcTMFiBIBh6q/5IqfRCmUy4Tib/MTeR3Em9aF4FOawPDQ/PxkWS6UM7wY0LALzZ/WRaRSOXGavXOJCPlHXb4Jw/Txgm+Zr6zkcDVkTywI9dcppp+XolHfgbPaigN7+2yHYK+I7rJIGg0tNnsTUp38QqCNwfURht0UYdwuz0Zzc1D6wDBqCHlkOFoqQRYe04eIUBJAXC+meaob3wkGxbJx20HyUi0EDKaSWohFq5qFoNT0COtAaS9lkjDpqQHWwm4UO6omMJsb+970V2ARsp+tQfzemR69M4USY7GoXSmcJIsgmX1vCAkbApifiUVtBUndAooW87QaWp3EQjDCEs9CyHkr+b1aBoz0/emDWJQ4H04lhONjHGyyKIOmVWHLmkwkbwLk8mG/l/IjKXXY78UOYCYbFjdMh7PaEMSy+OVemFYGQR7uqSTyWdi7xwvKTkrtsgAvkJkFLztWfhav7tl05N110A0mXCGqH+IREqWsQI0c4BBD6ph2jlQCNjrxieYTWYFKSACyApWSsWMFLJZhYwUkmhVjBSYlkcUaqaTEEyuAiWkiViATcjqsQEWTMKzARPMmIJg3hxhOdbAIJmGusagj+WYIKIXBAoh9EpbheQEIklmaaIKQTI2EQ05+Rtt4Csd1MSqFnMSKVHxyHhoSGi0p1N5NhZjGhraET81zRkLDX1lfCEjPKkWCI+bXIWEhb5EvRCO8Ib4QkZ53jdZ+aQnIqM2Entlqhngz2VE/+b5Ycypg08mzTWfefp3H1qKeGfxufNr3/RrR3m94B6PdQvM0aZyw16MPusZSa6CPQFMwtkMAD2yzqIEUbC6SVkYz23KsdVCDxhmtoSMXpSmrs9gtZwJG+JYyGWBDKbA7wgSI4CVf+vcLFSQZ+kmPkqFjAfq4GFgY75s/m7x5Em9Mee+GuuaAv1FDREIWDSiHsENv4MRzzvi
bLksmbQueilkFu/bQxqANwk+z0DoU4VUVgpDu7Y8k/SFC01vyGAvhcA/9GRXQFwGP2633RvCzc+TaAx+MoyNAT70REPxvwCFm9D/8uN6KwDOHlOA89JQj+XvgHUbECtCYCNIitxjaOwUdqJfP1KolPGmjaycVUAHLiW9XUG7Q7PKSLJMKvsuxNCMLouPvbTNhuS5LrZjTjBdFSz4t6MNndkG9GTUxC76KvVfGAdLz/rT1XeLz97QVVNq78DQsasoREho9SwhtvZOU14O4KE55Hz4cAaV7KZvyRLY9axN+65XwujUFBHuZGscwotAcDGFDKP3ouC5fCeJsm59/dnf2oCkCpbaAFUZGChRJIpufnz+rwSXDlA6RKOhvDcv1PgDEFjedC3JagCicjcb7zliNAKW/FeJwipfpJYXcjjpcxYM1fGL2l2SGRRVEHRG3NYHFSWLuaFdLSVUTM0DYJjZyMpkUSp3WrCGndq0NEcQqpT83fExNLIuMu95tGxZzFPiX4IfgAht1BAgtabCkNd8yh4y1+KRqCpWNvhD/VWKb5SV1rf+7IflNVHtjLhc4fPIANd/W6FRA6C//DwAA///Yv+WC" } diff --git a/metricbeat/module/beat/state/_meta/data.json b/metricbeat/module/beat/state/_meta/data.json index 6e85c8a3018..627b84f32ed 100644 --- a/metricbeat/module/beat/state/_meta/data.json +++ b/metricbeat/module/beat/state/_meta/data.json @@ -1,6 +1,11 @@ { "@timestamp": "2017-10-12T08:05:34.853Z", "beat": { + "elasticsearch": { + "cluster": { + "id": "foobar" + } + }, "state": { "beat": { "host": "2963d991095f", @@ -58,4 +63,4 @@ "address": "172.19.0.2:5066", "type": "beat" } -} \ No newline at end of file +} diff --git a/metricbeat/module/beat/state/data.go b/metricbeat/module/beat/state/data.go index 2a356407ef7..d22830e12ba 100644 --- a/metricbeat/module/beat/state/data.go +++ b/metricbeat/module/beat/state/data.go @@ -69,7 +69,8 @@ var ( func eventMapping(r mb.ReporterV2, info beat.Info, content []byte, isXpack bool) error { event := mb.Event{ - RootFields: common.MapStr{}, + RootFields: common.MapStr{}, + ModuleFields: common.MapStr{}, } var data map[string]interface{} @@ -85,6 +86,8 @@ func eventMapping(r mb.ReporterV2, info beat.Info, content []byte, isXpack bool) if isOutputES(data) { clusterUUID = getClusterUUID(data) if clusterUUID != "" { + event.ModuleFields.Put("elasticsearch.cluster.id", clusterUUID) + if event.MetricSetFields != nil { event.MetricSetFields.Put("cluster.uuid", clusterUUID) } diff --git a/metricbeat/module/beat/stats/_meta/data.json 
b/metricbeat/module/beat/stats/_meta/data.json index ecda8583492..361d56878f0 100644 --- a/metricbeat/module/beat/stats/_meta/data.json +++ b/metricbeat/module/beat/stats/_meta/data.json @@ -2,6 +2,11 @@ "@timestamp": "2017-10-12T08:05:34.853Z", "beat": { "id": "c4c9bc08-e990-4529-8bf5-715c00fa6615", + "elasticsearch": { + "cluster": { + "id": "foobar" + } + }, "stats": { "beat": { "host": "2963d991095f", @@ -48,7 +53,8 @@ "config": { "running": 3, "starts": 3, - "stops": 0 + "stops": 0, + "reloads": 1 }, "output": { "events": { @@ -133,4 +139,4 @@ "name": "beat", "type": "beat" } -} \ No newline at end of file +} diff --git a/metricbeat/module/beat/stats/_meta/fields.yml b/metricbeat/module/beat/stats/_meta/fields.yml index 56b598c7d05..539a4071b4c 100644 --- a/metricbeat/module/beat/stats/_meta/fields.yml +++ b/metricbeat/module/beat/stats/_meta/fields.yml @@ -392,6 +392,8 @@ type: short - name: stops type: short + - name: reloads + type: short - name: output type: group description: > diff --git a/metricbeat/module/beat/stats/data.go b/metricbeat/module/beat/stats/data.go index 5e95f27a22e..096dc028ba7 100644 --- a/metricbeat/module/beat/stats/data.go +++ b/metricbeat/module/beat/stats/data.go @@ -88,10 +88,11 @@ var ( "total": c.Int("total"), }), }), - "config": c.Dict("config.module", s.Schema{ - "running": c.Int("running"), - "starts": c.Int("starts"), - "stops": c.Int("stops"), + "config": c.Dict("config", s.Schema{ + "running": c.Int("module.running"), + "starts": c.Int("module.starts"), + "stops": c.Int("module.stops"), + "reloads": c.Int("reloads"), }), }), "state": c.Dict("metricbeat.beat.state", s.Schema{ @@ -110,7 +111,7 @@ var ( } ) -func eventMapping(r mb.ReporterV2, info beat.Info, content []byte, isXpack bool) error { +func eventMapping(r mb.ReporterV2, info beat.Info, clusterUUID string, content []byte, isXpack bool) error { event := mb.Event{ RootFields: common.MapStr{}, ModuleFields: common.MapStr{}, @@ -118,10 +119,13 @@ func eventMapping(r 
mb.ReporterV2, info beat.Info, content []byte, isXpack bool) } event.RootFields.Put("service.name", beat.ModuleName) - event.ModuleFields = common.MapStr{} event.ModuleFields.Put("id", info.UUID) event.ModuleFields.Put("type", info.Beat) + if clusterUUID != "" { + event.ModuleFields.Put("elasticsearch.cluster.id", clusterUUID) + } + var data map[string]interface{} err := json.Unmarshal(content, &data) if err != nil { diff --git a/metricbeat/module/beat/stats/data_test.go b/metricbeat/module/beat/stats/data_test.go index 5a58a7c7ee1..ee4b0f38957 100644 --- a/metricbeat/module/beat/stats/data_test.go +++ b/metricbeat/module/beat/stats/data_test.go @@ -42,12 +42,14 @@ func TestEventMapping(t *testing.T) { Beat: "helloworld", } + clusterUUID := "foo" + for _, f := range files { input, err := ioutil.ReadFile(f) require.NoError(t, err) reporter := &mbtest.CapturingReporterV2{} - err = eventMapping(reporter, info, input, true) + err = eventMapping(reporter, info, clusterUUID, input, true) require.NoError(t, err, f) require.True(t, len(reporter.GetEvents()) >= 1, f) diff --git a/metricbeat/module/beat/stats/stats.go b/metricbeat/module/beat/stats/stats.go index 7058a54614b..7c260cf3e0a 100644 --- a/metricbeat/module/beat/stats/stats.go +++ b/metricbeat/module/beat/stats/stats.go @@ -18,6 +18,8 @@ package stats import ( + "github.com/pkg/errors" + "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/mb/parse" "github.com/elastic/beats/v7/metricbeat/module/beat" @@ -66,5 +68,36 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { return err } - return eventMapping(r, *info, content, m.XPackEnabled) + clusterUUID, err := m.getClusterUUID() + if err != nil { + return err + } + + return eventMapping(r, *info, clusterUUID, content, m.XPackEnabled) +} + +func (m *MetricSet) getClusterUUID() (string, error) { + state, err := beat.GetState(m.MetricSet) + if err != nil { + return "", errors.Wrap(err, "could not get 
state information") + } + + clusterUUID := state.Monitoring.ClusterUUID + if clusterUUID != "" { + return clusterUUID, nil + } + + if state.Output.Name != "elasticsearch" { + return "", nil + } + + clusterUUID = state.Outputs.Elasticsearch.ClusterUUID + if clusterUUID == "" { + // Output is ES but cluster UUID could not be determined. No point sending monitoring + // data with empty cluster UUID since it will not be associated with the correct ES + // production cluster. Log error instead. + return "", beat.ErrClusterUUID + } + + return clusterUUID, nil } diff --git a/metricbeat/module/kibana/_meta/fields.yml b/metricbeat/module/kibana/_meta/fields.yml index 633cbadde96..05a9818f2ae 100644 --- a/metricbeat/module/kibana/_meta/fields.yml +++ b/metricbeat/module/kibana/_meta/fields.yml @@ -63,3 +63,5 @@ - name: kibana type: group fields: + - name: elasticsearch.cluster.id + type: keyword diff --git a/metricbeat/module/kibana/fields.go b/metricbeat/module/kibana/fields.go index 815dad6c853..2c728c5076d 100644 --- a/metricbeat/module/kibana/fields.go +++ b/metricbeat/module/kibana/fields.go @@ -32,5 +32,5 @@ func init() { // AssetKibana returns asset data. // This is the base64 encoded zlib format compressed contents of module/kibana. 
func AssetKibana() string { - return "eJzMWUtv4zYQvudXDHzZy0ZADnvxoWjRFmhRbFCkDXooCmMsjS02FKlyqCTury+oh60H9bAkd9eHIJA03/fNDDlDje7hhU5beBF7VHgHYIWVtIXNL/mFzR1ARBwakVqh1Ra+uQMAKG5CoqNM0h0Ax9rYXajVQRy3cEDJ7qohSci0haMDZrJWqCNv4c8Ns9x8hE1sbbr56w7gIEhGvM2x70FhQpWiHVu0nN8AsKfUoRmdpeWVumHd2IqE2GKSnu9U1igFcu1qijbewubbs8WmA1YoCQxxqhXTzj0aJPg+Ebs0zx3pggQJ9xE6i4znsPgBKnjNQUKJNqfgYIh2Qu32J0uziMagKsrU6JCYgyx1frvnEiGlmMXZxPLFr3qilBYTpgGLf2knRSLsEk4/YuB3OtQqzIwhlW8NRaHbQrNcHkHq8dsQi8jZMNldrnZJqsfAe4KgOZAao+Ahmbm+2tZt4E+LgD/1Az8sQ37wQFcxpFcXOal1uotI4mlJPtpYvg1h6J+M2HJgtUU5r27lCC2ADn4kuFygs9ZYxeKDuXDVKijftA63qPCVDB5pOR2+HgfKfpaJaCIHk3kVIQWlQRPo6qZZdegOed3Y/TwngupXngxyrwFVBCZT985pSMgaEXJQe7x5QKh+bXl1ia3YXCS+0OlNm/a9htBSmlBsUYUEz88//+AlcX/XIulgVSRCRfQ+l+UREwJ9KNk+MAhlySiUHtiKMNZs1/LKYfV6Zg0qTt2BEKPIEPNCVrfIyXzgM2u+rhyBl/6VDAutFpL6UM77RGHKsfZHc6+1JGzTNyj+iMnGZMDGVNHtMyEjEAx4Ri+u+QW0D4ZLkukCSyht7IOtGKUOUU7fFOd+105SZeTW65FMtwTVTvvwFdafRmUdFtkH1Xa3k8hpoc242YrmCBmqRZM09FXjdreCvo51+SXiaLDIqDVZ27OBjMMl67ct7eO0tyn2Xd655T+4rfsrdIXxheO3XmP1rNhnfG5ME+FHbYbA/8xQCNbqXF3ZX7iXjS7KSd1N+/nnVPRIsDX6qoreNX8q+tJslFSiPWiTLAZYKqSYFfSa+wIMA0GuQ/eNrrosUqvjIFT+TrsSVsYULYC6nLewG9U1wvbgWxQX6BjlYXeQGtsloonyaRWUh3kwk8drMBTrsRabJXsyrsmGUpCyUKOABCMCq/OiV3SNAB61JbAxWtgb/cZkGEJUwKQiSDJpRSoJWLh/UZHOuIFodTVDqT2cIwMykzvLWtAqpI95U7IxnXJ4Q/cZExBb3EvBMUV12MB/KC8GR5NL3kikfi3gqvP1VQfPibPE0WSed19nJDzZdGiM1gRh9yoU9azvkWAB/Oh4wPFAzgNCQTELp1CrqE3ailRM2N35Q1VhVE+VPofck8PxrpdX0J6kjZRO12bQbqHPeNQBgN8de+EASvemailyO6pc6C7CfvR60f5i6n9yup0C2J+mS+79+PD/Cf+M7wFoGQGnGFKuqBn/Rx1R8DdXTn2ckIje/TvizgS5zzm2q+i1IPfuvNZke61iCfBUlvlQZ8peVTQbp0TPlH2tQF1aXzXWL/raGxmqUVP/sa/9lWEtZcVGVx19PZmrjdzn5e+phCg+J89MVmvYPyca3xUfHs4u5XpGGketdTQ/bswR8BnfRZIlEwX0vtpdPc37LUcoQ1y7c/3g7vaDH/9h66sdkTlhfsnleCEo9tlt5hvlJg5uNefI102g3b6RMv8Gt372S/ScrCf7/ooxu3t8LuC6+wEm1KFJr0uwQpX+vmCpEfa/k4DnI/LV770TNFVd1+VKsG19B6h+U16gh3svjE8KJukFT7ert97hiMKEXnxLpQ1x/wUAAP//yfELdw==" + return 
"eJzMWU2P2zYQve+vGPiSS1bAHnLxoWjRFmhRJCjSBj0UhTGWxhYbilQ51O66v74gJdn6oCxZkpv4sFhI4ntvZsgZcvgIn+m0hc9ijwofAKywkraw+cU/2DwAJMSxEbkVWm3hmwcAgPIlZDopJD0AcKqN3cVaHcRxCweU7J4akoRMWzg6YCZrhTryFv7cMMvNW9ik1uabvx4ADoJkwluP/QgKM6oV7diiZf8CwJ5yh2Z0kVdPmgObg63IiC1m+flNPRqlQG48zdGmW9h8ex6x6YGVSiJDnGvFtHOfRhm+TsSuhntD+iBRxkOEbkTBc1jCADW85iijTJtTdDBEO6F2+5OlWURjUDVlbnRMzFGRO7vdd5mQUszibGOF/Fd/UUlLCfOIxb+0kyITdglnGDEKGx1rFRfGkPJLQ1HsltAsk0eQBuw2xCJxY5jszqtdEuox8AEnaI6kxiR6ymbOr+7oLvC7RcDvhoGfliE/BaBrH9Kz85zUOt8lJPG0JB5drNCCMPRPQWw5stqinJe3PEIHoIefCK4m6Kw5VrOEYC5cjQzKd83DHSp8JoNHWk6Hz8crab8oRDKRg8k8i5iiakAb6OaiSRLZipgJTZxGsSzYkokCYj7T6UWbpAdQl/jegCa7+wW2FPWv2lp4twGqBEyhHp3XICNrRMxR4/P2DqP+de1rSuw4d9imgNBKmlBsUcUEnz79/EOQxP1di6SHVZMIldDrXJYPmBHoQ8X2hkEoS0ahDMDWhKlmu5ZVDmvQMmtQce52lJgkhpgXsrpVQuYNn1n9vHIEQfpnMiy0WkgaQjmvE4U5pzrszb3WkrBL36L4IyWbkgGbUk23L4RMQDDgGb18FhbQ3VkuCaZzLKG0aQi2ZpQ6Rjl9UZwLZjdI9SA3X49k+imocVyArzD/tFLzdZFDUF1ze4Gc5tqC27VsjpBruWiShqFs3C13MFTyLr9MHA2WEbWm6Fp2JeJwifp9U/s47X2SfZ93bvqP7mv+ClVhfOKER68xe1asMyEzpokIo7ZdEP7mmgvWqlx92V+4lo1OyknVTYf552T0RLA1+qaM3h/+saxLs1FyifagTbYYYKmQstkwODzkYLji5Cb0UO+rzyK1Ol6F8ofilbAKpmQB1GW/hX2vruG2p9CkuECnKA+7g9TYTRFtlHeroDzNg5ncn4Nrvh4rsUW2J+OKbCwFKQsNCsgwIbDaJ72yakTwQVsCm6KFvdEvTIYhRgVMKoGskFbkkoCF+xcV6YJbiFbXTZjGxx4ZkJncXtaCVjG99UXJpnTy8IYeCyYgtriXglNKmrBReFNedp4mp7wRT/1awtX765s2nhObkaPBPK++Xk958tBrfbg2CLujUDIwv0ecBfCj4wHHA54HhIKymU6xVkmXtOOplLC/8q9lhVE9dfgc8kAMx6uez6ADQRtJna7MoN3C0OBRAwB+d+ylASjdSdVS4lZUNdGdh8PozaT9xdT/5HQ7BbA/TZc8eHvx/wl/j68RaJkA5xiTV9T2/wedUPQ310a9nRCIwfU7Ys4EuZ88tsvoDScPrrxOa3ytZAnwsUrzsS6UvSlptnaJgTb9Wo66lL76XqCsay9kqEFNw9u+7jXFWsrKha56+gYi1+jZz4vfxwqivI+eGazObcEcb3xX3lycTfJ6RgpHo3S0b0fmCHiPryIrsokCBo92N3fzfvMIlYsbb25v3N2/8RPebH21LTInLCy5ai9E5Tq7T3+jWsTRvfocft5E2q0bKf0l3vrRr9A92UD0wxljdvV4X8L11wNMyEOTjkuwQpb+vmRpEA6fSSBwC33zuXeCprrqulgJf0va1zHtAH299sJ4p2CSXghUu2bpve5RmFCL76m0Je6/AAAA//9Bvx66" } diff --git a/metricbeat/module/kibana/stats/data.go b/metricbeat/module/kibana/stats/data.go index 
23696d98670..79e6454f805 100644 --- a/metricbeat/module/kibana/stats/data.go +++ b/metricbeat/module/kibana/stats/data.go @@ -123,7 +123,7 @@ func eventMapping(r mb.ReporterV2, content []byte, isXpack bool) error { event.Error = elastic.MakeErrorForMissingField("cluster_uuid", elastic.Kibana) return event.Error } - event.RootFields.Put("elasticsearch.cluster.id", elasticsearchClusterID) + event.ModuleFields.Put("elasticsearch.cluster.id", elasticsearchClusterID) // Set service ID uuid, err := dataFields.GetValue("uuid") From eef7d855f45b5a34b785fb01876722bb427e40b6 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Tue, 28 Dec 2021 02:05:32 -0500 Subject: [PATCH 50/57] [Automation] Update elastic stack version to 8.1.0-b989cb5c for testing (#29618) Co-authored-by: apmmachine --- testing/environments/snapshot-oss.yml | 6 +++--- testing/environments/snapshot.yml | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/testing/environments/snapshot-oss.yml b/testing/environments/snapshot-oss.yml index 2b252a4a60c..72ecd550e0b 100644 --- a/testing/environments/snapshot-oss.yml +++ b/testing/environments/snapshot-oss.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.1.0-befff95a-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.1.0-b989cb5c-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:9200/_cat/health?h=status | grep -q green"] retries: 300 @@ -21,7 +21,7 @@ services: - "script.context.template.cache_max_size=2000" logstash: - image: docker.elastic.co/logstash/logstash-oss:8.1.0-befff95a-SNAPSHOT + image: docker.elastic.co/logstash/logstash-oss:8.1.0-b989cb5c-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -31,7 +31,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.1.0-befff95a-SNAPSHOT + image: 
docker.elastic.co/kibana/kibana:8.1.0-b989cb5c-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:5601/api/status?v8format=true | grep -q '\"overall\":{\"level\":\"available\"'"] retries: 600 diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index d46af8af39a..9c508bb04ff 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.1.0-befff95a-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.1.0-b989cb5c-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:9200/_cat/health?h=status | grep -q green"] retries: 300 @@ -37,7 +37,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.1.0-befff95a-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.1.0-b989cb5c-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:5601/api/status?v8format=true | grep -q '\"overall\":{\"level\":\"available\"'"] retries: 600 From 7c0428e25361760c8ba93be9faa9ddf8e14da999 Mon Sep 17 00:00:00 2001 From: martin Date: Tue, 28 Dec 2021 17:55:45 +0800 Subject: [PATCH 51/57] Fix gcp metrics metricset apply aligner to all metric_types (#29513) (#29514) Co-authored-by: endorama <526307+endorama@users.noreply.github.com> --- CHANGELOG.next.asciidoc | 1 + .../metricbeat/module/gcp/metrics/metrics_requester.go | 9 ++++----- x-pack/metricbeat/module/gcp/metrics/metricset.go | 10 +++++++++- 3 files changed, 14 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 419b02cb29b..b99794d9a4f 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -96,6 +96,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Use xpack.enabled on SM modules to write into .monitoring indices when using Metricbeat standalone {pull}28365[28365] - Fix 
in rename processor to ingest metrics for `write.iops` to proper field instead of `write_iops` in rds metricset. {pull}28960[28960] - Enhance filter check in kubernetes event metricset. {pull}29470[29470] +- Fix gcp metrics metricset apply aligner to all metric_types {pull}29514[29513] *Packetbeat* diff --git a/x-pack/metricbeat/module/gcp/metrics/metrics_requester.go b/x-pack/metricbeat/module/gcp/metrics/metrics_requester.go index e5cf8c8b05d..d518acc0014 100644 --- a/x-pack/metricbeat/module/gcp/metrics/metrics_requester.go +++ b/x-pack/metricbeat/module/gcp/metrics/metrics_requester.go @@ -69,22 +69,21 @@ func (r *metricsRequester) Metric(ctx context.Context, serviceName, metricType s return } -func (r *metricsRequester) Metrics(ctx context.Context, sdc metricsConfig, metricsMeta map[string]metricMeta) ([]timeSeriesWithAligner, error) { +func (r *metricsRequester) Metrics(ctx context.Context, serviceName string, aligner string, metricsToCollect map[string]metricMeta) ([]timeSeriesWithAligner, error) { var lock sync.Mutex var wg sync.WaitGroup results := make([]timeSeriesWithAligner, 0) - aligner := sdc.Aligner - for mt, meta := range metricsMeta { + for mt, meta := range metricsToCollect { wg.Add(1) metricMeta := meta go func(mt string) { defer wg.Done() - r.logger.Debugf("For metricType %s, metricMeta = %d", mt, metricMeta) + r.logger.Debugf("For metricType %s, metricMeta = %d, aligner = %s", mt, metricMeta, aligner) interval, aligner := getTimeIntervalAligner(metricMeta.ingestDelay, metricMeta.samplePeriod, r.config.period, aligner) - ts := r.Metric(ctx, sdc.ServiceName, mt, interval, aligner) + ts := r.Metric(ctx, serviceName, mt, interval, aligner) lock.Lock() defer lock.Unlock() results = append(results, ts) diff --git a/x-pack/metricbeat/module/gcp/metrics/metricset.go b/x-pack/metricbeat/module/gcp/metrics/metricset.go index d284d80ca5d..e121d6a3a02 100644 --- a/x-pack/metricbeat/module/gcp/metrics/metricset.go +++ 
b/x-pack/metricbeat/module/gcp/metrics/metricset.go @@ -165,7 +165,15 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { func (m *MetricSet) Fetch(ctx context.Context, reporter mb.ReporterV2) (err error) { for _, sdc := range m.MetricsConfig { m.Logger().Debugf("metrics config: %v", sdc) - responses, err := m.requester.Metrics(ctx, sdc, m.metricsMeta) + // m.metricsMeta contains all metrics to be collected, not just the one in the current MetricsConfig. + // this loop filters the metrics in metricsMeta so requester.Metrics can collect only the appropriate + // ones. + // See https://github.com/elastic/beats/pull/29514 + metricsToCollect := map[string]metricMeta{} + for _, v := range sdc.MetricTypes { + metricsToCollect[sdc.AddPrefixTo(v)] = m.metricsMeta[sdc.AddPrefixTo(v)] + } + responses, err := m.requester.Metrics(ctx, sdc.ServiceName, sdc.Aligner, metricsToCollect) if err != nil { err = errors.Wrapf(err, "error trying to get metrics for project '%s' and zone '%s' or region '%s'", m.config.ProjectID, m.config.Zone, m.config.Region) m.Logger().Error(err) From ffbc6e6747c6e80b114ba5b772502dbb9fbea137 Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Tue, 28 Dec 2021 12:09:52 +0100 Subject: [PATCH 52/57] ci: enable 7.17 branch (#29602) --- .ci/schedule-daily.groovy | 3 ++- .ci/schedule-weekly.groovy | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.ci/schedule-daily.groovy b/.ci/schedule-daily.groovy index 2211dec4c49..5c1cc904f5c 100644 --- a/.ci/schedule-daily.groovy +++ b/.ci/schedule-daily.groovy @@ -25,7 +25,8 @@ pipeline { runBuild(quietPeriod: 2000, job: 'Beats/beats/8.0') // This should be `current_7` bump.getCurrentMinorReleaseFor7 or // `next_minor_7` bump.getNextMinorReleaseFor7 - runBuild(quietPeriod: 4000, job: 'Beats/beats/7.16') + runBuild(quietPeriod: 4000, job: 'Beats/beats/7.17') + runBuild(quietPeriod: 6000, job: 'Beats/beats/7.16') } } } diff --git a/.ci/schedule-weekly.groovy b/.ci/schedule-weekly.groovy 
index c2d96964575..6d5c1a7f007 100644 --- a/.ci/schedule-weekly.groovy +++ b/.ci/schedule-weekly.groovy @@ -25,7 +25,8 @@ pipeline { runBuild(quietPeriod: 1000, job: 'Beats/beats/8.0') // This should be `current_7` bump.getCurrentMinorReleaseFor7 or // `next_minor_7` bump.getNextMinorReleaseFor7 - runBuild(quietPeriod: 2000, job: 'Beats/beats/7.16') + runBuild(quietPeriod: 2000, job: 'Beats/beats/7.17') + runBuild(quietPeriod: 3000, job: 'Beats/beats/7.16') } } } From 2b23068a87f7ef3cb7469761169476469ab92704 Mon Sep 17 00:00:00 2001 From: Kevin Lacabane Date: Tue, 28 Dec 2021 16:48:32 +0100 Subject: [PATCH 53/57] kibana.stats: extract correct index property (#29622) * kibana.stats: extract correct index property * changelog entry --- CHANGELOG.next.asciidoc | 1 + metricbeat/module/kibana/stats/_meta/data.json | 4 ++-- metricbeat/module/kibana/stats/data.go | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index b99794d9a4f..6e0eb0967ac 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -97,6 +97,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Fix in rename processor to ingest metrics for `write.iops` to proper field instead of `write_iops` in rds metricset. {pull}28960[28960] - Enhance filter check in kubernetes event metricset. 
{pull}29470[29470] - Fix gcp metrics metricset apply aligner to all metric_types {pull}29514[29513] +- Extract correct index property in kibana.stats metricset {pull}29622[29622] *Packetbeat* diff --git a/metricbeat/module/kibana/stats/_meta/data.json b/metricbeat/module/kibana/stats/_meta/data.json index dde2be30ac3..46e85773eeb 100644 --- a/metricbeat/module/kibana/stats/_meta/data.json +++ b/metricbeat/module/kibana/stats/_meta/data.json @@ -16,7 +16,7 @@ "host": { "name": "0.0.0.0" }, - "index": "b04775fa6831", + "index": ".kibana", "name": "b04775fa6831", "os": { "distro": "CentOS", @@ -88,4 +88,4 @@ "type": "kibana", "version": "7.14.0" } -} \ No newline at end of file +} diff --git a/metricbeat/module/kibana/stats/data.go b/metricbeat/module/kibana/stats/data.go index 79e6454f805..a2148f64e84 100644 --- a/metricbeat/module/kibana/stats/data.go +++ b/metricbeat/module/kibana/stats/data.go @@ -52,7 +52,7 @@ var ( "uuid": c.Str("kibana.uuid"), "name": c.Str("kibana.name"), - "index": c.Str("kibana.name"), + "index": c.Str("kibana.index"), "host": s.Object{ "name": c.Str("kibana.host"), }, From 6794652a17dddbba371c2d27e1f71674de70a813 Mon Sep 17 00:00:00 2001 From: Marc Lopez Rubio Date: Wed, 29 Dec 2021 00:37:14 +0800 Subject: [PATCH 54/57] apm-server: Remove `data_streams.enabled` flag (#29168) * apm-server: Remove `data_streams.enabled` flag Removes the `"apm-server.data_streams.enabled` flag in the `apm-server` spec, since it's been marked as deprecated in `8.0`. 
Signed-off-by: Marc Lopez Rubio --- x-pack/elastic-agent/pkg/agent/program/supported.go | 2 +- x-pack/elastic-agent/spec/apm-server.yml | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/x-pack/elastic-agent/pkg/agent/program/supported.go b/x-pack/elastic-agent/pkg/agent/program/supported.go index 70e7c6f716f..220f187a3fe 100644 --- a/x-pack/elastic-agent/pkg/agent/program/supported.go +++ b/x-pack/elastic-agent/pkg/agent/program/supported.go @@ -25,7 +25,7 @@ func init() { // spec/metricbeat.yml // spec/osquerybeat.yml // spec/packetbeat.yml - unpacked := packer.MustUnpack("eJzEWll34ri2fr8/o1/vcDwU6fZd6zxg0p4gTmGCJOsNycQGJEMHM9h33f9+luQB2zipSlV3nwdWgpClra09fPvb/r9fjoc1/cfqwP/7uH47r9/+J+fsl//9hXArwy/7eA5MfwZ8RlPMaHzYEjh/cG3rQhZqgZGnYeROQ+QpK4iTUB/8LaXFPoaXfexO3CxYuEd34mUhHCVYAxmGI2XGwSmE3hHDuRE5nooX7nGyGcfuRrXczSV2eWfNE7YtJQRGETkeC6FafPv5aIt0k1HuM5LODc/JzOXv6ksAPBgA7zVQDGde7K9Pj6bhxodowsEXaht5ZIMd0lQWOd4h1J8eXOs4dSfjTYjMbIYqnWzc44QpU5qCI0ZPD2Lf2cLcEt0cIT04I+16oPpcjruTcezaTMFQeXBtfMQQKM24E5yfN+aBpKYaOU9TOTYZx0QbvYaaccL8eij1OzoTfSx+z1xbTejjvplLbUtZPe5jzK8Mo/ltvCVbPTZbmDmG6jni4HWlgdFzvG9+Kz/mG0Y7cZ/bUAMFVY2E2kzO/aF1HI+VOmUnfGnPUWLKQUZ0zJCWsfXL7Tz1R667MYW9nKLxXj6DOfuCdF+hHCTkZR+vdaXSCT4QJ2CUGVoIr2rn3I7PiA22kW3kQ7qu9lHWyGS3Z3BCHMBo0ZErk3Y+b2Q5RjbIb2c3CwyvLNSDM03v9H63b7meoUaOqZbnu+mmdZeZa7PTioNtZBl7DK0dRl7xvDF/fZ0f9JUNTs8b84jhKI3seO85WbWPb0wX4/90H8dxCEc7104SqmRsvYh3a63a01GO7iRixLaKyGZbqoGEcn/v5ZfY0z2GbVZ4+UXIkK40i6+039PZZJwS20ipHiRUi9PpfP/PX/6rDCbrNDrsN2nWCyUBHO2obRxIOo+XGthGyDtEzm4aaurueWMywoML0dgpmqgFhr5KOVPW80NC0+CAubWNhGnf1siwDbRJKt3wEGrLB/cx1J8f42kIfWUFjRPS2Ik6QEF6MKI2KJ7jfeba4IQd87yCI2XCr2esGpcQBfvyes1diDx9Bb88uBP3/GKzDeVWvl4YVq2amXJ7fqb7SogCNtOuZ5wbLfmVP2Zi7dwVax5XcKSuH/exuzHO1JmfA3hNqB4cwtywbs8YRWRbCl4YR6LRc/uc081IjG2EGUUaO2Hb0EVIdXdPD8i6zik3UsqtzP0dH4gNCmRdG3nl//Ue1pWK64psQJEtzn6lg/twf4+h/yb1pwcJsS8Pk40SY5SwUDX4Cl5Zbep1yHF5Sy/IZ6EO8hUKRm41r0oD09qsXRE6OePrhXsb2yiZMKn6mdlivKF6IMw8r8cim2UYGqqwhadiPKW2UUSWkN9XQng9Vnf8BUP/VbglrsOJYyaRHT+4E2/Yzmo5bCvHeu
OymTvxmrXbcs0WanMn1bwisgNGU7c15mYzBC5Y9xJsL3vjHqOaoYqURPOWDt7RY3f+6GGFxtV6prKCKiM6UJ43Y+3pcTyljseQDk4rOBI2dSSP++lsYbK1DbZIEzayrM5nStt/3ow3bTugN9+s90goj4pWaBfnVQlv7GNzC3H39zisnwG5mzQ1HN6rcRlqkd4Lyx+FdlumlThy2AXPKzvi1jGCoDmT0E9jF2OpL2HnCkbea38u1cARQ18huvsgQrKIMbRKaVUKYYRbG2KDXXXWfirKXCfII7iUZyLQuvT9qZPCHU8ldkfW91NudVaqgTziIJ9If6hS4vZeV22f7MIGJV7B0SVCQdHI3EtRUg6ED1RjZxLvp5GWMLLdx0TEWD3YTyfBr+WaQS8FXRnhkbKaiBRU6U9XDu7jl/hpYiaEz+OVbRULDYzEGsJGxJzXxSX2NHAMkYjvfoGhlYcy9Ry2RBsJOJgIvxGxkXBDccX6uqeSNDgQuDyFyNuuHCX++qLEnmbl5CVUvLzcz3OyPIIjaZMzjhMC2XGNqrkyBSZJNKGl/JPgV5qCk4xFi1EWwsOZptXcgqbTxXhap8LXDVuT9eouFYrQBD0Wonmd/mRYDTlIovGhdLeNSTooNvVZ5IDLjLMjWYwaE/sKhSv4zN3IDL2ZLZeb2WS8oRpQIjQ+RTbIqH1NInt5wnCUhOJKHlUewmtxj5TVhHArxcI103l7vkJTcLeHcHMsUlI+OmKEGXlUdxh6Ks6/icDtxfJqzXfABJbhvCjR4/P298uTo2wEmu5WFEJPQTGToQtsMLSUSeoxiTzS4FUg5NpEkObvQzhKsXR3T8XzQx7BqwwT0qVR8kr1IMfQykrktG+jqgPhAVvXiNoRsGH54IpUqT9JV13B0R/C9ZvQBIwL5cYWI78Q4aBy9zNhhjBJTmwmYYsIxRh5CtIsLsJXHQIF0hSojWhRUbpqC9HXKasXYnpoPnNt/0wd9ipS1GDFIdPmbw+uU8mM2sjzXlbCjTNto1AbfAk1cBG/wdxrqq7yXtmu/NtUYKXtOd5ZVg2akdPci/qyRrbxSmxWRI9tVG0ehK0+b8yWTr3iR89x07nHMDdyPJc2kAubJrBJgZxyI7tLGZ1qzG/OPKnghQg1oR6UZ7AMKfctFfXuTe/JW1eA/XP0KsD3UkM3LJuNfddhW8hGUv8ooGknNdRylXbd1l0WIvOCkduxGQFdiRaVcE/aKO1WZjbQZPVeQQrpJ5du9SdjQjo/CxgnYbXjK9hmp94+LOJAQGAl1MdCvm3H/lrrRDC4PG9MFTvjniwSgu+I5r+Jc7h2cA61jNFeNSri1ayqaJDuH4keiXPJ6lSM3Z+fnqnOCvHc88Ys1shv6eGjyrWuekGBgXGOUHCJWqn1m8/ZAqZbTay6wQaPEWhoGBhyXlveCnrsQhQkTXxajE4hVBnVzSTUlj+8/4zL74WAB38xDEsi/SkLtau4az1EwXY17v5Gi6fmHCE6qJQvs9I+gn0Eb1C6WoMTXUBqb9SOQSQNBFRo7GO2MGvbucEhzb/MkKmGqa+Gt3n7yAkuSGuVkc26iRI55h9UM063scM5Qt4phNfdbSxLMM+S2/eb38wWZkZR0FpzxCIbH4l+szlSPGk+tFRsM6VtFy37zXp+Jr6PqNbZR/jaLWbA4HKbC04rFN9+09hJ2P9NprLMLGPiz8PyBmOM37UJiT3K2Nvk6pINkjkbn6tcPq2Zt/pZnHpnUT70YqZCir2UucZg7TPcw3yvLUsLpzVjd/4tfJHqwZnyZRc3aAkLoSiDnh5cJzMm8SBzc9tjMvq3sTivbL3OhknhoKwU4mVdZXA/w7eqJWuqD15W0651FBVzeWUTNSNawNw7mFcSog2xGh/aqU2a3dqqSV+lUl+XgLszv29Ua7cw3U2JfRPsVUhZq7r6c/a3G/j0TRlKWFvp5L10ULlWDYtrOWtZkKhy7d8GiVFJ1ucmJzZg0WTUkO71WjN+V4HFaN
6cp2IUbi5REeI1IfoqQiYZ1I8kM0ljB2lNsI8uRLseQn13WsH50F51WDk9TZq59b4HItcJXrENeIjAMXKGCeF7gvdOjj3RfaVH5t7pSZLcwyTuqbabGRf6Bznh1hHp5pmm82/tXVDtckf012Ftth2f+jbZho4tXZXyNvL14WMrxLcZg9ZnKCx2P0ocwkjAtbtnqzR5pnpjF1mIxq37GC553pXzAzjUgRzIZ0gbJti/s/nQhVXzP2GNQWj2uXN1G2FK2RhDPvvBM7bLnB9pbPRsazzEIt2lzMZP+aGMDzZIsA1kHJKsXxrtsSgdOqxR6R+vi138dTO+uLZ1wpM/u/mxa1icZL16ywZonIUNEpoGJSVR5cBVZ6yV/3q0zApes3bjEnPrSLVyzmcpnM80VltzRXmXruAonfGrKMGOX2HAwhSk97m5pmASJsYrmirHyFdCWWIbp8a/LGO7EvehLR9qlrPXHB2iYYbzqGroKxTskYAvGvjSbnYON+s8UaquqS7ia8Lkfea/naaXAfvddmHhR4zvR899BGUHmN8upO3Zfhk3ZY7kpBhoam7fyZvx53yuG48ztkZyHTbsaz/P0AqYSTnYrdBTOpO6id5CiN/CBRW+J+kvUV6uJvQwif/ZQFC+zt42dMDxXiBQKGfbyhCrtxKqTr1W8arDbx4UGAUqFbjaVr7Nh9acaxowgkzJqQw67fhn3m64nrEWHQinJyI5lYuBbbCJIO2vm4aqccHI24p1vy6CX1+WYLncscchHrUvE0ZBvoK+DEwz7p8JxwecjwQYl+2FoXO9z8V2dU05EBeYR5ZxJqzmO4LXUEsSwiPhlKXBp00rYxjQd2oldsI2+FIbt+QNBKit7p5eZL3XOE8dUJBu5kTzGdX9c+MwtghQ8szHFfSVEjSWwDCEWGnq/obzbd76kPUblbpR2ffUlfVYLU8lZwvo9IFri198h9MLNeOyBkZC7Ot73Kncu7VnCyzcnf1ENOPSDg4YJVuMTEUC8LThJWUSWlWcbuMrE2lPHY5WJIyerApRjeMK+Uq3PVVzn607Sp9+9By3O+SAywD2N/Oz352wBrgvypcPrvXlNM2N2jcLb/xhi/Hf3pb8DPeM9OgQ2ckr5SDFKLl8JxedCx9Hm/gfy8erTO5fN1/epot7HZXriD3iB3cStIFACaDLXNFeu+b6u6Ch7ivY6hk74Fjfj/RZmDGkWTnl1mjQjps40QPdpa00MuN2G/XbHG7ruc9wxn0S4W/lmeV3AVj/dq66x7F3c4iVilwpcwn/rSaNqj7U7nt6QJ28KZ/t25aNc6IpffxxGsoNBBo76TMdX276hkPFVdbHNbVeGnJnMwDkPs859vBB5Qvop3hHyTU2wO87ecf98Y/T+i0fQn26f40gyNfdzvmZ6paKkTfqd88/0Tn/POJrd8GhdZLQHYJTNGmtj2Q07c59t2PuRZ/oandeipPndp7OpK+fD1+EMwqKAKPpbvqnoDJxm38pIrt14f/C7lNjS9/ZbfhOD+5m4w798ReVXEMvxXRfchHzEsOd/G64E1o8P4Zpu/Q6rOhuPcR5LG1ru9KA0im9HBGyMxbZvdIrp1lQ1njfKLvEnLu5Cobqhci3Iu+dVrY1ctWSf7WPX1vpzn233ErRO45Fu2f+cd7jJ/mFtvF+wC1cQui/4QFubYhf6Mrmfic33udneylpOMX83a2s6S///x//CgAA///dB29r") + unpacked := 
packer.MustUnpack("eJzEWll34ri2fr8/o1/vcDwU6fZd6zxg0p4gTmGCJOsNycQGJEMHM9h33f9+luQB2zipSlV3nwdWgpClra09fPvb/r9fjoc1/cfqwP/7uH47r9/+J+fsl//9hXArwy/7eA5MfwZ8RlPMaHzYEjh/cG3rQhZqgZGnYeROQ+QpK4iTUB/8LaXFPoaXfexO3CxYuEd34mUhHCVYAxmGI2XGwSmE3hHDuRE5nooX7nGyGcfuRrXczSV2ebRFusko9xlJ54bnZObyd/UlAB4MgPcaKIYzL/bXp0fTcONDNOHgC7WNPLLBDmkqixzvEOpPD651nLqT8SZEZjZD1Zk27nHClClNwRGjpwex72xhbolujpAenJF2PVB9LsfdyTh2baZgqDy4Nj5iCJRm3AnOzxvzQFJTjZynqRybjGOijV5DzThhfj2U+hmdiT4Wv2eurSb0cd/MpbalrB73MeZXhtH8Nt6SrR6bLcwcQ/UccfC60sDoOd43v5Uf8w2jnbiPbaiBgqpGQm0m5/7QOo7HSp2yE7605ygx5SAjOmZIy9j65Xae+iPX3Zjivk/ReC+fwZx9QbqvUA4S8rKP17pS6QQfiBMwygwthFe1c27HZ8QG28g28iFdV/soa2Sy2zM4IQ5gtOjIlUk7nTeyHCMb5LezmwWGVxbqwZmmd3q/27dcz1Ajx1TL891007rLzLXZacXBNrKMPYbWDiOveN6Yv77OD/rKBqfnjXnEcJRGdrz3nKzaxzemi/F/uo/jOISjnWsnCVUytl7Eu7VW7ekoR3cSMWJbRWSzLdVAQrm/9/JL7OkewzYrvPwiZEhXmsVX2u/pbDJOiW2kVA8SqsXpdL7/5y//VQaDdRod9ps064WCAI521DYOJJ3HSw1sI+QdImc3DTV197wxGeHBhWjsFE3UAkNfpZwp6/khoWlwwNzaRsK0b2tk2AbaJJVueAi15YP7GOrPj/E0hL6ygsYJaexEHaAgPRhRGxTP8T5zbXDCjnlewZEy4dczVo1LiIJ9eb3mLkSevoJfHtyJe36x2YZyK18vDKtWzUy5PT/TfSVEAZtp1zPOjZb8yh8zsXbuijWPKzhS14/72N0YZ+rMzwG8JlQPDmFuWLdnjCKyLQUvjCPR6Ll9zulmJMY2wowijZ2wbegiJLq7pwdkXeeUGynlVub+jg/EBgWyro288v96D+tKxXVFNqDIFme/0sF9uL/H0H+T+tODhNiXh8lGiTFKWKgafAWvrDb1OuS4vKUX5LNQB/kKBSO3mleF8Wlt1q4InZzx9cK9jW2UTJhU/cxsMd5QPRBmntdjkc0yDA1V2MJTMZ5S2ygiS8jvKyG8Hqs7/oKh/yrcEtfhxDGTyI4f3Ik3bGe1HLaVY71x2cydeM3abblmC7W5k2peEdkBo6nbGnOzGQIXrHsJtpe9cY9RzVBFSqJ5Swfv6LE7f/SwQuNqPVNZQZURHSjPm7H29DieUsdjSAenFRwJmzqSx/10tjDZ2gZbpAkbWVbnM6XtP2/Gm7Yd0Jtv1nsklEdFK7SL86qEN/axuYW4+3sc1s+A3E2aGg7v1bgMtUjvheWPQrst00ocOeyC55UdcesYQdCcSeinsYux1JewcwUj77U/l2rgiKGvEN19ECFZxBhapbQqhTDCrQ2xwa46az8VZa4T5BFcyjMRaF36/tRJ4Y6nErsj6/sptzor1UAecZBPpD9UKXF7r6u2T3ZhgxKv4OgSoaBoZO6lKCkHwgeqsTOJ99NISxjZ7mMiYqwe7KeT4NdyzaCXgq6M8EhZTUQKqvSnKwf38Uv8NDETwufxyraKhQZGYg1hI2LO6+ISexo4hkjEd7/A0MpDmXoOW6KNisjxEuE3IjYSbiiuWF/3VJIGBwKXpxB525WjxF9flNjTrJy8hIqXl/t5TpZHcCRtcsZxQiA7rlE1V6bAJIkmtJR/EvxKU3CSsWgxykJ4ONO0mlvQdLoYT+tU+Lpha7Je3
aVCEZqgx0I0r9OfDKshB0k0PpTutjFJB8WmPosccJlxdiSLUWNiX6FwBZ+5G5mhN7PlcjObjDdUA0qExqfIBhm1r0lkL08YjpJQXMmjykN4Le6RspoQbqVYuGY6b89XaAru9hBujkVKykdHjDAjj+oOQ0/F+TcRuL1YXq35DpjAMpwXJXp83v5+eXKUjUDT3YpA6CkoZjJ0gQ2GljJJPSaRRxq8CoRcmwjS/H0IRymW7u6peH7II3iVYUK6NEpeqR7kGFpZiZz2bVR1IDxg6xpROwI2LB9ckSr1J+mqKzj6Q7h+E5qAcaHc2GLkFyIcVO5+JswQJsmJzSRsEaEYI09BmsVF+KpDoECaArURLSpKV20h+jpl9UJMD81nru2fqcNeRYoarDhk2vztwXUqmVEbed7LSrhxpm0UaoMvoQYu4jeYe8LNWAjVorxXtiv/NhVYaXuOd5ZVg2bkNPeivqyRbbwSmxXRYxtVmwdhq88bs6VTr/jRc9x07jHMjRzPpQ3kwqYJbFIgp9zI7lJGpxrzmzNPKnghQk2oB+UZLEPKfUtFvXvTe/LWFWD/HL0K8L3U0A3LZmPfddgWspHUPwpo2kkNtVylXbd1l4XIvGDkdmxGQFeiRSXckzZKu5WZDTRZfVeQQvrJpVv9yZiQzs8CxklY7fgKttmptw+LOBAQWAn1sZBv27G/1joRDC7PG1PFzrgni4TgO6L5b+Icrh2cQy1jtFeNing1qyoapPtHokfiXLI6FWP356dnqrNCPPe8MYs18lt6+KhyrateUGBgnCMUXKJWav3mc7aA6VYTq26wwWMEGhoGhpzXlreCHrsQBUkTnxajUwhVRnUzCbXlD+8/4/J7IeDBXwzDkkh/ykLtKu5aD1GwXY27v9HiqTlHiA4q5custI9gH8EblK7W4EQXkNobtWMQSQMBFRr7mC3M2nZucEjzLzNkqmHqq+Ft3j5yggvSWmVks26iRI75B9WM023scI6QdwrhdXcbyxLMs+T2/eY3s4WZURS01hyxyMZHot9sjhRPmg8tFdtMadtFy36znp+J7yOqdfYRvnaLGTC43OaC0wrFt980dhL2f5OpLDPLmPjzsLzBGON3bUJijzL2Nrm6ZINkzsbnKpdPa+atfhan3lmUD72YqZBiL2WuMVj7DPcw32vL0sJpzdidfwtfpHpwpnzZxQ1awkIoyqCnB9fJjEk8yNzc9piM/m0szitbr7NhUjcoK4V4WVcZ3M/wrWrJmuqDl9W0ax1FxVxe2UTNiBYw9w7mlYRoQ6zGh3Zqk2a3toza7Cr1dQm4O/P7RrV2C9PdlNg3wV6FlLWqqz9nf7uBT9+UoYS1lU7eSweVa9WwuJazlgWJKtf+bZAYlWR7bnJiAxZNRjURf6rXmvG7CixG8+Y8FaNwc4mKEK8J0VcRMsmgfiSZSRo7SGuCfXQh2vUQ6rvTCs6H9qrDyulp0syt9z0QuU7wim3AQwSOkTNMCN8TvHdy7InuKz0y905PkuQeJnFPtd3MuNA/yAm3jkg3zzSdf2vvgmqXO6K/Dmuz7fjUt8k2dGzpqpS3ka8PH1shvs0YtD5DYbH7UeIQRgKu3T1bpckz1Ru7yEI0bt3HcMnzrpwfwKEO5EA+Q9owwf6dzYcurJr/CWsMQrPPnQvblhKCJiae5Hfksx88Y7vM+ZHGRs+2xkMs0l3KbPyUH8r4YIME20DGIcn6pdEei9KhwxqV/vG62MVfN+OLa1snPPmzmx+7hsVJ1qu3bIDGWdggoWlQUhJVDlx1xlr5r0fLrOA1azcuMbeOVCvnfJbC+UxjtDVXlHfpCo7SGb+KEuz4FQYsTEF6n5trCiZhYryiqXKMfCWUJbZxavzLMrYrcR/a8qFmOXvN0SEaZjiPqoa+QsEeCfiigS/tZudws84Tpeqa6iK+JkzeZ/7baXoZsN9tFxZ+xPh+9NxHUHaA+e1C2p7tl3FT5khOioGm5vadvBl/zue68ThjayTXYcO+9
vMMrYCZlIPdCj2lM6mb6C2E+C1cUOF7kv4S5eVqQg+T+J8NBOXr7G1DBxzvBQKFcratDLF6q6Dq1GsVrzr85kCBUaBSgatt5dt8aM25pgEjyJScyqDTjn/m7YTrGWvRgXB6IpJTuRjYBpsI0v66aagaF4y8rVj36yL49WUJlssdexziUfsyYRTkK+jLwDTj/plwfMD5SIBx2V4YOtf7XGxX15QDcYF5ZBlnwmq+I3gNtSQhPBJOWRp82rQyhgF9p1ZiJ2yDL7VxS95AgNrq7ulF1nuN89QBBelmTjSfUd0/Nw5jiwAlz3xcQV8pQWMJDEOIlabubzjf5q0PWb9RqRuVfU9dWY/V8lRytoBOH7i2+MV3OL1QMy5rYCTEvr7Hncq9W3u2wMLd2U9EMy7t4IBRssXIVCQATxteUiahVcXpNr4ykfbU4WhFwujJqhDVOK6Qr3TbUzX32bqj9OlHz3G7Qw64DGB/Mz/73QlrgPuifPngWl9O09yofbPwxh+2GP/tbcnPcM9Ijw6RnbxSDlKMkst3ctG58HG0if+xfLzK5P518+VturjXUbmO2CN+cCdBGwiUALrMFe21a66/CxrqvoKtnrEDjvX9SJ+FGUOalVNujQbtuIkTPdBd2kojM263Ub/N4bae+wxn3CcR/laeWX4XgPVv56p7HHs3h1ipyJUyl/DfatKo6kPtvqcH1Mmb8tm+bdk4J5rSxx+nodxAoLGTPtPx5aZvOFRcZX1cU+ulIXc2A0Du85xjDx9UvoB+ineUXGMD/L6Td9wf/zit3/Ih1Kf71wiCfN3tnJ+pbqkYeaN+9/wTnfPPI752FxxaJwndIThFk9b6SEbT7tx3O+Ze9ImuduelOHlu5+lM+vr58EU4o6AIMJrupn8KKhO3+ZcislsX/i/sPjW29J3dhu/04G427tAff1HJNfRSTPclFzEvMdzJ74Y7ocXzY5i2S6/Diu7WQ5zH0ra2Kw0ondLLESE7Y5HdK71ymgVljfeNskvMuZurYKheiHwr8t5pZVsjVy35V/v4tZXu3HfLrRS941i0e+Yf5z1+kl9oG+8H3MIlhP4bHuDWhviFrmzud3LjfX62l5KGU8zf3cqa/vL///GvAAAA//9M8ln7") SupportedMap = make(map[string]Spec) for f, v := range unpacked { diff --git a/x-pack/elastic-agent/spec/apm-server.yml b/x-pack/elastic-agent/spec/apm-server.yml index 993c28498a8..0258eb9fb0f 100644 --- a/x-pack/elastic-agent/spec/apm-server.yml +++ b/x-pack/elastic-agent/spec/apm-server.yml @@ -3,7 +3,6 @@ cmd: apm-server artifact: apm-server args: [ "-E", "management.enabled=true", - "-E", "apm-server.data_streams.enabled=true", "-E", "gc_percent=${APMSERVER_GOGC:100}" ] exported_metrics: [ From 4ae6306b73bebab9cc51d366690e6d73c821792b Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 29 Dec 2021 02:50:18 -0500 Subject: [PATCH 55/57] [Automation] Update elastic stack version to 8.1.0-c1a942c7 for testing (#29630) Co-authored-by: apmmachine --- testing/environments/snapshot-oss.yml | 6 +++--- 
testing/environments/snapshot.yml | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/testing/environments/snapshot-oss.yml b/testing/environments/snapshot-oss.yml index 72ecd550e0b..eea246eaeb8 100644 --- a/testing/environments/snapshot-oss.yml +++ b/testing/environments/snapshot-oss.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.1.0-b989cb5c-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.1.0-c1a942c7-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:9200/_cat/health?h=status | grep -q green"] retries: 300 @@ -21,7 +21,7 @@ services: - "script.context.template.cache_max_size=2000" logstash: - image: docker.elastic.co/logstash/logstash-oss:8.1.0-b989cb5c-SNAPSHOT + image: docker.elastic.co/logstash/logstash-oss:8.1.0-c1a942c7-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -31,7 +31,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.1.0-b989cb5c-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.1.0-c1a942c7-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:5601/api/status?v8format=true | grep -q '\"overall\":{\"level\":\"available\"'"] retries: 600 diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 9c508bb04ff..ae706a3d2e8 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.1.0-b989cb5c-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.1.0-c1a942c7-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:9200/_cat/health?h=status | grep -q green"] retries: 300 @@ -37,7 +37,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.1.0-b989cb5c-SNAPSHOT + image: 
docker.elastic.co/kibana/kibana:8.1.0-c1a942c7-SNAPSHOT healthcheck: test: ["CMD-SHELL", "curl -s http://localhost:5601/api/status?v8format=true | grep -q '\"overall\":{\"level\":\"available\"'"] retries: 600 From 1386a04099c81f791b334616437d71c56109e0e9 Mon Sep 17 00:00:00 2001 From: Kevin Lacabane Date: Wed, 29 Dec 2021 13:27:51 +0100 Subject: [PATCH 56/57] add elasticsearch.cluster.id to logstash metricsets (#29625) * add elasticsearch.cluster.id to logstash metricsets * changelog entry --- CHANGELOG.next.asciidoc | 1 + metricbeat/docs/fields.asciidoc | 7 +++++++ metricbeat/module/logstash/_meta/fields.yml | 2 ++ metricbeat/module/logstash/fields.go | 2 +- metricbeat/module/logstash/node/_meta/data.json | 7 ++++++- metricbeat/module/logstash/node/data.go | 1 + metricbeat/module/logstash/node_stats/_meta/data.json | 11 +++++++++-- metricbeat/module/logstash/node_stats/data.go | 3 ++- 8 files changed, 29 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 6e0eb0967ac..e5152441e17 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -152,6 +152,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d - Add `container.id` and `container.runtime` ECS fields in container metricset. {pull}29560[29560] - Add `memory.workingset.limit.pct` field in Kubernetes container/pod metricset. {pull}29547[29547] - Add `elasticsearch.cluster.id` field to Beat and Kibana modules. {pull}29577[29577] +- Add `elasticsearch.cluster.id` field to Logstash module. 
{pull}29625[29625] *Packetbeat* diff --git a/metricbeat/docs/fields.asciidoc b/metricbeat/docs/fields.asciidoc index e1e256d0dc3..2a866020d37 100644 --- a/metricbeat/docs/fields.asciidoc +++ b/metricbeat/docs/fields.asciidoc @@ -46325,6 +46325,13 @@ alias to: logstash.node.state.pipeline.hash -- +*`logstash.elasticsearch.cluster.id`*:: ++ +-- +type: keyword + +-- + [float] === node diff --git a/metricbeat/module/logstash/_meta/fields.yml b/metricbeat/module/logstash/_meta/fields.yml index caa5d152974..850e5b0ba0d 100644 --- a/metricbeat/module/logstash/_meta/fields.yml +++ b/metricbeat/module/logstash/_meta/fields.yml @@ -104,3 +104,5 @@ - name: logstash type: group fields: + - name: elasticsearch.cluster.id + type: keyword diff --git a/metricbeat/module/logstash/fields.go b/metricbeat/module/logstash/fields.go index c2a1e208929..0f9ed630565 100644 --- a/metricbeat/module/logstash/fields.go +++ b/metricbeat/module/logstash/fields.go @@ -32,5 +32,5 @@ func init() { // AssetLogstash returns asset data. // This is the base64 encoded zlib format compressed contents of module/logstash. 
func AssetLogstash() string { - return "eJy8Wc2Oq0YT3fspSl5/F+lbZONFlEUSZaJEyepuogj1QBn3vUB3+sfJ5OkjsMFA/xWGGRazaFznnKouqqtrPsFXfDtBLSptmL4cAAw3NZ7g+Mt96XgAKFEXikvDRXuCbw8AAMNraERpazwAKKyRaTxBxQ4AGo3hbaVP8MdR6/r4PzhejJHHP7t3F6FMXoj2zKsTnFmtO/szx7rUpx79E7SswYeuXBtmdP8KwLzJjkUJK+8rU9OpueENasMaOb4ZrFnNmZ6sSmYuJzh+N1ocHbAv18aBmYrwCZnaN9jM1kMYIZwp1gWZzK3GMudt/vpmUDs/DXk693gIcNaKErM+ytmXa5M12GQEkpmehv3z7nKCHIMSK7st7H7T8LrmS6qwiLiAIOzAi1dsjd6UILzdQexNR7bAGiiENftxLMEGktIq1pWKXTfhzhnBXtaMTXthLS93UD2uOXgD0RWV5mKPnR/XlpADleQSa96im6UtaoOlYyC25XMh7W4Fr3MxWFN8gDHQKXBrm1dUuTjnWDPZFTuJiovSV6MgWcUgukNCZ0WvNSuk7deydfTTcy03FyWMqbHMW9aKD9JLZnbj2x+tD9uPjm+K/lE+WJmzKypW4btk3P+/WfYBe/kubTZVn4WYRiUfJiSu48MCsiAaC5VvO5+vVYW0rChMZjWrMPiJPNsOzZI8yDPWfCUK1LdQSFQFtobYC/u4Y2gD418WLd6P67wQdhNhAMx7PcDV14PhQMxmZ/M6jZj5UByGi68heYJjxAl0O3TnO/Tk8e65+A3Pwn5++xue9yS/3QmhQaN4obPVWmDRXjyivFslcHrIB9xXfPtbqPAxtEiYtPFoKPTaHr8zybo/HcLiFw2vbh33CYyyy7eRLeqen4Q24IBua301qisv0Olyd5A7zjQ+e7D9YwCI5keC7+fPv8JLexYrs8rvOSRTK6mne3yuw6yihXM6fpgNh4cfIb5xRPG/3yjg5fvgV+6/iz7zcbuJEMeLYQJ9gjEnqkVbRdHcqRNNaUotrJ1GEXU7uImpEgE2OKKZ24YiQci7H3pkb34Bcded4Q/BMbK+7nlpC9HwtrqHAfpuClUWVeXOi/aW9Zs1lVgr68xrgypxh9yu7cc7zSptyekXQWVklDW33lRn3BHXHDx8jlDOojSOd8y0n3/u+ImGncIH+rCATkghBdIQYc4ZrbNAmQU8ibdVX/RuPsfYYyNpd3aidiBlIc0BihOQHpSuJ6USw4ZhqqsqmV/w3Bh0I9PaKSaRjjalmQOVwr7WbrGJDfnnAIth/+MhtSnbj4zAzXYdSD8YeveqEJhi+QlJ5aD7aRIpFgBYBiHX/F/cufHueu5dwZMtOOy6b+G2laAVnmnmCLiTlsnwIhjN/aKwp3hwv4rgjWUl3FCxcpQXbFCxOg9WGSB/I4smfx3JwtPN6bQQQ4BN1lt4NhIEt2+fvrT6kj+RQ+daRFuQmZp1VKT/MczV9PvyXwAAAP//XRLRjg==" + return 
"eJy8Wb2OrEYTzfcpShN/F+kLnExgObAtr2XLjm5iWagXaqDvbeh2/4y9fnoLZmCB/qEY2CWYAKhzTlcV1dU1n+Arvp5ByMpYZuonAMutwDOcfrnfOj0BlGgKzZXlsj3Dt08AAMNjaGTpBD4BaBTIDJ6hYk8ABq3lbWXO8MfJGHH6H5xqa9Xpz+5ZLbXNC9leeHWGCxOms79wFKU59+ifoGUNvunKjWXW9I8A7KvqWLR06n5najo1t7xBY1mjxieDNROcmcldxWx9htN3o8XJA/tybTyYqYiQkKl9g83sfgwjhjPFqpGp3Bksc97mL68WjfdqbKXzFQ8OzlpZYtZ7OftybbIGm4xAMtPTsH/eXU6UY1DiVBfC7p2GC8GXVHERaQFR2IEXr9hasytBeHuA2JuObIE1UEhnj+NYgg0kpdOsKxWHBuHOmcBe1oxdsXCOlweoHu95eAPRFbXh8ojIj/eWkAOV4goFb9HP0haNxdIzkPvyuVDusILXLTFaU0KAKdApcOuaF9S5vOQomOqKnULNZRmqUbBaxSAZIWmyoteaFcr197Jt9NN9Lbe1ltYKLPOWtfKD9JKZff/2W+ub7Uf7d43+rXywMmdX1KzCd8m4/3+z7AOOWrty2VR9FmMalXyYkLSOD3PIgmgsVKFwPl6rCuVYUdjMGVZh9BN5tB2aJXmUZ6z5WhZobq5QqAtsLbEXDnGn0AbGvxw6vG/XeSHdLsIIWPB4gJuPB8OGmM325m0aMQuheAx1qCF5gGPEiXQ79MWjYMbywiDTRZ0VwhmLOuSJr/j6t9T+2jp5q/1B4OQ4XAv7+fFxuGLqjyC/HSqhQat5YbLNWmDRn7yF6bBS4jWhEA3L0nSRcevGo6E0Ww8JnUnW/XQIizcaXt1a9jNY7ZZPEyHqrp+kseCB7uudDeorL9Brkw+QOw5FPgeww3MESObHCt/Pn3+F5/YiN2ZVeOWwmlqrerortHSYlcR4Tqd3w2H3CSOkA0cU//uNAp6/j37l4cPsIx+3nwhpvBQm0EcgcyIh2yqJ5o+taErX1MLWcRZRt4e7MpYiwEZnPHPbmCcIefdDjxzMLyBG3ZseERZG1tddz20hG95WdzdA346hzpKq/IHT0bJ+c7aSW2VduLCoVw6h+7X9eKfZpG11fEZQmZiFza131Rl/RjYHj+8jlL1oHSc4pzpuff78ioa9hg/0aQOdkEIKpCnEnDNZZ4EyTHgQb6++5OF+jnFEIGmHfqJ2IGUhbQGURcD6pHU7KZUYdkxjfVWr+QWPzVF3Mm0dgxLpaGOeOVAp3Yvwi03qX4I5wOLfgreL1Kbs3zIiJ9ttIP1k6d2rQmQMFiYklYPu1VWklANg6YTc8H/x4Ma767kPBV9tweHQuMXbVoJWeKSZI+BOWibLi6g3j/PCkeLB/yqiJ5aNcEPFylHV2KBmIo9WGSB/I4smfxvJYqW702khhgC7Wm/hUU8Qln379JUzdf5ADl2ETLYgMzXbqEh/UszV9HH5LwAA//8MYuTR" } diff --git a/metricbeat/module/logstash/node/_meta/data.json b/metricbeat/module/logstash/node/_meta/data.json index a847bea944a..cfc36e09d31 100644 --- a/metricbeat/module/logstash/node/_meta/data.json +++ b/metricbeat/module/logstash/node/_meta/data.json @@ -9,6 +9,11 @@ "cluster": { "id": "VUwnkX_lTzCFP9VUoXT1IQ" }, + "elasticsearch": { + "cluster": { + "id": "VUwnkX_lTzCFP9VUoXT1IQ" + } + }, "node": { "host": "7dc1b688baf4", "id": 
"9a1f83e1-52b9-4625-a98a-6aa336f41719", @@ -101,4 +106,4 @@ "type": "logstash", "version": "7.12.0" } -} \ No newline at end of file +} diff --git a/metricbeat/module/logstash/node/data.go b/metricbeat/module/logstash/node/data.go index bb9f51c9847..1d5725ed969 100644 --- a/metricbeat/module/logstash/node/data.go +++ b/metricbeat/module/logstash/node/data.go @@ -122,6 +122,7 @@ func eventMapping(r mb.ReporterV2, content []byte, pipelines []logstash.Pipeline if clusterUUID != "" { event.ModuleFields.Put("cluster.id", clusterUUID) + event.ModuleFields.Put("elasticsearch.cluster.id", clusterUUID) } event.ID = pipeline.EphemeralID diff --git a/metricbeat/module/logstash/node_stats/_meta/data.json b/metricbeat/module/logstash/node_stats/_meta/data.json index 5343737c3b4..b218c10246e 100644 --- a/metricbeat/module/logstash/node_stats/_meta/data.json +++ b/metricbeat/module/logstash/node_stats/_meta/data.json @@ -6,7 +6,14 @@ "module": "logstash" }, "logstash": { - "cluster.id": "VUwnkX_lTzCFP9VUoXT1IQ", + "cluster": { + "id": "VUwnkX_lTzCFP9VUoXT1IQ" + }, + "elasticsearch": { + "cluster": { + "id": "VUwnkX_lTzCFP9VUoXT1IQ" + } + }, "node": { "stats": { "events": { @@ -136,4 +143,4 @@ "type": "logstash", "version": "7.12.0" } -} \ No newline at end of file +} diff --git a/metricbeat/module/logstash/node_stats/data.go b/metricbeat/module/logstash/node_stats/data.go index 85eaaa8b493..d91e2724548 100644 --- a/metricbeat/module/logstash/node_stats/data.go +++ b/metricbeat/module/logstash/node_stats/data.go @@ -204,7 +204,8 @@ func eventMapping(r mb.ReporterV2, content []byte, isXpack bool) error { event.RootFields.Put("service.version", nodeStats.Version) if clusterUUID != "" { - event.ModuleFields["cluster.id"] = clusterUUID + event.ModuleFields.Put("cluster.id", clusterUUID) + event.ModuleFields.Put("elasticsearch.cluster.id", clusterUUID) } // xpack.enabled in config using standalone metricbeat writes to `.monitoring` instead of `metricbeat-*` From 
84924e9f32d018e360303714c26f754dbab34f93 Mon Sep 17 00:00:00 2001 From: Adrian Serrano Date: Thu, 30 Dec 2021 03:59:45 +0100 Subject: [PATCH 57/57] Ensure that match_only_text and wildcard fields can be added to default_field list (#29634) Fixes the construction of the index template's default_field list so that fields of type match_only_text and wildcard can be added to it. --- CHANGELOG.next.asciidoc | 2 ++ libbeat/template/processor.go | 2 +- libbeat/template/processor_test.go | 14 ++++++++++++++ 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index e5152441e17..bbf88e791f4 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -78,6 +78,8 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d *Affecting all Beats* +- Fields of type `match_only_text` (i.e. `message`) and `wildcard` were missing from the template's default_field list. {issue}29633[29633] {pull}29634[29634] + *Auditbeat* diff --git a/libbeat/template/processor.go b/libbeat/template/processor.go index cb13a95aa2d..9e4214ac04e 100644 --- a/libbeat/template/processor.go +++ b/libbeat/template/processor.go @@ -136,7 +136,7 @@ func (p *Processor) Process(fields mapping.Fields, state *fieldState, output, an if *field.DefaultField { switch field.Type { - case "", "keyword", "text": + case "", "keyword", "text", "match_only_text", "wildcard": addToDefaultFields(&field) } } diff --git a/libbeat/template/processor_test.go b/libbeat/template/processor_test.go index fefeb9b4313..4b35da89815 100644 --- a/libbeat/template/processor_test.go +++ b/libbeat/template/processor_test.go @@ -719,6 +719,18 @@ func TestProcessDefaultField(t *testing.T) { }, }, }, + // Ensure that text_only_keyword fields can be added to default_field + mapping.Field{ + Name: "a_match_only_text_field", + Type: "match_only_text", + DefaultField: &enableDefaultField, + }, + // Ensure that wildcard fields can be added to 
default_field + mapping.Field{ + Name: "a_wildcard_field", + Type: "wildcard", + DefaultField: &enableDefaultField, + }, } version, err := common.NewVersion("7.0.0") @@ -734,6 +746,8 @@ func TestProcessDefaultField(t *testing.T) { } expectedFields := []string{ + "a_match_only_text_field", + "a_wildcard_field", "bar", "nested.bar", "nested.foo",