diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 9fb6706d4c1f..2c0d485b63bf 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -50,8 +50,8 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Remove deprecated awscloudwatch field from Filebeat. {pull}41089[41089] - The performance of ingesting SQS data with the S3 input has improved by up to 60x for queues with many small events. `max_number_of_messages` config for SQS mode is now ignored, as the new design no longer needs a manual cap on messages. Instead, use `number_of_workers` to scale ingestion rate in both S3 and SQS modes. The increased efficiency may increase network bandwidth consumption, which can be throttled by lowering `number_of_workers`. It may also increase number of events stored in memory, which can be throttled by lowering the configured size of the internal queue. {pull}40699[40699] - Fixes filestream logging the error "filestream input with ID 'ID' already exists, this will lead to data duplication[...]" on Kubernetes when using autodiscover. {pull}41585[41585] - - Add kafka compression support for ZSTD. +- Filebeat fails to start if there is any input with a duplicated ID. It logs the duplicated IDs and the offending inputs configurations. {pull}41731[41731] *Heartbeat* @@ -184,6 +184,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Fix the "No such input type exist: 'salesforce'" error on the Windows/AIX platform. {pull}41664[41664] - Fix missing key in streaming input logging. {pull}41600[41600] - Improve S3 object size metric calculation to support situations where Content-Length is not available. {pull}41755[41755] +- Fix handling of http_endpoint request exceeding memory limits. 
{issue}41764[41764] {pull}41765[41765] *Heartbeat* @@ -216,6 +217,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Fix Kubernetes metadata sometimes not being present after startup {pull}41216[41216] - Do not report non-existant 0 values for RSS metrics in docker/memory {pull}41449[41449] - Log Cisco Meraki `getDevicePerformanceScores` errors without stopping metrics collection. {pull}41622[41622] +- Don't skip first bucket value in GCP metrics metricset for distribution type metrics {pull}41822[41822] *Osquerybeat* @@ -345,7 +347,9 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Add support for Journald in the System module. {pull}41555[41555] - Add ability to remove request trace logs from http_endpoint input. {pull}40005[40005] - Add ability to remove request trace logs from entityanalytics input. {pull}40004[40004] +- Refactor & cleanup with updates to default values and documentation. {pull}41834[41834] - Update CEL mito extensions to v1.16.0. {pull}41727[41727] +- Add evaluation state dump debugging option to CEL input. {pull}41335[41335] - Introduce ignore older and start timestamp filters for AWS S3 input. {pull}41804[41804] *Auditbeat* @@ -397,6 +401,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Bump aerospike-client-go to version v7.7.1 and add support for basic auth in Aerospike module {pull}41233[41233] - Only watch metadata for ReplicaSets in metricbeat k8s module {pull}41289[41289] - Add support for region/zone for Vertex AI service in GCP module {pull}41551[41551] +- Add support for location label as an optional configuration parameter in GCP metrics metricset. 
{issue}41550[41550] {pull}41626[41626] *Metricbeat* diff --git a/NOTICE.txt b/NOTICE.txt index 881094596de9..1b92ea7e195f 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -2745,36 +2745,6 @@ OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. --------------------------------------------------------------------------------- -Dependency : github.com/elastic/sarama -Version: v1.19.1-0.20220310193331-ebc2b0d8eef3 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/elastic/sarama@v1.19.1-0.20220310193331-ebc2b0d8eef3/LICENSE: - -Copyright (c) 2013 Shopify - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -------------------------------------------------------------------------------- Dependency : github.com/StackExchange/wmi Version: v1.2.1 @@ -10159,38 +10129,6 @@ THE SOFTWARE. 
--------------------------------------------------------------------------------- -Dependency : github.com/bsm/sarama-cluster -Version: v2.1.14-0.20180625083203-7e67d87a6b3f+incompatible -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/bsm/sarama-cluster@v2.1.14-0.20180625083203-7e67d87a6b3f+incompatible/LICENSE: - -(The MIT License) - -Copyright (c) 2017 Black Square Media Ltd - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- - -------------------------------------------------------------------------------- Dependency : github.com/cavaliergopher/rpm Version: v1.2.0 @@ -11313,43 +11251,6 @@ Apache License --------------------------------------------------------------------------------- -Dependency : github.com/denisenkom/go-mssqldb -Version: v0.12.3 -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/denisenkom/go-mssqldb@v0.12.3/LICENSE.txt: - -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -------------------------------------------------------------------------------- Dependency : github.com/devigned/tab Version: v0.1.2-0.20190607222403-0c15cf42f9a2 @@ -12880,11 +12781,11 @@ SOFTWARE. -------------------------------------------------------------------------------- Dependency : github.com/eapache/go-resiliency -Version: v1.2.0 +Version: v1.7.0 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/eapache/go-resiliency@v1.2.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/eapache/go-resiliency@v1.7.0/LICENSE: The MIT License (MIT) @@ -16704,6 +16605,40 @@ See the License for the specific language governing permissions and limitations under the License. 
+-------------------------------------------------------------------------------- +Dependency : github.com/elastic/sarama +Version: v1.19.1-0.20241120141909-c7eabfcee7e5 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/elastic/sarama@v1.19.1-0.20241120141909-c7eabfcee7e5/LICENSE.md: + +# MIT License + +Copyright (c) 2013 Shopify + +Copyright (c) 2023 IBM Corporation + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + -------------------------------------------------------------------------------- Dependency : github.com/elastic/tk-btf Version: v0.1.0 @@ -20799,11 +20734,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- Dependency : github.com/jcmturner/gokrb5/v8 -Version: v8.4.2 +Version: v8.4.4 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/jcmturner/gokrb5/v8@v8.4.2/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/jcmturner/gokrb5/v8@v8.4.4/LICENSE: Apache License Version 2.0, January 2004 @@ -21885,6 +21820,44 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +-------------------------------------------------------------------------------- +Dependency : github.com/microsoft/go-mssqldb +Version: v1.7.2 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/microsoft/go-mssqldb@v1.7.2/LICENSE.txt: + +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) Microsoft Corporation. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + -------------------------------------------------------------------------------- Dependency : github.com/miekg/dns Version: v1.1.61 @@ -31402,6 +31375,66 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +-------------------------------------------------------------------------------- +Dependency : github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys +Version: v1.0.1 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/!azure/azure-sdk-for-go/sdk/security/keyvault/azkeys@v1.0.1/LICENSE.txt: + + MIT License + + Copyright (c) Microsoft Corporation. All rights reserved. 
+ + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE + +-------------------------------------------------------------------------------- +Dependency : github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal +Version: v1.0.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/!azure/azure-sdk-for-go/sdk/security/keyvault/internal@v1.0.0/LICENSE.txt: + + MIT License + + Copyright (c) Microsoft Corporation. All rights reserved. 
+ + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE + -------------------------------------------------------------------------------- Dependency : github.com/Azure/go-amqp Version: v1.0.5 @@ -33135,6 +33168,40 @@ Contents of probable licence file $GOMODCACHE/github.com/!azure!a!d/microsoft-au SOFTWARE +-------------------------------------------------------------------------------- +Dependency : github.com/IBM/sarama +Version: v1.43.3 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/!i!b!m/sarama@v1.43.3/LICENSE.md: + +# MIT License + +Copyright (c) 2013 Shopify + +Copyright (c) 2023 IBM Corporation + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, 
merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + -------------------------------------------------------------------------------- Dependency : github.com/JohnCGriffin/overflow Version: v0.0.0-20211019200055-46fa312c352c @@ -33172,38 +33239,6 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
--------------------------------------------------------------------------------- -Dependency : github.com/Shopify/toxiproxy -Version: v2.1.4+incompatible -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/!shopify/toxiproxy@v2.1.4+incompatible/LICENSE: - -The MIT License (MIT) - -Copyright (c) 2014 Shopify - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - - -------------------------------------------------------------------------------- Dependency : github.com/akavel/rsrc Version: v0.8.0 @@ -39444,11 +39479,11 @@ Contents of probable licence file $GOMODCACHE/github.com/docker/go-metrics@v0.0. 
-------------------------------------------------------------------------------- Dependency : github.com/eapache/go-xerial-snappy -Version: v0.0.0-20180814174437-776d5712da21 +Version: v0.0.0-20230731223053-c322873962e3 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/eapache/go-xerial-snappy@v0.0.0-20180814174437-776d5712da21/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/eapache/go-xerial-snappy@v0.0.0-20230731223053-c322873962e3/LICENSE: The MIT License (MIT) @@ -42038,11 +42073,11 @@ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLI -------------------------------------------------------------------------------- Dependency : github.com/golang-sql/civil -Version: v0.0.0-20190719163853-cb61b32ac6fe +Version: v0.0.0-20220223132316-b832511892a9 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/golang-sql/civil@v0.0.0-20190719163853-cb61b32ac6fe/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/golang-sql/civil@v0.0.0-20220223132316-b832511892a9/LICENSE: Apache License @@ -46261,11 +46296,13 @@ Exhibit B - "Incompatible With Secondary Licenses" Notice -------------------------------------------------------------------------------- Dependency : github.com/hashicorp/go-uuid -Version: v1.0.2 +Version: v1.0.3 Licence type (autodetected): MPL-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/hashicorp/go-uuid@v1.0.2/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/hashicorp/go-uuid@v1.0.3/LICENSE: + +Copyright © 2015-2022 HashiCorp, Inc. 
Mozilla Public License, version 2.0 @@ -47700,11 +47737,11 @@ Contents of probable licence file $GOMODCACHE/github.com/jcmturner/dnsutils/v2@v -------------------------------------------------------------------------------- Dependency : github.com/jcmturner/gofork -Version: v1.0.0 +Version: v1.7.6 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/jcmturner/gofork@v1.0.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/jcmturner/gofork@v1.7.6/LICENSE: Copyright (c) 2009 The Go Authors. All rights reserved. @@ -52012,44 +52049,6 @@ Contents of probable licence file $GOMODCACHE/github.com/oxtoacart/bpool@v0.0.0- limitations under the License. --------------------------------------------------------------------------------- -Dependency : github.com/pierrec/lz4 -Version: v2.6.0+incompatible -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/pierrec/lz4@v2.6.0+incompatible/LICENSE: - -Copyright (c) 2015, Pierre Curto -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -* Neither the name of xxHash nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - - -------------------------------------------------------------------------------- Dependency : github.com/pkg/browser Version: v0.0.0-20240102092130-5ac0b6a4141c diff --git a/catalog-info.yaml b/catalog-info.yaml index a0eca0c2c9f6..16d4bd7e0d0b 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -31,7 +31,7 @@ metadata: spec: type: buildkite-pipeline owner: group:ingest-fp - system: buildkite + system: platform-ingest implementation: apiVersion: buildkite.elastic.dev/v1 kind: Pipeline @@ -81,7 +81,7 @@ metadata: spec: type: buildkite-pipeline owner: group:ingest-fp - system: buildkite + system: platform-ingest implementation: apiVersion: buildkite.elastic.dev/v1 kind: Pipeline @@ -128,7 +128,7 @@ metadata: spec: type: buildkite-pipeline owner: group:ingest-fp - system: buildkite + system: platform-ingest implementation: apiVersion: buildkite.elastic.dev/v1 kind: Pipeline @@ -175,7 +175,7 @@ metadata: spec: type: buildkite-pipeline owner: group:ingest-fp - system: buildkite + system: platform-ingest implementation: apiVersion: buildkite.elastic.dev/v1 kind: Pipeline @@ -222,7 +222,7 @@ metadata: spec: type: buildkite-pipeline owner: group:ingest-fp - system: buildkite + system: platform-ingest 
implementation: apiVersion: buildkite.elastic.dev/v1 kind: Pipeline @@ -269,7 +269,7 @@ metadata: spec: type: buildkite-pipeline owner: group:ingest-fp - system: buildkite + system: platform-ingest implementation: apiVersion: buildkite.elastic.dev/v1 kind: Pipeline @@ -316,7 +316,7 @@ metadata: spec: type: buildkite-pipeline owner: group:ingest-fp - system: buildkite + system: platform-ingest implementation: apiVersion: buildkite.elastic.dev/v1 kind: Pipeline @@ -363,7 +363,7 @@ metadata: spec: type: buildkite-pipeline owner: group:ingest-fp - system: buildkite + system: platform-ingest implementation: apiVersion: buildkite.elastic.dev/v1 kind: Pipeline @@ -410,7 +410,7 @@ metadata: spec: type: buildkite-pipeline owner: group:ingest-fp - system: buildkite + system: platform-ingest implementation: apiVersion: buildkite.elastic.dev/v1 kind: Pipeline @@ -456,7 +456,7 @@ metadata: spec: type: buildkite-pipeline owner: group:ingest-fp - system: buildkite + system: platform-ingest implementation: apiVersion: buildkite.elastic.dev/v1 kind: Pipeline @@ -503,7 +503,7 @@ metadata: spec: type: buildkite-pipeline owner: group:ingest-fp - system: buildkite + system: platform-ingest implementation: apiVersion: buildkite.elastic.dev/v1 kind: Pipeline @@ -550,7 +550,7 @@ metadata: spec: type: buildkite-pipeline owner: group:ingest-fp - system: buildkite + system: platform-ingest implementation: apiVersion: buildkite.elastic.dev/v1 kind: Pipeline @@ -597,7 +597,7 @@ metadata: spec: type: buildkite-pipeline owner: group:ingest-fp - system: buildkite + system: platform-ingest implementation: apiVersion: buildkite.elastic.dev/v1 kind: Pipeline @@ -644,7 +644,7 @@ metadata: spec: type: buildkite-pipeline owner: group:ingest-fp - system: buildkite + system: platform-ingest implementation: apiVersion: buildkite.elastic.dev/v1 kind: Pipeline @@ -690,7 +690,7 @@ metadata: spec: type: buildkite-pipeline owner: group:ingest-fp - system: buildkite + system: platform-ingest implementation: 
apiVersion: buildkite.elastic.dev/v1 kind: Pipeline @@ -725,7 +725,7 @@ metadata: spec: type: buildkite-pipeline owner: group:ingest-fp - system: buildkite + system: platform-ingest implementation: apiVersion: buildkite.elastic.dev/v1 kind: Pipeline @@ -761,7 +761,7 @@ metadata: spec: type: buildkite-pipeline owner: group:ingest-fp - system: buildkite + system: platform-ingest implementation: apiVersion: buildkite.elastic.dev/v1 kind: Pipeline @@ -808,7 +808,7 @@ metadata: spec: type: buildkite-pipeline owner: group:ingest-fp - system: buildkite + system: platform-ingest implementation: apiVersion: buildkite.elastic.dev/v1 kind: Pipeline @@ -855,7 +855,7 @@ metadata: spec: type: buildkite-pipeline owner: group:ingest-fp - system: buildkite + system: platform-ingest implementation: apiVersion: buildkite.elastic.dev/v1 kind: Pipeline @@ -902,7 +902,7 @@ metadata: spec: type: buildkite-pipeline owner: group:ingest-fp - system: buildkite + system: platform-ingest implementation: apiVersion: buildkite.elastic.dev/v1 kind: Pipeline @@ -949,7 +949,7 @@ metadata: spec: type: buildkite-pipeline owner: group:ingest-fp - system: buildkite + system: platform-ingest implementation: apiVersion: buildkite.elastic.dev/v1 kind: Pipeline @@ -996,7 +996,7 @@ metadata: spec: type: buildkite-pipeline owner: group:ingest-fp - system: buildkite + system: platform-ingest implementation: apiVersion: buildkite.elastic.dev/v1 kind: Pipeline @@ -1043,7 +1043,7 @@ metadata: spec: type: buildkite-pipeline owner: group:ingest-fp - system: buildkite + system: platform-ingest implementation: apiVersion: buildkite.elastic.dev/v1 kind: Pipeline @@ -1092,7 +1092,7 @@ metadata: spec: type: buildkite-pipeline owner: group:ingest-fp - system: buildkite + system: platform-ingest implementation: apiVersion: buildkite.elastic.dev/v1 kind: Pipeline @@ -1128,7 +1128,7 @@ metadata: spec: type: buildkite-pipeline owner: group:ingest-fp - system: buildkite + system: platform-ingest implementation: apiVersion: 
buildkite.elastic.dev/v1 kind: Pipeline @@ -1162,7 +1162,7 @@ metadata: spec: type: buildkite-pipeline owner: group:ingest-fp - system: buildkite + system: platform-ingest implementation: apiVersion: buildkite.elastic.dev/v1 kind: Pipeline @@ -1205,7 +1205,7 @@ metadata: spec: type: buildkite-pipeline owner: group:ingest-fp - system: buildkite + system: platform-ingest implementation: apiVersion: buildkite.elastic.dev/v1 kind: Pipeline diff --git a/docs/devguide/testing.asciidoc b/docs/devguide/testing.asciidoc index 9488fe47dcee..07f2ae21025c 100644 --- a/docs/devguide/testing.asciidoc +++ b/docs/devguide/testing.asciidoc @@ -25,6 +25,8 @@ Integration tests are labelled with the `//go:build integration` build tag and u To run the integration tests use the `mage goIntegTest` target, which will start the required services using https://docs.docker.com/compose/[docker-compose] and run all integration tests. +It is also possible to run module specific integration tests. For example, to run kafka only tests use `MODULE=kafka mage integTest -v` + It is possible to start the `docker-compose` services manually to allow selecting which specific tests should be run. 
An example follows for filebeat: [source,bash] diff --git a/filebeat/beater/filebeat.go b/filebeat/beater/filebeat.go index 815b6fabfde2..ceab21aa3590 100644 --- a/filebeat/beater/filebeat.go +++ b/filebeat/beater/filebeat.go @@ -32,6 +32,7 @@ import ( "github.com/elastic/beats/v7/filebeat/fileset" _ "github.com/elastic/beats/v7/filebeat/include" "github.com/elastic/beats/v7/filebeat/input" + "github.com/elastic/beats/v7/filebeat/input/filestream" "github.com/elastic/beats/v7/filebeat/input/filestream/takeover" v2 "github.com/elastic/beats/v7/filebeat/input/v2" "github.com/elastic/beats/v7/filebeat/input/v2/compat" @@ -291,6 +292,11 @@ func (fb *Filebeat) Run(b *beat.Beat) error { } defer stateStore.Close() + err = filestream.ValidateInputIDs(config.Inputs, logp.NewLogger("input.filestream")) + if err != nil { + logp.Err("invalid filestream configuration: %+v", err) + return err + } err = processLogInputTakeOver(stateStore, config) if err != nil { logp.Err("Failed to attempt filestream state take over: %+v", err) diff --git a/filebeat/input/filestream/config.go b/filebeat/input/filestream/config.go index 1cb8fa5da979..2860dd673c23 100644 --- a/filebeat/input/filestream/config.go +++ b/filebeat/input/filestream/config.go @@ -19,6 +19,7 @@ package filestream import ( "fmt" + "strings" "time" "github.com/dustin/go-humanize" @@ -27,6 +28,7 @@ import ( "github.com/elastic/beats/v7/libbeat/reader/parser" "github.com/elastic/beats/v7/libbeat/reader/readfile" conf "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-libs/logp" ) // Config stores the options of a file stream. @@ -142,3 +144,60 @@ func (c *config) Validate() error { return nil } + +// ValidateInputIDs checks all filestream inputs to ensure all input IDs are +// unique. If there is a duplicated ID, it logs an error containing the offending +// input configurations and returns an error containing the duplicated IDs. 
+// A single empty ID is a valid ID as it's unique, however multiple empty IDs +// are not unique and are therefore are treated as any other duplicated ID. +func ValidateInputIDs(inputs []*conf.C, logger *logp.Logger) error { + duplicatedConfigs := make(map[string][]*conf.C) + var duplicates []string + for _, input := range inputs { + fsInput := struct { + ID string `config:"id"` + Type string `config:"type"` + }{} + err := input.Unpack(&fsInput) + if err != nil { + return fmt.Errorf("failed to unpack filestream input configuration: %w", err) + } + if fsInput.Type == "filestream" { + duplicatedConfigs[fsInput.ID] = append(duplicatedConfigs[fsInput.ID], input) + // we just need to collect the duplicated IDs once, therefore collect + // it only the first time we see a duplicated ID. + if len(duplicatedConfigs[fsInput.ID]) == 2 { + duplicates = append(duplicates, fsInput.ID) + } + } + } + + if len(duplicates) != 0 { + jsonDupCfg := collectOffendingInputs(duplicates, duplicatedConfigs) + logger.Errorw("filestream inputs with duplicated IDs", "inputs", jsonDupCfg) + var quotedDuplicates []string + for _, dup := range duplicates { + quotedDuplicates = append(quotedDuplicates, fmt.Sprintf("%q", dup)) + } + return fmt.Errorf("filestream inputs validation error: filestream inputs with duplicated IDs: %v", strings.Join(quotedDuplicates, ",")) + } + + return nil +} + +func collectOffendingInputs(duplicates []string, ids map[string][]*conf.C) []map[string]interface{} { + var cfgs []map[string]interface{} + + for _, id := range duplicates { + for _, dupcfgs := range ids[id] { + toJson := map[string]interface{}{} + err := dupcfgs.Unpack(&toJson) + if err != nil { + toJson[id] = fmt.Sprintf("failed to unpack config: %v", err) + } + cfgs = append(cfgs, toJson) + } + } + + return cfgs +} diff --git a/filebeat/input/filestream/config_test.go b/filebeat/input/filestream/config_test.go index 6cf045060c94..729b712d58ea 100644 --- a/filebeat/input/filestream/config_test.go +++ 
b/filebeat/input/filestream/config_test.go @@ -18,9 +18,16 @@ package filestream import ( + "encoding/json" + "strings" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "go.uber.org/zap/zaptest/observer" + + conf "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-libs/logp" ) func TestConfigValidate(t *testing.T) { @@ -30,3 +37,186 @@ func TestConfigValidate(t *testing.T) { require.Error(t, err) }) } + +func TestValidateInputIDs(t *testing.T) { + tcs := []struct { + name string + cfg []string + assertErr func(t *testing.T, err error) + assertLogs func(t *testing.T, buff *observer.ObservedLogs) + }{ + { + name: "empty config", + cfg: []string{""}, + assertErr: func(t *testing.T, err error) { + assert.NoError(t, err, "empty config should not return an error") + }, + }, + { + name: "one empty ID is allowed", + cfg: []string{` +type: filestream +`, ` +type: filestream +id: some-id-1 +`, ` +type: filestream +id: some-id-2 +`, + }, + assertErr: func(t *testing.T, err error) { + assert.NoError(t, err, "one empty id is allowed") + }, + }, + { + name: "duplicated empty ID", + cfg: []string{` +type: filestream +paths: + - "/tmp/empty-1" +`, ` +type: filestream +paths: + - "/tmp/empty-2" +`, ` +type: filestream +id: unique-id-1 +`, ` +type: filestream +id: unique-id-2 +`, ` +type: filestream +id: unique-ID +`, + }, + assertErr: func(t *testing.T, err error) { + assert.ErrorContains(t, err, `filestream inputs with duplicated IDs: ""`) + + }, + assertLogs: func(t *testing.T, obs *observer.ObservedLogs) { + want := `[{"paths":["/tmp/empty-1"],"type":"filestream"},{"paths":["/tmp/empty-2"],"type":"filestream"}]` + + logs := obs.TakeAll() + require.Len(t, logs, 1, "there should be only one log entry") + + got, err := json.Marshal(logs[0].ContextMap()["inputs"]) + require.NoError(t, err, "could not marshal duplicated IDs inputs") + assert.Equal(t, want, string(got)) + }, + }, { + name: "duplicated IDs", + cfg: 
[]string{` +type: filestream +id: duplicated-id-1 +`, ` +type: filestream +id: duplicated-id-1 +`, ` +type: filestream +id: duplicated-id-2 +`, ` +type: filestream +id: duplicated-id-2 +`, ` +type: filestream +id: duplicated-id-2 +`, ` +type: filestream +id: unique-ID +`, + }, + assertErr: func(t *testing.T, err error) { + assert.ErrorContains(t, err, "filestream inputs with duplicated IDs") + assert.ErrorContains(t, err, "duplicated-id-1") + assert.ErrorContains(t, err, "duplicated-id-2") + assert.Equal(t, strings.Count(err.Error(), "duplicated-id-1"), 1, "each IDs should appear only once") + assert.Equal(t, strings.Count(err.Error(), "duplicated-id-2"), 1, "each IDs should appear only once") + + }, + assertLogs: func(t *testing.T, obs *observer.ObservedLogs) { + want := `[{"id":"duplicated-id-1","type":"filestream"},{"id":"duplicated-id-1","type":"filestream"},{"id":"duplicated-id-2","type":"filestream"},{"id":"duplicated-id-2","type":"filestream"},{"id":"duplicated-id-2","type":"filestream"}]` + + logs := obs.TakeAll() + require.Len(t, logs, 1, "there should be only one log entry") + + got, err := json.Marshal(logs[0].ContextMap()["inputs"]) + require.NoError(t, err, "could not marshal duplicated IDs inputs") + assert.Equal(t, want, string(got)) + }, + }, + { + name: "duplicated IDs and empty ID", + cfg: []string{` +type: filestream +`, ` +type: filestream +`, ` +type: filestream +id: duplicated-id-1 +`, ` +type: filestream +id: duplicated-id-1 +`, ` +type: filestream +id: duplicated-id-2 +`, ` +type: filestream +id: duplicated-id-2 +`, ` +type: filestream +id: unique-ID +`, + }, + assertErr: func(t *testing.T, err error) { + assert.ErrorContains(t, err, "filestream inputs with duplicated IDs") + }, + assertLogs: func(t *testing.T, obs *observer.ObservedLogs) { + want := 
`[{"type":"filestream"},{"type":"filestream"},{"id":"duplicated-id-1","type":"filestream"},{"id":"duplicated-id-1","type":"filestream"},{"id":"duplicated-id-2","type":"filestream"},{"id":"duplicated-id-2","type":"filestream"}]` + + logs := obs.TakeAll() + require.Len(t, logs, 1, "there should be only one log entry") + + got, err := json.Marshal(logs[0].ContextMap()["inputs"]) + require.NoError(t, err, "could not marshal duplicated IDs inputs") + assert.Equal(t, want, string(got)) + + }, + }, + { + name: "only unique IDs", + cfg: []string{` +type: filestream +id: unique-id-1 +`, ` +type: filestream +id: unique-id-2 +`, ` +type: filestream +id: unique-id-3 +`, + }, + assertErr: func(t *testing.T, err error) { + assert.NoError(t, err, "only unique IDs should not return an error") + }, + }, + } + + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + var inputs []*conf.C + for _, c := range tc.cfg { + cfg, err := conf.NewConfigFrom(c) + require.NoError(t, err, "could not create input configuration") + inputs = append(inputs, cfg) + } + err := logp.DevelopmentSetup(logp.ToObserverOutput()) + require.NoError(t, err, "could not setup log for development") + + err = ValidateInputIDs(inputs, logp.L()) + tc.assertErr(t, err) + if tc.assertLogs != nil { + tc.assertLogs(t, logp.ObserverLogs()) + } + }) + } +} diff --git a/filebeat/input/filestream/fswatch_test.go b/filebeat/input/filestream/fswatch_test.go index 528caec79de3..9fae0481ca6a 100644 --- a/filebeat/input/filestream/fswatch_test.go +++ b/filebeat/input/filestream/fswatch_test.go @@ -36,7 +36,6 @@ import ( ) func TestFileWatcher(t *testing.T) { - t.Skip("Flaky test: https://github.com/elastic/beats/issues/41209") dir := t.TempDir() paths := []string{filepath.Join(dir, "*.log")} cfgStr := ` @@ -261,10 +260,10 @@ scanner: paths := []string{filepath.Join(dir, "*.log")} cfgStr := ` scanner: - check_interval: 10ms + check_interval: 50ms ` - ctx, cancel := context.WithTimeout(context.Background(), 
100*time.Millisecond) + ctx, cancel := context.WithTimeout(context.Background(), 1000*time.Millisecond) defer cancel() logp.DevelopmentSetup(logp.ToObserverOutput()) diff --git a/filebeat/input/kafka/config.go b/filebeat/input/kafka/config.go index 9d085af27d2e..c1db26733538 100644 --- a/filebeat/input/kafka/config.go +++ b/filebeat/input/kafka/config.go @@ -22,8 +22,6 @@ import ( "fmt" "time" - "github.com/Shopify/sarama" - "github.com/elastic/beats/v7/libbeat/common/cfgwarn" "github.com/elastic/beats/v7/libbeat/common/kafka" "github.com/elastic/beats/v7/libbeat/common/transport/kerberos" @@ -31,6 +29,7 @@ import ( "github.com/elastic/elastic-agent-libs/monitoring" "github.com/elastic/elastic-agent-libs/monitoring/adapter" "github.com/elastic/elastic-agent-libs/transport/tlscommon" + "github.com/elastic/sarama" ) type kafkaInputConfig struct { @@ -241,8 +240,8 @@ func (off *initialOffset) Unpack(value string) error { func (st rebalanceStrategy) asSaramaStrategy() sarama.BalanceStrategy { return map[rebalanceStrategy]sarama.BalanceStrategy{ - rebalanceStrategyRange: sarama.BalanceStrategyRange, - rebalanceStrategyRoundRobin: sarama.BalanceStrategyRoundRobin, + rebalanceStrategyRange: sarama.NewBalanceStrategyRange(), + rebalanceStrategyRoundRobin: sarama.NewBalanceStrategyRoundRobin(), }[st] } diff --git a/filebeat/input/kafka/input.go b/filebeat/input/kafka/input.go index e2a04b5fa499..6cc74514befb 100644 --- a/filebeat/input/kafka/input.go +++ b/filebeat/input/kafka/input.go @@ -30,8 +30,6 @@ import ( "github.com/elastic/beats/v7/libbeat/common/atomic" "github.com/elastic/elastic-agent-libs/mapstr" - "github.com/Shopify/sarama" - input "github.com/elastic/beats/v7/filebeat/input/v2" "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common/acker" @@ -42,6 +40,7 @@ import ( "github.com/elastic/beats/v7/libbeat/reader/parser" conf "github.com/elastic/elastic-agent-libs/config" "github.com/elastic/elastic-agent-libs/logp" + 
"github.com/elastic/sarama" ) const pluginName = "kafka" diff --git a/filebeat/input/kafka/kafka_integration_test.go b/filebeat/input/kafka/kafka_integration_test.go index 0728004c16da..cc8b29361da9 100644 --- a/filebeat/input/kafka/kafka_integration_test.go +++ b/filebeat/input/kafka/kafka_integration_test.go @@ -35,9 +35,10 @@ import ( "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" - "github.com/Shopify/sarama" "github.com/stretchr/testify/assert" + "github.com/elastic/sarama" + "github.com/elastic/beats/v7/libbeat/beat" _ "github.com/elastic/beats/v7/libbeat/outputs/codec/format" _ "github.com/elastic/beats/v7/libbeat/outputs/codec/json" @@ -460,7 +461,8 @@ func findMessage(t *testing.T, text string, msgs []testMessage) *testMessage { var msg *testMessage for _, m := range msgs { if text == m.message { - msg = &m + mCopy := m + msg = &mCopy break } } @@ -605,8 +607,10 @@ func run(t *testing.T, cfg *conf.C, client *beattest.ChanClient) (*kafkaInput, f t.Cleanup(cancel) pipeline := beattest.ConstClient(client) - input := inp.(*kafkaInput) - go input.Run(ctx, pipeline) + input, _ := inp.(*kafkaInput) + go func() { + _ = input.Run(ctx, pipeline) + }() return input, cancel } diff --git a/filebeat/tests/integration/filestream_test.go b/filebeat/tests/integration/filestream_test.go index 3ddb04a2c20c..45cf99fbfb61 100644 --- a/filebeat/tests/integration/filestream_test.go +++ b/filebeat/tests/integration/filestream_test.go @@ -26,6 +26,9 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/elastic/beats/v7/libbeat/tests/integration" ) @@ -105,3 +108,166 @@ func TestFilestreamCleanInactive(t *testing.T) { registryFile := filepath.Join(filebeat.TempDir(), "data", "registry", "filebeat", "log.json") filebeat.WaitFileContains(registryFile, `"op":"remove"`, time.Second) } + +func TestFilestreamValidationPreventsFilebeatStart(t *testing.T) { + duplicatedIDs := ` 
+filebeat.inputs: + - type: filestream + id: duplicated-id-1 + enabled: true + paths: + - /tmp/*.log + - type: filestream + id: duplicated-id-1 + enabled: true + paths: + - /var/log/*.log + +output.discard.enabled: true +logging: + level: debug + metrics: + enabled: false +` + emptyID := ` +filebeat.inputs: + - type: filestream + enabled: true + paths: + - /tmp/*.log + - type: filestream + enabled: true + paths: + - /var/log/*.log + +output.discard.enabled: true +logging: + level: debug + metrics: + enabled: false +` + multipleDuplicatedIDs := ` +filebeat.inputs: + - type: filestream + enabled: true + paths: + - /tmp/*.log + - type: filestream + enabled: true + paths: + - /var/log/*.log + + - type: filestream + id: duplicated-id-1 + enabled: true + paths: + - /tmp/duplicated-id-1.log + - type: filestream + id: duplicated-id-1 + enabled: true + paths: + - /tmp/duplicated-id-1-2.log + + + - type: filestream + id: unique-id-1 + enabled: true + paths: + - /tmp/unique-id-1.log + - type: filestream + id: unique-id-2 + enabled: true + paths: + - /var/log/unique-id-2.log + +output.discard.enabled: true +logging: + level: debug + metrics: + enabled: false +` + tcs := []struct { + name string + cfg string + }{ + { + name: "duplicated IDs", + cfg: duplicatedIDs, + }, + { + name: "duplicated empty ID", + cfg: emptyID, + }, + { + name: "two inputs without ID and duplicated IDs", + cfg: multipleDuplicatedIDs, + }, + } + + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + filebeat := integration.NewBeat( + t, + "filebeat", + "../../filebeat.test", + ) + + // Write configuration file and start Filebeat + filebeat.WriteConfigFile(tc.cfg) + filebeat.Start() + + // Wait for error log + filebeat.WaitForLogs( + "filestream inputs validation error", + 10*time.Second, + "Filebeat did not log a filestream input validation error") + + proc, err := filebeat.Process.Wait() + require.NoError(t, err, "filebeat process.Wait returned an error") + assert.False(t, proc.Success(), 
"filebeat should have failed to start") + + }) + } +} + +func TestFilestreamValidationSucceeds(t *testing.T) { + cfg := ` +filebeat.inputs: + - type: filestream + enabled: true + paths: + - /var/log/*.log + + - type: filestream + id: unique-id-1 + enabled: true + paths: + - /tmp/unique-id-1.log + - type: filestream + id: unique-id-2 + enabled: true + paths: + - /var/log/unique-id-2.log + +output.discard.enabled: true +logging: + level: debug + metrics: + enabled: false +` + filebeat := integration.NewBeat( + t, + "filebeat", + "../../filebeat.test", + ) + + // Write configuration file and start Filebeat + filebeat.WriteConfigFile(cfg) + filebeat.Start() + + // Wait for error log + filebeat.WaitForLogs( + "Input 'filestream' starting", + 10*time.Second, + "Filebeat did log a validation error") +} diff --git a/go.mod b/go.mod index 10869d1a204b..a65edbcf230f 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,6 @@ require ( github.com/Microsoft/go-winio v0.6.2 github.com/PaesslerAG/gval v1.2.2 github.com/PaesslerAG/jsonpath v0.1.1 - github.com/Shopify/sarama v1.27.0 github.com/StackExchange/wmi v1.2.1 github.com/akavel/rsrc v0.8.0 // indirect github.com/apoydence/eachers v0.0.0-20181020210610-23942921fe77 // indirect @@ -40,7 +39,6 @@ require ( github.com/aws/aws-sdk-go-v2/service/sqs v1.34.5 github.com/aws/aws-sdk-go-v2/service/sts v1.30.5 github.com/blakesmith/ar v0.0.0-20150311145944-8bd4349a67f2 - github.com/bsm/sarama-cluster v2.1.14-0.20180625083203-7e67d87a6b3f+incompatible github.com/cavaliergopher/rpm v1.2.0 github.com/cespare/xxhash/v2 v2.3.0 github.com/cloudfoundry-community/go-cfclient v0.0.0-20190808214049-35bcce23fc5f @@ -49,7 +47,6 @@ require ( github.com/containerd/fifo v1.1.0 github.com/coreos/go-systemd/v22 v22.5.0 github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f - github.com/denisenkom/go-mssqldb v0.12.3 github.com/devigned/tab v0.1.2-0.20190607222403-0c15cf42f9a2 github.com/digitalocean/go-libvirt v0.0.0-20240709142323-d8406205c752 
github.com/docker/docker v27.3.1+incompatible @@ -60,7 +57,7 @@ require ( github.com/dop251/goja v0.0.0-20200831102558-9af81ddcf0e1 github.com/dop251/goja_nodejs v0.0.0-20171011081505-adff31b136e6 github.com/dustin/go-humanize v1.0.1 - github.com/eapache/go-resiliency v1.2.0 + github.com/eapache/go-resiliency v1.7.0 github.com/eclipse/paho.mqtt.golang v1.3.5 github.com/elastic/elastic-agent-client/v7 v7.15.0 github.com/elastic/go-concert v0.3.0 @@ -193,6 +190,7 @@ require ( github.com/elastic/go-sfdc v0.0.0-20241010131323-8e176480d727 github.com/elastic/mito v1.16.0 github.com/elastic/mock-es v0.0.0-20240712014503-e5b47ece0015 + github.com/elastic/sarama v1.19.1-0.20241120141909-c7eabfcee7e5 github.com/elastic/tk-btf v0.1.0 github.com/elastic/toutoumomoma v0.0.0-20240626215117-76e39db18dfb github.com/foxcpp/go-mockdns v0.0.0-20201212160233-ede2f9158d15 @@ -206,9 +204,10 @@ require ( github.com/gorilla/websocket v1.5.0 github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/icholy/digest v0.1.22 - github.com/jcmturner/gokrb5/v8 v8.4.2 + github.com/jcmturner/gokrb5/v8 v8.4.4 github.com/klauspost/compress v1.17.11 github.com/meraki/dashboard-api-go/v3 v3.0.9 + github.com/microsoft/go-mssqldb v1.7.2 github.com/otiai10/copy v1.12.0 github.com/pierrec/lz4/v4 v4.1.21 github.com/pkg/xattr v0.4.9 @@ -279,7 +278,7 @@ require ( github.com/distribution/reference v0.6.0 // indirect github.com/dnephin/pflag v1.0.7 // indirect github.com/docker/go-metrics v0.0.1 // indirect - github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect + github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect github.com/eapache/queue v1.1.0 // indirect github.com/elastic/elastic-transport-go/v8 v8.6.0 // indirect github.com/elastic/go-windows v1.0.2 // indirect @@ -302,7 +301,7 @@ require ( github.com/goccy/go-json v0.10.3 // indirect github.com/godror/knownpb v0.1.0 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect - 
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe // indirect + github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 // indirect github.com/golang-sql/sqlexp v0.1.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect @@ -318,13 +317,13 @@ require ( github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect - github.com/hashicorp/go-uuid v1.0.2 // indirect + github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/go-version v1.2.0 // indirect github.com/imdario/mergo v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jcmturner/aescts/v2 v2.0.0 // indirect github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect - github.com/jcmturner/gofork v1.0.0 // indirect + github.com/jcmturner/gofork v1.7.6 // indirect github.com/jcmturner/goidentity/v6 v6.0.1 // indirect github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect @@ -356,7 +355,6 @@ require ( github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0 // indirect - github.com/pierrec/lz4 v2.6.0+incompatible // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/prometheus/client_golang v1.20.2 // indirect @@ -408,8 +406,6 @@ require ( replace ( github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/consumption/armconsumption => github.com/elastic/azure-sdk-for-go/sdk/resourcemanager/consumption/armconsumption v1.1.0-elastic - - github.com/Shopify/sarama => github.com/elastic/sarama v1.19.1-0.20220310193331-ebc2b0d8eef3 github.com/apoydence/eachers => github.com/poy/eachers 
v0.0.0-20181020210610-23942921fe77 //indirect, see https://github.com/elastic/beats/pull/29780 for details. github.com/dop251/goja => github.com/elastic/goja v0.0.0-20190128172624-dd2ac4456e20 github.com/fsnotify/fsevents => github.com/elastic/fsevents v0.0.0-20181029231046-e1d381a4d270 diff --git a/go.sum b/go.sum index 1c9c9130e4c7..7187e1e6138e 100644 --- a/go.sum +++ b/go.sum @@ -48,13 +48,10 @@ github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVt github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw= github.com/Azure/azure-sdk-for-go v65.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v0.19.0/go.mod h1:h6H6c8enJmmocHUbLiiGY6sx7f9i+X3m1CHdd5c6Rdw= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 h1:GJHeeA2N7xrG3q30L2UXDyuWRzDM900/65j70wcM4Ww= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.11.0/go.mod h1:HcM1YX14R7CJcghJGOYCgdezslRSVzqwLf/q+4Y2r/0= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= -github.com/Azure/azure-sdk-for-go/sdk/internal v0.7.0/go.mod h1:yqy467j36fJxcRV2TzfVZ1pCb5vxm4BtZPUdYWe/Xo8= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.2.1 h1:0f6XnzroY1yCQQwxGf/n/2xlaBF02Qhof2as99dGNsY= @@ -75,6 +72,10 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1. 
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0/go.mod h1:5kakwfW5CjC9KK+Q4wjXAg+ShuIm2mBMua0ZFj2C8PE= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0 h1:PiSrjRPpkQNjrM8H0WwKMnZUdu1RGMtd/LdGKUrOo+c= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0/go.mod h1:oDrbWx4ewMylP7xHivfgixbfGBT6APAwsSoHRKotnIc= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.1 h1:MyVTgWR8qd/Jw1Le0NZebGBUCLbtak3bJ3z1OlqZBpw= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.1/go.mod h1:GpPjLhVR9dnUoJMyHWSPy71xY9/lcmpzIPZXmF0FCVY= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0 h1:D3occbWoio4EBLkbkevetNMAVX197GkzbUMtqjGWn80= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0/go.mod h1:bTSOgj05NGRuHHhQwAdPnYr9TOdNmKlZTgGLL6nyAdI= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0 h1:Be6KInmFEKV81c0pOAEbRYehLMwmmGI1exuFj248AMk= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0/go.mod h1:WCPBHsOXfBVnivScjs2ypRfimjEW0qPVLGgJkZlrIOA= github.com/Azure/azure-storage-blob-go v0.15.0 h1:rXtgp8tN1p29GvpGgfJetavIG0V7OgcSXPpwp3tx6qk= @@ -113,6 +114,8 @@ github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzS github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/IBM/sarama v1.43.3 h1:Yj6L2IaNvb2mRBop39N7mmJAHBVY3dTPncr3qGVkxPA= +github.com/IBM/sarama v1.43.3/go.mod h1:FVIRaLrhK3Cla/9FfRF5X9Zua2KpS3SYIXxhac1H+FQ= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c h1:RGWPOewvKIROun94nF7v2cua9qP+thov/7M50KEoeSU= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod 
h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= @@ -128,8 +131,6 @@ github.com/PaesslerAG/jsonpath v0.1.1 h1:c1/AToHQMVsduPAa4Vh6xp2U0evy4t8SWp8imEs github.com/PaesslerAG/jsonpath v0.1.1/go.mod h1:lVboNxFGal/VwW6d9JzIy56bUsYAP6tH/x80vjnCseY= github.com/PaloAltoNetworks/pango v0.10.2 h1:Tjn6vIzzAq6Dd7N0mDuiP8w8pz8k5W9zz/TTSUQCsQY= github.com/PaloAltoNetworks/pango v0.10.2/go.mod h1:GztcRnVLur7G+VFG7Z5ZKNFgScLtsycwPMp1qVebE5g= -github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= github.com/aerospike/aerospike-client-go/v7 v7.7.1 h1:lcskBtPZYe6ESObhIEQEp4XO1axYZpaFD3ie4iwr6tg= @@ -242,8 +243,6 @@ github.com/blakesmith/ar v0.0.0-20150311145944-8bd4349a67f2 h1:oMCHnXa6CCCafdPDb github.com/blakesmith/ar v0.0.0-20150311145944-8bd4349a67f2/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI= github.com/bluekeyes/go-gitdiff v0.7.1 h1:graP4ElLRshr8ecu0UtqfNTCHrtSyZd3DABQm/DWesQ= github.com/bluekeyes/go-gitdiff v0.7.1/go.mod h1:QpfYYO1E0fTVHVZAZKiRjtSGY9823iCdvGXBcEzHGbM= -github.com/bsm/sarama-cluster v2.1.14-0.20180625083203-7e67d87a6b3f+incompatible h1:4g18+HnTDwEtO0n7K8B1Kjq+04MEKJRkhJNQ/hb9d5A= -github.com/bsm/sarama-cluster v2.1.14-0.20180625083203-7e67d87a6b3f+incompatible/go.mod h1:r7ao+4tTNXvWm+VRpRJchr2kQhqxgmAp2iEX5W96gMM= github.com/cavaliergopher/rpm v1.2.0 h1:s0h+QeVK252QFTolkhGiMeQ1f+tMeIMhGl8B1HUmGUc= github.com/cavaliergopher/rpm v1.2.0/go.mod h1:R0q3vTqa7RUvPofAZYrnjJ63hh2vngjFfphuXiExVos= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= @@ -274,7 +273,6 @@ github.com/coreos/go-systemd/v22 v22.5.0/go.mod 
h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/cyphar/filepath-securejoin v0.2.5 h1:6iR5tXJ/e6tJZzzdMc1km3Sa7RRIVBKAK32O2s7AYfo= @@ -283,8 +281,6 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/denisenkom/go-mssqldb v0.12.3 h1:pBSGx9Tq67pBOTLmxNuirNTeB8Vjmf886Kx+8Y+8shw= -github.com/denisenkom/go-mssqldb v0.12.3/go.mod h1:k0mtMFOnU+AihqFxPMiF05rtiDrorD1Vrm1KEz5hxDo= github.com/devigned/tab v0.1.2-0.20190607222403-0c15cf42f9a2 h1:6+hM8KeYKV0Z9EIINNqIEDyyIRAcNc2FW+/TUYNmWyw= github.com/devigned/tab v0.1.2-0.20190607222403-0c15cf42f9a2/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= github.com/dgraph-io/badger/v4 v4.4.0 h1:rA48XiDynZLyMdlaJl67p9+lqfqwxlgKtCpYLAio7Zk= @@ -301,7 +297,6 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dlclark/regexp2 v1.4.0 h1:F1rxgk7p4uKjwIQxBs9oAXe5CqrXlCduYEJvrF4u93E= github.com/dlclark/regexp2 v1.4.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= -github.com/dnaeon/go-vcr 
v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/dnephin/pflag v1.0.7 h1:oxONGlWxhmUct0YzKTgrpQv9AUA1wtPBn7zuSjJqptk= github.com/dnephin/pflag v1.0.7/go.mod h1:uxE91IoWURlOiTUIA8Mq5ZZkAv3dPUfZNaT80Zm7OQE= github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI= @@ -320,10 +315,10 @@ github.com/dop251/goja_nodejs v0.0.0-20171011081505-adff31b136e6 h1:RrkoB0pT3gnj github.com/dop251/goja_nodejs v0.0.0-20171011081505-adff31b136e6/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q= -github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/go-resiliency v1.7.0 h1:n3NRTnBn5N0Cbi/IeOHuQn9s2UwVUH7Ga0ZWcP+9JTA= +github.com/eapache/go-resiliency v1.7.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= +github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws= +github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/eclipse/paho.mqtt.golang v1.3.5 h1:sWtmgNxYM9P2sP+xEItMozsR3w0cqZFlqnNN1bdl41Y= @@ -390,8 +385,8 @@ github.com/elastic/mock-es v0.0.0-20240712014503-e5b47ece0015 h1:z8cC8GASpPo8yKl github.com/elastic/mock-es 
v0.0.0-20240712014503-e5b47ece0015/go.mod h1:qH9DX/Dmflz6EAtaks/+2SsdQzecVAKE174Zl66hk7E= github.com/elastic/pkcs8 v1.0.0 h1:HhitlUKxhN288kcNcYkjW6/ouvuwJWd9ioxpjnD9jVA= github.com/elastic/pkcs8 v1.0.0/go.mod h1:ipsZToJfq1MxclVTwpG7U/bgeDtf+0HkUiOxebk95+0= -github.com/elastic/sarama v1.19.1-0.20220310193331-ebc2b0d8eef3 h1:FzA0/n4iMt8ojGDGRoiFPSHFvvdVIvxOxyLtiFnrLBM= -github.com/elastic/sarama v1.19.1-0.20220310193331-ebc2b0d8eef3/go.mod h1:mdtqvCSg8JOxk8PmpTNGyo6wzd4BMm4QXSfDnTXmgkE= +github.com/elastic/sarama v1.19.1-0.20241120141909-c7eabfcee7e5 h1:U7rts7RrrzQSDKkMuECpw9QCafSn2nRp36eRnWyR14E= +github.com/elastic/sarama v1.19.1-0.20241120141909-c7eabfcee7e5/go.mod h1:EEdpKWvuZ46X7OEOENvSH5jEJXosi4fn7xjIeTajf+M= github.com/elastic/tk-btf v0.1.0 h1:T4rbsnfaRH/MZKSLwZFd3sndt/NexsQb0IXWtMQ9PAA= github.com/elastic/tk-btf v0.1.0/go.mod h1:caLQPEcMbyKmPUQb2AsbX3ZAj1yCz06lOxfhn0esLR8= github.com/elastic/toutoumomoma v0.0.0-20240626215117-76e39db18dfb h1:8SvKmGOYyxKi6jB0nvV1lpxEHfIS6tQ40x1BXBhKMsE= @@ -423,7 +418,6 @@ github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8 github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/foxcpp/go-mockdns v0.0.0-20201212160233-ede2f9158d15 h1:nLPjjvpUAODOR6vY/7o0hBIk8iTr19Fvmf8aFx/kC7A= github.com/foxcpp/go-mockdns v0.0.0-20201212160233-ede2f9158d15/go.mod h1:tPg4cp4nseejPd+UKxtCVQ2hUxNTZ7qQZJa7CLriIeo= -github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/go-asn1-ber/asn1-ber v1.5.5 h1:MNHlNMBDgEKD4TcKr36vQN68BA00aDfjIt3/bD50WnA= @@ -491,8 +485,8 @@ github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOW github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= 
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= -github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY= -github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA= +github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A= github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -516,7 +510,6 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/gomodule/redigo v1.8.3 h1:HR0kYDX2RJZvAup8CsiJwxB4dTCSC0AaUq6S4SiLwUc= @@ -605,8 +598,9 @@ github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISH github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod 
h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.0.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= @@ -628,12 +622,12 @@ github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFK github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= -github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= -github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg= +github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= -github.com/jcmturner/gokrb5/v8 v8.4.2 h1:6ZIM6b/JJN0X8UM43ZOM6Z4SJzla+a/u7scXFJzodkA= -github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc= +github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8= +github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= github.com/jcmturner/rpc/v2 v2.0.3 
h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -666,7 +660,6 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/asmfmt v1.3.2 h1:4Ri7ox3EwapiOjCki+hw14RyKk201CN4rzyCJRFLpK4= github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= -github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= @@ -707,6 +700,8 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/microsoft/go-mssqldb v1.7.2 h1:CHkFJiObW7ItKTJfHo1QX7QBBD1iV+mn1eOyRP3b/PA= +github.com/microsoft/go-mssqldb v1.7.2/go.mod h1:kOvZKUdrhhFQmxLZqbwUV0rHkNkZpthMITIb2Ko1IoA= github.com/miekg/dns v1.1.22/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs= github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ= @@ -743,7 +738,6 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 
h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= github.com/montanaflynn/stats v0.7.0 h1:r3y12KyNxj/Sb/iOE46ws+3mS1+MZca1wlHQFPsY/JU= github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= @@ -779,13 +773,10 @@ github.com/otiai10/mint v1.5.1/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAq github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2 h1:CXwSGu/LYmbjEab5aMCs5usQRVBGThelUKBNnoSOuso= github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2/go.mod h1:L3UMQOThbttwfYRNFOWLLVXMhk5Lkio4GGOtw5UrxS0= github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= -github.com/pierrec/lz4 v2.6.0+incompatible h1:Ix9yFKn1nSPBLFl/yZknTp8TU5G4Ps0JDmguYK6iH1A= -github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pierrre/gotestcover v0.0.0-20160517101806-924dca7d15f0 h1:i5VIxp6QB8oWZ8IkK8zrDgeT6ORGIUeiN+61iETwJbI= github.com/pierrre/gotestcover v0.0.0-20160517101806-924dca7d15f0/go.mod h1:4xpMLz7RBWyB+ElzHu8Llua96TRCB3YwX+l5EP1wmHk= -github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -867,7 +858,6 @@ 
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= @@ -899,8 +889,6 @@ github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= -github.com/xdg/scram v1.0.3/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= -github.com/xdg/stringprep v1.0.3/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a h1:fZHgsYlfvtyqToslyjUt3VOPF4J7aK/3MPcK7xp3PDk= github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a/go.mod h1:ul22v+Nro/R083muKhosV54bj5niojjWZvU8xrevuH4= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -997,10 +985,7 @@ golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 
-golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= @@ -1048,10 +1033,10 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= @@ -1237,7 +1222,6 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 
h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/gotestsum v1.7.0 h1:RwpqwwFKBAa2h+F6pMEGpE707Edld0etUD3GhqqhDNc= diff --git a/libbeat/common/kafka/sasl.go b/libbeat/common/kafka/sasl.go index 9a6b3314b8b2..ca9df078ebcd 100644 --- a/libbeat/common/kafka/sasl.go +++ b/libbeat/common/kafka/sasl.go @@ -21,7 +21,7 @@ import ( "fmt" "strings" - "github.com/Shopify/sarama" + "github.com/elastic/sarama" ) type SaslConfig struct { diff --git a/libbeat/common/kafka/version.go b/libbeat/common/kafka/version.go index 3df44b86216a..c8f0cca99fba 100644 --- a/libbeat/common/kafka/version.go +++ b/libbeat/common/kafka/version.go @@ -20,7 +20,7 @@ package kafka import ( "fmt" - "github.com/Shopify/sarama" + "github.com/elastic/sarama" ) // Version is a kafka version @@ -31,6 +31,8 @@ var ( // We also allow versions to be specified as a prefix, e.g. "1", // understood as referencing the most recent version starting with "1". // truncatedKafkaVersions stores a lookup of the abbreviations we accept. 
+ + //Ref for version mapping - https://kafka.apache.org/downloads truncatedKafkaVersions = map[string]sarama.KafkaVersion{ "0.8.2": sarama.V0_8_2_2, "0.8": sarama.V0_8_2_2, @@ -57,7 +59,16 @@ var ( "2.4": sarama.V2_4_0_0, "2.5": sarama.V2_5_0_0, "2.6": sarama.V2_6_0_0, + "2.7": sarama.V2_7_0_0, + "2.8": sarama.V2_8_0_0, "2": sarama.V2_6_0_0, + + "3": sarama.V3_1_1_0, + "3.2": sarama.V3_2_0_0, + "3.3": sarama.V3_3_1_0, + "3.4": sarama.V3_4_0_0, + "3.5": sarama.V3_5_1_0, + "3.6": sarama.V3_6_0_0, } ) diff --git a/libbeat/common/kafka/version_test.go b/libbeat/common/kafka/version_test.go index 19d5b04ad233..ae0f0a385e05 100644 --- a/libbeat/common/kafka/version_test.go +++ b/libbeat/common/kafka/version_test.go @@ -20,7 +20,7 @@ package kafka import ( "testing" - "github.com/Shopify/sarama" + "github.com/elastic/sarama" ) func TestVersionGet(t *testing.T) { @@ -62,7 +62,7 @@ func TestSaramaUpdate(t *testing.T) { // If any of these versions are considered valid by our parsing code, // it means someone updated sarama without updating the parsing code // for the new version. Gently remind them. 
- flagVersions := []Version{"2.8.1", "2.9.0"} + flagVersions := []Version{"3.7.0", "3.8.0"} for _, v := range flagVersions { if _, ok := v.Get(); ok { t.Fatalf( @@ -71,7 +71,7 @@ func TestSaramaUpdate(t *testing.T) { "- Update truncatedKafkaVersions in libbeat/common/kafka/version.go\n"+ "- Update the documentation to list the latest version:\n"+ " * libbeat/outputs/kafka/docs/kafka.asciidoc\n"+ - " * filebeat/docs/inputs/inputs-kafka.asciidoc\n"+ + " * filebeat/docs/inputs/input-kafka.asciidoc\n"+ "- Update TestSaramaUpdate in libbeat/common/kafka/version_test.go\n", v) diff --git a/libbeat/outputs/kafka/client.go b/libbeat/outputs/kafka/client.go index 1780f1392b31..76d5f9b8a78c 100644 --- a/libbeat/outputs/kafka/client.go +++ b/libbeat/outputs/kafka/client.go @@ -26,9 +26,10 @@ import ( "sync/atomic" "time" - "github.com/Shopify/sarama" "github.com/eapache/go-resiliency/breaker" + "github.com/elastic/sarama" + "github.com/elastic/beats/v7/libbeat/common/fmtstr" "github.com/elastic/beats/v7/libbeat/outputs" "github.com/elastic/beats/v7/libbeat/outputs/codec" @@ -113,7 +114,7 @@ func newKafkaClient( return c, nil } -func (c *client) Connect() error { +func (c *client) Connect(_ context.Context) error { c.mux.Lock() defer c.mux.Unlock() @@ -257,7 +258,11 @@ func (c *client) successWorker(ch <-chan *sarama.ProducerMessage) { defer c.log.Debug("Stop kafka ack worker") for libMsg := range ch { - msg := libMsg.Metadata.(*message) + msg, ok := libMsg.Metadata.(*message) + if !ok { + c.log.Debug("Failed to assert libMsg.Metadata to *message") + return + } msg.ref.done() } } @@ -268,7 +273,11 @@ func (c *client) errorWorker(ch <-chan *sarama.ProducerError) { defer c.log.Debug("Stop kafka error handler") for errMsg := range ch { - msg := errMsg.Msg.Metadata.(*message) + msg, ok := errMsg.Msg.Metadata.(*message) + if !ok { + c.log.Debug("Failed to assert libMsg.Metadata to *message") + return + } msg.ref.fail(msg, errMsg.Err) if errors.Is(errMsg.Err, breaker.ErrBreakerOpen) 
{ diff --git a/libbeat/outputs/kafka/config.go b/libbeat/outputs/kafka/config.go index 4bdd63d59c50..9300e385aa15 100644 --- a/libbeat/outputs/kafka/config.go +++ b/libbeat/outputs/kafka/config.go @@ -25,8 +25,6 @@ import ( "strings" "time" - "github.com/Shopify/sarama" - "github.com/elastic/beats/v7/libbeat/common/cfgwarn" "github.com/elastic/beats/v7/libbeat/common/fmtstr" "github.com/elastic/beats/v7/libbeat/common/kafka" @@ -38,6 +36,7 @@ import ( "github.com/elastic/elastic-agent-libs/monitoring" "github.com/elastic/elastic-agent-libs/monitoring/adapter" "github.com/elastic/elastic-agent-libs/transport/tlscommon" + "github.com/elastic/sarama" ) type backoffConfig struct { diff --git a/libbeat/outputs/kafka/kafka.go b/libbeat/outputs/kafka/kafka.go index e8c0d75aa4d6..02dadafd9723 100644 --- a/libbeat/outputs/kafka/kafka.go +++ b/libbeat/outputs/kafka/kafka.go @@ -20,14 +20,13 @@ package kafka import ( "fmt" - "github.com/Shopify/sarama" - "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/outputs" "github.com/elastic/beats/v7/libbeat/outputs/codec" "github.com/elastic/beats/v7/libbeat/outputs/outil" "github.com/elastic/elastic-agent-libs/config" "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/sarama" ) const ( diff --git a/libbeat/outputs/kafka/kafka_integration_test.go b/libbeat/outputs/kafka/kafka_integration_test.go index e9abc559774d..e0f42e2667aa 100644 --- a/libbeat/outputs/kafka/kafka_integration_test.go +++ b/libbeat/outputs/kafka/kafka_integration_test.go @@ -30,9 +30,10 @@ import ( "testing" "time" - "github.com/Shopify/sarama" "github.com/stretchr/testify/assert" + "github.com/elastic/sarama" + "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common/fmtstr" "github.com/elastic/beats/v7/libbeat/outputs" @@ -280,7 +281,7 @@ func TestKafkaPublish(t *testing.T) { output, ok := grp.Clients[0].(*client) assert.True(t, ok, "grp.Clients[0] didn't contain a ptr to client") - if 
err := output.Connect(); err != nil { + if err := output.Connect(context.Background()); err != nil { t.Fatal(err) } assert.Equal(t, output.index, "testbeat") diff --git a/libbeat/outputs/kafka/log.go b/libbeat/outputs/kafka/log.go index 2f7bfa948483..1745bf2000fa 100644 --- a/libbeat/outputs/kafka/log.go +++ b/libbeat/outputs/kafka/log.go @@ -18,9 +18,8 @@ package kafka import ( - "github.com/Shopify/sarama" - "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/sarama" ) type kafkaLogger struct { diff --git a/libbeat/outputs/kafka/message.go b/libbeat/outputs/kafka/message.go index bf77f32ac25d..af034736b666 100644 --- a/libbeat/outputs/kafka/message.go +++ b/libbeat/outputs/kafka/message.go @@ -20,9 +20,8 @@ package kafka import ( "time" - "github.com/Shopify/sarama" - "github.com/elastic/beats/v7/libbeat/publisher" + "github.com/elastic/sarama" ) type message struct { @@ -40,8 +39,6 @@ type message struct { data publisher.Event } -var kafkaMessageKey interface{} = int(0) - func (m *message) initProducerMessage() { m.msg = sarama.ProducerMessage{ Metadata: m, diff --git a/libbeat/outputs/kafka/partition.go b/libbeat/outputs/kafka/partition.go index 8f56fde53e41..d2bf311132fd 100644 --- a/libbeat/outputs/kafka/partition.go +++ b/libbeat/outputs/kafka/partition.go @@ -26,11 +26,10 @@ import ( "math/rand" "strconv" - "github.com/Shopify/sarama" - "github.com/elastic/elastic-agent-libs/config" "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" + "github.com/elastic/sarama" ) type partitionBuilder func(*logp.Logger, *config.C) (func() partitioner, error) @@ -117,7 +116,11 @@ func (p *messagePartitioner) Partition( libMsg *sarama.ProducerMessage, numPartitions int32, ) (int32, error) { - msg := libMsg.Metadata.(*message) + msg, ok := libMsg.Metadata.(*message) + if !ok { + return 0, fmt.Errorf("failed to assert libMsg.Metadata to *message") + } + if numPartitions == p.partitions { // if reachable is false, this is 
always true if 0 <= msg.partition && msg.partition < numPartitions { return msg.partition, nil @@ -126,13 +129,13 @@ func (p *messagePartitioner) Partition( partition, err := p.p(msg, numPartitions) if err != nil { - return 0, nil + return 0, nil //nolint:nilerr //ignoring this error } msg.partition = partition if _, err := msg.data.Cache.Put("partition", partition); err != nil { - return 0, fmt.Errorf("setting kafka partition in publisher event failed: %v", err) + return 0, fmt.Errorf("setting kafka partition in publisher event failed: %w", err) } p.partitions = numPartitions diff --git a/libbeat/outputs/kafka/partition_test.go b/libbeat/outputs/kafka/partition_test.go index bebc03f4a446..0a35c2bfdf0e 100644 --- a/libbeat/outputs/kafka/partition_test.go +++ b/libbeat/outputs/kafka/partition_test.go @@ -25,9 +25,10 @@ import ( "testing" "time" - "github.com/Shopify/sarama" "github.com/stretchr/testify/assert" + "github.com/elastic/sarama" + "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/publisher" diff --git a/libbeat/outputs/redis/backoff.go b/libbeat/outputs/redis/backoff.go index 2abc1f846f0a..42f9db1c2854 100644 --- a/libbeat/outputs/redis/backoff.go +++ b/libbeat/outputs/redis/backoff.go @@ -19,6 +19,7 @@ package redis import ( "context" + "errors" "time" "github.com/gomodule/redigo/redis" @@ -61,7 +62,7 @@ func newBackoffClient(client *client, init, max time.Duration) *backoffClient { } func (b *backoffClient) Connect(ctx context.Context) error { - err := b.client.Connect() + err := b.client.Connect(ctx) if err != nil { // give the client a chance to promote an internal error to a network error. 
b.updateFailReason(err) @@ -102,7 +103,8 @@ func (b *backoffClient) updateFailReason(err error) { return } - if _, ok := err.(redis.Error); ok { + var redisErr *redis.Error + if errors.As(err, &redisErr) { b.reason = failRedis } else { b.reason = failOther diff --git a/libbeat/outputs/redis/client.go b/libbeat/outputs/redis/client.go index 9f5c9812dd10..db3ec5a3b433 100644 --- a/libbeat/outputs/redis/client.go +++ b/libbeat/outputs/redis/client.go @@ -90,7 +90,7 @@ func newClient( } } -func (c *client) Connect() error { +func (c *client) Connect(_ context.Context) error { c.log.Debug("connect") err := c.Client.Connect() if err != nil { diff --git a/libbeat/tests/integration/kafka_test.go b/libbeat/tests/integration/kafka_test.go new file mode 100644 index 000000000000..ceaf75b01121 --- /dev/null +++ b/libbeat/tests/integration/kafka_test.go @@ -0,0 +1,89 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +//go:build integration + +package integration + +import ( + "fmt" + "testing" + "time" + + "github.com/elastic/sarama" +) + +var ( + // https://github.com/elastic/sarama/blob/c7eabfcee7e5bcd7d0071f0ece4d6bec8c33928a/config_test.go#L14-L17 + // The version of MockBroker used when this test was written only supports the lowest protocol version by default. + // Version incompatibilities will result in message decoding errors between the mock and the beat. + kafkaVersion = sarama.MinVersion + kafkaTopic = "test_topic" + kafkaCfg = ` +mockbeat: +logging: + level: debug + selectors: + - publisher_pipeline_output + - kafka +queue.mem: + events: 4096 + flush.timeout: 0s +output.kafka: + topic: %s + version: %s + hosts: + - %s + backoff: + init: 0.1s + max: 0.2s +` +) + +// TestKafkaOutputCanConnectAndPublish ensures the beat Kafka output can successfully produce messages to Kafka. +// Regression test for https://github.com/elastic/beats/issues/41823 where the Kafka output would +// panic on the first Publish because it's Connect method was no longer called. +func TestKafkaOutputCanConnectAndPublish(t *testing.T) { + // Create a Mock Kafka broker that will listen on localhost on a random unallocated port. + // The reference configuration was taken from https://github.com/elastic/sarama/blob/c7eabfcee7e5bcd7d0071f0ece4d6bec8c33928a/async_producer_test.go#L141. + leader := sarama.NewMockBroker(t, 1) + defer leader.Close() + + // The mock broker must respond to a single metadata request. + metadataResponse := new(sarama.MetadataResponse) + metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) + metadataResponse.AddTopicPartition(kafkaTopic, 0, leader.BrokerID(), nil, nil, nil, sarama.ErrNoError) + leader.Returns(metadataResponse) + + // The mock broker must return a single produce response. If no produce request is received, the test will fail. + // This guarantees that mockbeat successfully produced a message to Kafka and connectivity is established. 
+ prodSuccess := new(sarama.ProduceResponse) + prodSuccess.AddTopicPartition(kafkaTopic, 0, sarama.ErrNoError) + leader.Returns(prodSuccess) + + // Start mockbeat with the appropriate configuration. + mockbeat := NewBeat(t, "mockbeat", "../../libbeat.test") + mockbeat.WriteConfigFile(fmt.Sprintf(kafkaCfg, kafkaTopic, kafkaVersion, leader.Addr())) + mockbeat.Start() + + // Wait for mockbeat to log that it successfully published a batch to Kafka. + // This ensures that mockbeat received the expected produce response configured above. + mockbeat.WaitForLogs( + `finished kafka batch`, + 10*time.Second, + "did not find finished batch log") +} diff --git a/metricbeat/cmd/root.go b/metricbeat/cmd/root.go index 497b71bed8ad..872f05b8b356 100644 --- a/metricbeat/cmd/root.go +++ b/metricbeat/cmd/root.go @@ -43,9 +43,6 @@ const ( Name = "metricbeat" ) -// RootCmd to handle beats cli -var RootCmd *cmd.BeatsRootCmd - // withECSVersion is a modifier that adds ecs.version to events. var withECSVersion = processing.WithFields(mapstr.M{ "ecs": mapstr.M{ @@ -60,7 +57,7 @@ func MetricbeatSettings(moduleNameSpace string) instance.Settings { if moduleNameSpace == "" { moduleNameSpace = "module" } - var runFlags = pflag.NewFlagSet(Name, pflag.ExitOnError) + runFlags := pflag.NewFlagSet(Name, pflag.ExitOnError) runFlags.AddGoFlag(flag.CommandLine.Lookup("system.hostfs")) cfgfile.AddAllowedBackwardsCompatibleFlag("system.hostfs") return instance.Settings{ @@ -82,7 +79,3 @@ func Initialize(settings instance.Settings) *cmd.BeatsRootCmd { rootCmd.TestCmd.AddCommand(test.GenTestModulesCmd(Name, "", beater.DefaultTestModulesCreator())) return rootCmd } - -func init() { - RootCmd = Initialize(MetricbeatSettings("")) -} diff --git a/metricbeat/docs/modules/gcp.asciidoc b/metricbeat/docs/modules/gcp.asciidoc index 00c1536e5c04..11f3b14eaff0 100644 --- a/metricbeat/docs/modules/gcp.asciidoc +++ b/metricbeat/docs/modules/gcp.asciidoc @@ -346,6 +346,7 @@ metricbeat.modules: credentials_file_path: 
"your JSON credentials file path" exclude_labels: false period: 1m + location_label: "resource.labels.zone" metrics: - aligner: ALIGN_NONE service: compute diff --git a/metricbeat/main.go b/metricbeat/main.go index 5dcea740b21f..8515bca79d80 100644 --- a/metricbeat/main.go +++ b/metricbeat/main.go @@ -32,7 +32,7 @@ import ( ) func main() { - if err := cmd.RootCmd.Execute(); err != nil { + if err := cmd.Initialize(cmd.MetricbeatSettings("")).Execute(); err != nil { os.Exit(1) } } diff --git a/metricbeat/main_test.go b/metricbeat/main_test.go index 495ce5787e04..be50210575e7 100644 --- a/metricbeat/main_test.go +++ b/metricbeat/main_test.go @@ -21,21 +21,27 @@ package main import ( "flag" + "os" "testing" "github.com/elastic/beats/v7/libbeat/cfgfile" + cmd "github.com/elastic/beats/v7/libbeat/cmd" "github.com/elastic/beats/v7/libbeat/tests/system/template" - "github.com/elastic/beats/v7/metricbeat/cmd" + mbcmd "github.com/elastic/beats/v7/metricbeat/cmd" ) -var systemTest *bool +var ( + systemTest *bool + mbCommand *cmd.BeatsRootCmd +) func init() { testing.Init() systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") - cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) + mbCommand = mbcmd.Initialize(mbcmd.MetricbeatSettings("")) + mbCommand.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) cfgfile.AddAllowedBackwardsCompatibleFlag("systemTest") - cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) + mbCommand.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) cfgfile.AddAllowedBackwardsCompatibleFlag("test.coverprofile") } @@ -43,10 +49,12 @@ func init() { func TestSystem(t *testing.T) { cfgfile.ConvertFlagsForBackwardsCompatibility() if *systemTest { - main() + if err := mbCommand.Execute(); err != nil { + os.Exit(1) + } } } func TestTemplate(t *testing.T) { - template.TestTemplate(t, cmd.Name, false) + template.TestTemplate(t, 
mbCommand.Name(), false) } diff --git a/metricbeat/module/consul/agent/data_integration_test.go b/metricbeat/module/consul/agent/data_integration_test.go index cee25f69311d..cd756451cbd3 100644 --- a/metricbeat/module/consul/agent/data_integration_test.go +++ b/metricbeat/module/consul/agent/data_integration_test.go @@ -22,7 +22,7 @@ package agent import ( "testing" - _ "github.com/denisenkom/go-mssqldb" + _ "github.com/microsoft/go-mssqldb" "github.com/elastic/beats/v7/libbeat/tests/compose" mbtest "github.com/elastic/beats/v7/metricbeat/mb/testing" diff --git a/metricbeat/module/jolokia/_meta/Dockerfile b/metricbeat/module/jolokia/_meta/Dockerfile index 769749489acd..f70de549ce5b 100644 --- a/metricbeat/module/jolokia/_meta/Dockerfile +++ b/metricbeat/module/jolokia/_meta/Dockerfile @@ -6,7 +6,8 @@ ENV TC apache-tomcat-${TOMCAT_VERSION} ARG JOLOKIA_VERSION RUN apk update && \ - apk add curl openssl ca-certificates bash + apk upgrade --no-cache && \ + apk add curl openssl ca-certificates bash HEALTHCHECK --interval=1s --retries=90 CMD curl -f localhost:8778/jolokia/ EXPOSE 8778 diff --git a/metricbeat/module/jolokia/docker-compose.yml b/metricbeat/module/jolokia/docker-compose.yml index 6ad218468443..ddd274a2c880 100644 --- a/metricbeat/module/jolokia/docker-compose.yml +++ b/metricbeat/module/jolokia/docker-compose.yml @@ -1,5 +1,3 @@ -version: '2.3' - services: jolokia: image: docker.elastic.co/integrations-ci/beats-jolokia:${JOLOKIA_VERSION:-1.5.0}-1 diff --git a/metricbeat/module/kafka/README.md b/metricbeat/module/kafka/README.md index dc31ee0b82f0..5ecfc6b0aa1b 100644 --- a/metricbeat/module/kafka/README.md +++ b/metricbeat/module/kafka/README.md @@ -5,12 +5,18 @@ prepare an environment and manually test Kafka module. #### Kafka container -In order to have a Kafka instance up and running the best way to go is to use the container that is used by the CI tests. 
+In order to have a Kafka instance up and running the best way to go is to use the container that is used by the CI tests. Make sure to add below entry to `/etc/hosts` + +``` +127.0.0.1 kafka +``` + To bring this container up simply run the tests for Kafka module: `go test -tags integration ./metricbeat/module/kafka/...` + After the tests have been completed, the Kafka container should be still running. Verify with: ```console diff --git a/metricbeat/module/kafka/broker.go b/metricbeat/module/kafka/broker.go index 9d5046447300..be02b18ffc9e 100644 --- a/metricbeat/module/kafka/broker.go +++ b/metricbeat/module/kafka/broker.go @@ -19,6 +19,7 @@ package kafka import ( "crypto/tls" + "errors" "fmt" "io" "net" @@ -26,10 +27,9 @@ import ( "strings" "time" - "github.com/Shopify/sarama" - "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/common/kafka" + "github.com/elastic/sarama" ) // Version returns a kafka version from its string representation @@ -302,13 +302,20 @@ func queryMetadataWithRetry( b *sarama.Broker, cfg *sarama.Config, topics []string, -) (r *sarama.MetadataResponse, err error) { - err = withRetry(b, cfg, func() (e error) { +) (*sarama.MetadataResponse, error) { + var r *sarama.MetadataResponse + var err error + + err = withRetry(b, cfg, func() error { requ := &sarama.MetadataRequest{Topics: topics} - r, e = b.GetMetadata(requ) - return + r, err = b.GetMetadata(requ) + return err }) - return + + if err != nil { + return nil, err + } + return r, nil } func closeBroker(b *sarama.Broker) { @@ -354,22 +361,20 @@ func checkRetryQuery(err error) (retry, reconnect bool) { return false, false } - if err == io.EOF { + if errors.Is(err, io.EOF) { return true, true } - k, ok := err.(sarama.KError) - if !ok { - return false, false - } - - switch k { - case sarama.ErrLeaderNotAvailable, sarama.ErrReplicaNotAvailable, - sarama.ErrOffsetsLoadInProgress, sarama.ErrRebalanceInProgress: - return true, false - case sarama.ErrRequestTimedOut, 
sarama.ErrBrokerNotAvailable, - sarama.ErrNetworkException: - return true, true + var k *sarama.KError + if errors.As(err, &k) { + switch *k { + case sarama.ErrLeaderNotAvailable, sarama.ErrReplicaNotAvailable, + sarama.ErrOffsetsLoadInProgress, sarama.ErrRebalanceInProgress: + return true, false + case sarama.ErrRequestTimedOut, sarama.ErrBrokerNotAvailable, + sarama.ErrNetworkException: + return true, true + } } return false, false diff --git a/metricbeat/module/kafka/consumergroup/consumergroup.go b/metricbeat/module/kafka/consumergroup/consumergroup.go index 5fa41b13f019..02d0c7a4a714 100644 --- a/metricbeat/module/kafka/consumergroup/consumergroup.go +++ b/metricbeat/module/kafka/consumergroup/consumergroup.go @@ -52,7 +52,7 @@ var debugf = logp.MakeDebug("kafka") // New creates a new instance of the MetricSet. func New(base mb.BaseMetricSet) (mb.MetricSet, error) { opts := kafka.MetricSetOptions{ - Version: "0.9.0.0", + Version: "2.2.0", } ms, err := kafka.NewMetricSet(base, opts) diff --git a/metricbeat/module/kafka/consumergroup/consumergroup_integration_test.go b/metricbeat/module/kafka/consumergroup/consumergroup_integration_test.go index aafb5250499d..85035ee7745e 100644 --- a/metricbeat/module/kafka/consumergroup/consumergroup_integration_test.go +++ b/metricbeat/module/kafka/consumergroup/consumergroup_integration_test.go @@ -25,7 +25,7 @@ import ( "testing" "time" - saramacluster "github.com/bsm/sarama-cluster" + "github.com/elastic/sarama" "github.com/elastic/beats/v7/libbeat/tests/compose" "github.com/elastic/beats/v7/metricbeat/mb" @@ -45,7 +45,7 @@ func TestData(t *testing.T) { compose.UpWithAdvertisedHostEnvFileForPort(9092), ) - c, err := startConsumer(t, service.HostForPort(9092), "metricbeat-test") + c, err := startConsumer(t, service.HostForPort(9092), "test-group") if err != nil { t.Fatal(fmt.Errorf("starting kafka consumer: %w", err)) } @@ -68,7 +68,7 @@ func TestFetch(t *testing.T) { compose.UpWithAdvertisedHostEnvFileForPort(9092), ) - c, 
err := startConsumer(t, service.HostForPort(9092), "metricbeat-test") + c, err := startConsumer(t, service.HostForPort(9092), "test-group") if err != nil { t.Fatal(fmt.Errorf("starting kafka consumer: %w", err)) } @@ -93,19 +93,25 @@ func TestFetch(t *testing.T) { } } -func startConsumer(t *testing.T, host string, topic string) (io.Closer, error) { +func startConsumer(t *testing.T, host string, groupID string) (io.Closer, error) { brokers := []string{host} - topics := []string{topic} - config := saramacluster.NewConfig() + + config := sarama.NewConfig() config.Net.SASL.Enable = true config.Net.SASL.User = kafkaSASLConsumerUsername config.Net.SASL.Password = kafkaSASLConsumerPassword - // The test panics unless CommitInterval is set due to the following bug in sarama: - // https://github.com/Shopify/sarama/issues/1638 - // To work around the issue we need to set CommitInterval, but now sarama emits - // a deprecation warning. - config.Consumer.Offsets.CommitInterval = 1 * time.Second - return saramacluster.NewConsumer(brokers, "test-group", topics, config) + + config.Consumer.Offsets.AutoCommit.Enable = true + config.Consumer.Offsets.AutoCommit.Interval = 1 * time.Second + + // Create a new consumer group + consumerGroup, err := sarama.NewConsumerGroup(brokers, groupID, config) + if err != nil { + t.Fatalf("Error creating consumer group: %v", err) + return nil, err + } + + return consumerGroup, nil } func getConfig(host string) map[string]interface{} { diff --git a/metricbeat/module/kafka/consumergroup/mock_test.go b/metricbeat/module/kafka/consumergroup/mock_test.go index e25abf46a075..bf8a133ccb15 100644 --- a/metricbeat/module/kafka/consumergroup/mock_test.go +++ b/metricbeat/module/kafka/consumergroup/mock_test.go @@ -21,9 +21,8 @@ import ( "fmt" "math/rand" - "github.com/Shopify/sarama" - "github.com/elastic/beats/v7/metricbeat/module/kafka" + "github.com/elastic/sarama" ) type mockClient struct { @@ -125,7 +124,7 @@ func makeFetchGroupOffsets( for i, offset := 
range partition { T[int32(i)] = &sarama.OffsetFetchResponseBlock{ - Offset: int64(offset), + Offset: offset, } } } diff --git a/metricbeat/module/kafka/consumergroup/query.go b/metricbeat/module/kafka/consumergroup/query.go index 1dc6bdb3c43a..b0659ef433a6 100644 --- a/metricbeat/module/kafka/consumergroup/query.go +++ b/metricbeat/module/kafka/consumergroup/query.go @@ -18,11 +18,10 @@ package consumergroup import ( - "github.com/Shopify/sarama" - "github.com/elastic/beats/v7/metricbeat/module/kafka" "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" + "github.com/elastic/sarama" ) type client interface { diff --git a/metricbeat/module/kafka/docker-compose.yml b/metricbeat/module/kafka/docker-compose.yml index 9865faf5116e..76bea13fcf8c 100644 --- a/metricbeat/module/kafka/docker-compose.yml +++ b/metricbeat/module/kafka/docker-compose.yml @@ -1,5 +1,3 @@ -version: '2.3' - services: kafka: image: docker.elastic.co/integrations-ci/beats-kafka:${KAFKA_VERSION:-2.2.2}-2 diff --git a/metricbeat/module/kafka/partition/partition.go b/metricbeat/module/kafka/partition/partition.go index 486c9a79e24d..c71aa6198be4 100644 --- a/metricbeat/module/kafka/partition/partition.go +++ b/metricbeat/module/kafka/partition/partition.go @@ -21,13 +21,12 @@ import ( "errors" "fmt" - "github.com/Shopify/sarama" - "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/mb/parse" "github.com/elastic/beats/v7/metricbeat/module/kafka" "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" + "github.com/elastic/sarama" ) // init registers the partition MetricSet with the central registry. @@ -52,7 +51,7 @@ var debugf = logp.MakeDebug("kafka") // New creates a new instance of the partition MetricSet. 
func New(base mb.BaseMetricSet) (mb.MetricSet, error) { opts := kafka.MetricSetOptions{ - Version: "0.8.2.0", + Version: "2.2.0", } ms, err := kafka.NewMetricSet(base, opts) @@ -125,7 +124,7 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { err = errFailQueryOffset } - msg := fmt.Errorf("Failed to query kafka partition (%v:%v) offsets: %v", + msg := fmt.Errorf("failed to query kafka partition (%v:%v) offsets: %w", topic.Name, partition.ID, err) m.Logger().Warn(msg) r.Error(msg) diff --git a/metricbeat/module/kafka/partition/partition_integration_test.go b/metricbeat/module/kafka/partition/partition_integration_test.go index b9e91b943b92..b7d992ba6830 100644 --- a/metricbeat/module/kafka/partition/partition_integration_test.go +++ b/metricbeat/module/kafka/partition/partition_integration_test.go @@ -26,9 +26,10 @@ import ( "testing" "time" - "github.com/Shopify/sarama" "github.com/stretchr/testify/assert" + "github.com/elastic/sarama" + "github.com/elastic/beats/v7/libbeat/tests/compose" mbtest "github.com/elastic/beats/v7/metricbeat/mb/testing" "github.com/elastic/elastic-agent-libs/logp" @@ -107,13 +108,13 @@ func TestTopic(t *testing.T) { // Its possible that other topics exists -> select the right data for _, data := range dataBefore { if data.ModuleFields["topic"].(mapstr.M)["name"] == testTopic { - offsetBefore = data.MetricSetFields["offset"].(mapstr.M)["newest"].(int64) + offsetBefore, _ = data.MetricSetFields["offset"].(mapstr.M)["newest"].(int64) } } for _, data := range dataAfter { if data.ModuleFields["topic"].(mapstr.M)["name"] == testTopic { - offsetAfter = data.MetricSetFields["offset"].(mapstr.M)["newest"].(int64) + offsetAfter, _ = data.MetricSetFields["offset"].(mapstr.M)["newest"].(int64) } } diff --git a/metricbeat/module/system/raid/blockinfo/getdev.go b/metricbeat/module/system/raid/blockinfo/getdev.go index 02527c806367..7c92b0fc8815 100644 --- a/metricbeat/module/system/raid/blockinfo/getdev.go +++ 
b/metricbeat/module/system/raid/blockinfo/getdev.go @@ -19,14 +19,15 @@ package blockinfo import ( "fmt" - "io/ioutil" "os" "path/filepath" + + "github.com/elastic/beats/v7/metricbeat/mb" ) // ListAll lists all the multi-disk devices in a RAID array func ListAll(path string) ([]MDDevice, error) { - dir, err := ioutil.ReadDir(path) + dir, err := os.ReadDir(path) if err != nil { return nil, fmt.Errorf("could not read directory: %w", err) } @@ -44,7 +45,7 @@ func ListAll(path string) ([]MDDevice, error) { } if len(mds) == 0 { - return nil, fmt.Errorf("no matches from path %s", path) + return nil, mb.PartialMetricsError{Err: fmt.Errorf("no RAID devices found. You have probably enabled the RAID metrics on a non-RAID system.")} } return mds, nil @@ -69,8 +70,5 @@ func getMDDevice(path string) (MDDevice, error) { // Right now, we're doing this by looking for an `md` directory in the device dir. func isMD(path string) bool { _, err := os.Stat(filepath.Join(path, "md")) - if err != nil { - return false - } - return true + return err == nil } diff --git a/metricbeat/module/system/raid/raid.go b/metricbeat/module/system/raid/raid.go index 191027657d7f..7b07e36c4d2d 100644 --- a/metricbeat/module/system/raid/raid.go +++ b/metricbeat/module/system/raid/raid.go @@ -41,8 +41,11 @@ type MetricSet struct { // New creates a new instance of the raid metricset. 
func New(base mb.BaseMetricSet) (mb.MetricSet, error) { + sys, ok := base.Module().(resolve.Resolver) + if !ok { + return nil, fmt.Errorf("unexpected module type: %T", base.Module()) + } - sys := base.Module().(resolve.Resolver) return &MetricSet{ BaseMetricSet: base, @@ -62,7 +65,7 @@ func blockto1024(b int64) int64 { func (m *MetricSet) Fetch(r mb.ReporterV2) error { devices, err := blockinfo.ListAll(m.mod.ResolveHostFS("/sys/block")) if err != nil { - return fmt.Errorf("failed to parse sysfs: %w", err) + return fmt.Errorf("failed to list RAID devices: %w", err) } for _, blockDev := range devices { diff --git a/metricbeat/module/system/raid/raid_test.go b/metricbeat/module/system/raid/raid_test.go index 4c35394413af..28b3358f685d 100644 --- a/metricbeat/module/system/raid/raid_test.go +++ b/metricbeat/module/system/raid/raid_test.go @@ -18,10 +18,14 @@ package raid import ( + "errors" + "os" + "path/filepath" "testing" "github.com/stretchr/testify/assert" + "github.com/elastic/beats/v7/metricbeat/mb" mbtest "github.com/elastic/beats/v7/metricbeat/mb/testing" _ "github.com/elastic/beats/v7/metricbeat/module/system" ) @@ -46,6 +50,22 @@ func TestFetch(t *testing.T) { events[0].BeatEvent("system", "raid").Fields.StringToPrint()) } +func TestFetchNoRAID(t *testing.T) { + // Ensure that we return partial metrics when no RAID devices are present. + tmpDir := t.TempDir() + assert.NoError(t, os.MkdirAll(filepath.Join(tmpDir, "sys/block"), 0755)) + c := getConfig() + c["hostfs"] = tmpDir + + f := mbtest.NewReportingMetricSetV2Error(t, c) + events, errs := mbtest.ReportingFetchV2Error(f) + + assert.Len(t, errs, 1) + assert.ErrorAs(t, errors.Join(errs...), &mb.PartialMetricsError{}) + assert.Contains(t, errors.Join(errs...).Error(), "failed to list RAID devices: no RAID devices found. 
You have probably enabled the RAID metrics on a non-RAID system.") + assert.Empty(t, events) +} + func getConfig() map[string]interface{} { return map[string]interface{}{ "module": "system", diff --git a/testing/environments/docker/kafka/Dockerfile b/testing/environments/docker/kafka/Dockerfile index afb2df2d9d7c..58fbdb0ddeda 100644 --- a/testing/environments/docker/kafka/Dockerfile +++ b/testing/environments/docker/kafka/Dockerfile @@ -1,13 +1,13 @@ FROM debian:buster -ENV KAFKA_HOME /kafka +ENV KAFKA_HOME=/kafka # Controls the hostname advertised within the Docker network, should generally match the container # name in a docker-compose file. -ENV KAFKA_ADVERTISED_HOST kafka +ENV KAFKA_ADVERTISED_HOST=kafka ENV KAFKA_LOGS_DIR="/kafka-logs" -ENV KAFKA_VERSION 2.2.2 -ENV _JAVA_OPTIONS "-Djava.net.preferIPv4Stack=true" +ENV KAFKA_VERSION=2.2.2 +ENV _JAVA_OPTIONS="-Djava.net.preferIPv4Stack=true" ENV TERM=linux RUN apt-get update && apt-get install -y curl openjdk-11-jre-headless netcat-openbsd diff --git a/x-pack/agentbeat/main.go b/x-pack/agentbeat/main.go index f96031ec081e..44cc6ce33062 100644 --- a/x-pack/agentbeat/main.go +++ b/x-pack/agentbeat/main.go @@ -42,7 +42,7 @@ into a single agentbeat binary.`, prepareCommand(auditbeat.RootCmd), prepareCommand(filebeat.Filebeat()), prepareCommand(heartbeat.RootCmd), - prepareCommand(metricbeat.RootCmd), + prepareCommand(metricbeat.Initialize()), prepareCommand(osquerybeat.RootCmd), prepareCommand(packetbeat.RootCmd), ) diff --git a/x-pack/filebeat/docs/inputs/input-cel.asciidoc b/x-pack/filebeat/docs/inputs/input-cel.asciidoc index a96e8df5f3dd..8e062025b248 100644 --- a/x-pack/filebeat/docs/inputs/input-cel.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-cel.asciidoc @@ -795,6 +795,26 @@ This specifies fields in the `state` to be redacted prior to debug logging. Fiel This specifies whether fields should be replaced with a `*` or deleted entirely from messages sent to debug logs. 
If delete is `true`, fields will be deleted rather than replaced. +[float] +==== `failure_dump.enabled` + +It is possible to log CEL program evaluation failures to a local file-system for debugging configurations. +This option is enabled by setting `failure_dump.enabled` to true and setting the `failure_dump.filename` value. +To delete existing failure dumps, set `failure_dump.enabled` to false without unsetting the filename option. + +Enabling this option compromises security and should only be used for debugging. + +[float] +==== `failure_dump.filename` + +This specifies a directory path to write failure dumps to. If it is not empty and a CEL program evaluation fails, +the complete set of states for the CEL program's evaluation will be written as a JSON file, along with the error +that was reported. This option should only be used when debugging a failure as it imposes a significant performance +impact on the input and may potentially use large quantities of memory to hold the full set of states. If a failure +dump is configured, it is recommended that data input sizes be reduced to avoid excessive memory consumption, and +making dumps that are intractable to analysis. To delete existing failure dumps, set `failure_dump.enabled` to +false without unsetting the filename option. + [float] === Metrics diff --git a/x-pack/filebeat/docs/inputs/input-gcs.asciidoc b/x-pack/filebeat/docs/inputs/input-gcs.asciidoc index 23ac0e021c6a..2a762ddec18a 100644 --- a/x-pack/filebeat/docs/inputs/input-gcs.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-gcs.asciidoc @@ -10,9 +10,7 @@ ++++ Use the `google cloud storage input` to read content from files stored in buckets which reside on your Google Cloud. -The input can be configured to work with and without polling, though currently, if polling is disabled it will only -perform a one time passthrough, list the file contents and end the process. 
Polling is generally recommented for most cases -even though it can get expensive with dealing with a very large number of files. +The input can be configured to work with and without polling, though if polling is disabled it will only perform a single collection of data, list the file contents and end the process. *To mitigate errors and ensure a stable processing environment, this input employs the following features :* @@ -66,12 +64,11 @@ many buckets as we deem fit. We are also able to configure the attributes `max_w then be applied to all buckets which do not specify any of these attributes explicitly. NOTE: If the attributes `max_workers`, `poll`, `poll_interval` and `bucket_timeout` are specified at the root level, these can still be overridden at the bucket level with -different values, thus offering extensive flexibility and customization. Examples <> show this behaviour. +different values, thus offering extensive flexibility and customization. Examples <> show this behavior. On receiving this config the google cloud storage input will connect to the service and retrieve a `Storage Client` using the given `bucket_name` and `auth.credentials_file`, then it will spawn two main go-routines, one for each bucket. After this each of these routines (threads) will initialize a scheduler -which will in turn use the `max_workers` value to initialize an in-memory worker pool (thread pool) with `3` `workers` available. Basically that equates to two instances of a worker pool, -one per bucket, each having 3 workers. These `workers` will be responsible for performing `jobs` that process a file (in this case read and output the contents of a file). +which will in turn use the `max_workers` value to initialize an in-memory worker pool (thread pool) with `3` `workers` available. Basically that equates to two instances of a worker pool, one per bucket, each having 3 workers. 
These `workers` will be responsible for performing `jobs` that process a file (in this case read and output the contents of a file). NOTE: The scheduler is responsible for scheduling jobs, and uses the `maximum available workers` in the pool, at each iteration, to decide the number of files to retrieve and process. This keeps work distribution efficient. The scheduler uses `poll_interval` attribute value to decide how long to wait after each iteration. The `bucket_timeout` value is used to timeout calls to the bucket list api if it exceeds the given value. Each iteration consists of processing a certain number of files, decided by the `maximum available workers` value. @@ -213,7 +210,7 @@ This is a specific subfield of a bucket. It specifies the bucket name. This attribute defines the maximum amount of time after which a bucket operation will give and stop if no response is recieved (example: reading a file / listing a file). It can be defined in the following formats : `{{x}}s`, `{{x}}m`, `{{x}}h`, here `s = seconds`, `m = minutes` and `h = hours`. The value `{{x}}` can be anything we wish. -If no value is specified for this, by default its initialized to `50 seconds`. This attribute can be specified both at the root level of the configuration as well at the bucket level. The bucket level values will always take priority and override the root level values if both are specified. The value of `bucket_timeout` that should be used depends on the size of the files and the network speed. If the timeout is too low, the input will not be able to read the file completely and `context_deadline_exceeded` errors will be seen in the logs. If the timeout is too high, the input will wait for a long time for the file to be read, which can cause the input to be slow. The ratio between the `bucket_timeout` and `poll_interval` should be considered while setting both the values. 
A low `poll_interval` and a very high `bucket_timeout` can cause resource utilization issues as schedule ops will be spawned every poll iteration. If previous poll ops are still running, this could result in concurrently running ops and so could cause a bottleneck over time. +If no value is specified for this, by default it's initialized to `120 seconds`. This attribute can be specified both at the root level of the configuration as well at the bucket level. The bucket level values will always take priority and override the root level values if both are specified. The value of `bucket_timeout` that should be used depends on the size of the files and the network speed. If the timeout is too low, the input will not be able to read the file completely and `context_deadline_exceeded` errors will be seen in the logs. If the timeout is too high, the input will wait for a long time for the file to be read, which can cause the input to be slow. The ratio between the `bucket_timeout` and `poll_interval` should be considered while setting both the values. A low `poll_interval` and a very high `bucket_timeout` can cause resource utilization issues as schedule ops will be spawned every poll iteration. If previous poll ops are still running, this could result in concurrently running ops and so could cause a bottleneck over time. [id="attrib-max_workers-gcs"] [float] ==== `max_workers` @@ -228,9 +225,8 @@ NOTE: The value of `max_workers` is tied to the `batch_size` currently to ensure [float] ==== `poll` -This attribute informs the scheduler whether to keep polling for new files or not. Default value of this is `false`, so it will not keep polling if not explicitly -specified. This attribute can be specified both at the root level of the configuration as well at the bucket level. The bucket level values will always -take priority and override the root level values if both are specified. +This attribute informs the scheduler whether to keep polling for new files or not.
Default value of this is set to `true`. This attribute can be specified both at the +root level of the configuration as well at the bucket level. The bucket level values will always take priority and override the root level values if both are specified. [id="attrib-poll_interval-gcs"] [float] @@ -238,7 +234,7 @@ take priority and override the root level values if both are specified. This attribute defines the maximum amount of time after which the internal scheduler will make the polling call for the next set of objects/files. It can be defined in the following formats : `{{x}}s`, `{{x}}m`, `{{x}}h`, here `s = seconds`, `m = minutes` and `h = hours`. The value `{{x}}` can be anything we wish. -Example : `10s` would mean we would like the polling to occur every 10 seconds. If no value is specified for this, by default its initialized to `300 seconds`. +Example : `10s` would mean we would like the polling to occur every 10 seconds. If no value is specified for this, by default it's initialized to `5 minutes`. This attribute can be specified both at the root level of the configuration as well at the bucket level. The bucket level values will always take priority and override the root level values if both are specified. The `poll_interval` should be set to a value that is equal to the `bucket_timeout` value. This would ensure that another schedule operation is not started before the current buckets have all been processed. If the `poll_interval` is set to a value that is less than the `bucket_timeout`, then the input will start another schedule operation before the current one has finished, which can cause a bottleneck over time. Having a lower `poll_interval` can make the input faster at the cost of more resource utilization.
diff --git a/x-pack/filebeat/docs/inputs/input-http-endpoint.asciidoc b/x-pack/filebeat/docs/inputs/input-http-endpoint.asciidoc index afd39fec0f12..29054ef9341f 100644 --- a/x-pack/filebeat/docs/inputs/input-http-endpoint.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-http-endpoint.asciidoc @@ -40,6 +40,7 @@ These are the possible response codes from the server. | 406 | Not Acceptable | Returned if the POST request does not contain a body. | 415 | Unsupported Media Type | Returned if the Content-Type is not application/json. Or if Content-Encoding is present and is not gzip. | 500 | Internal Server Error | Returned if an I/O error occurs reading the request. +| 503 | Service Unavailable | Returned if the length of the request body would take the total number of in-flight bytes above the configured `max_in_flight_bytes` value. | 504 | Gateway Timeout | Returned if a request publication cannot be ACKed within the required timeout. |========================================================================================================================================================= @@ -285,6 +286,16 @@ The prefix for the signature. Certain webhooks prefix the HMAC signature with a By default the input expects the incoming POST to include a Content-Type of `application/json` to try to enforce the incoming data to be valid JSON. In certain scenarios when the source of the request is not able to do that, it can be overwritten with another value or set to null. +[float] +==== `max_in_flight_bytes` + +The total sum of request body lengths that are allowed at any given time. If non-zero, the input will compare this value to the sum of in-flight request body lengths from requests that include a `wait_for_completion_timeout` request query and will return a 503 HTTP status code, along with a Retry-After header configured with the `retry_after` option. The default value for this option is zero, no limit. 
+ +[float] +==== `retry_after` + +If a request has exceeded the `max_in_flight_bytes` limit, the response to the client will include a Retry-After header specifying how many seconds the client should wait to retry again. The default value for this option is 10 seconds. + [float] ==== `program` diff --git a/x-pack/filebeat/input/cel/config.go b/x-pack/filebeat/input/cel/config.go index b04b78457198..aee095b199b9 100644 --- a/x-pack/filebeat/input/cel/config.go +++ b/x-pack/filebeat/input/cel/config.go @@ -58,6 +58,9 @@ type config struct { // Resource is the configuration for establishing an // HTTP request or for locating a local resource. Resource *ResourceConfig `config:"resource" validate:"required"` + + // FailureDump configures failure dump behaviour. + FailureDump *dumpConfig `config:"failure_dump"` } type redact struct { @@ -69,6 +72,19 @@ type redact struct { Delete bool `config:"delete"` } +// dumpConfig configures the CEL program to retain +// the full evaluation state using the cel.OptTrackState +// option. The state is written to a file in the path if +// the evaluation fails. 
+type dumpConfig struct { + Enabled *bool `config:"enabled"` + Filename string `config:"filename"` +} + +func (t *dumpConfig) enabled() bool { + return t != nil && (t.Enabled == nil || *t.Enabled) +} + func (c config) Validate() error { if c.Redact == nil { logp.L().Named("input.cel").Warn("missing recommended 'redact' configuration: " + @@ -89,7 +105,8 @@ func (c config) Validate() error { if len(c.Regexps) != 0 { patterns = map[string]*regexp.Regexp{".": nil} } - _, _, err = newProgram(context.Background(), c.Program, root, nil, &http.Client{}, nil, nil, patterns, c.XSDs, logp.L().Named("input.cel"), nil) + wantDump := c.FailureDump.enabled() && c.FailureDump.Filename != "" + _, _, err = newProgram(context.Background(), c.Program, root, nil, &http.Client{}, nil, nil, patterns, c.XSDs, logp.L().Named("input.cel"), nil, wantDump) if err != nil { return fmt.Errorf("failed to check program: %w", err) } diff --git a/x-pack/filebeat/input/cel/input.go b/x-pack/filebeat/input/cel/input.go index ff4f1dccf51f..97ab8c9bee05 100644 --- a/x-pack/filebeat/input/cel/input.go +++ b/x-pack/filebeat/input/cel/input.go @@ -10,6 +10,7 @@ package cel import ( "compress/gzip" "context" + "encoding/json" "errors" "fmt" "io" @@ -166,7 +167,8 @@ func (i input) run(env v2.Context, src *source, cursor map[string]interface{}, p Password: cfg.Auth.Basic.Password, } } - prg, ast, err := newProgram(ctx, cfg.Program, root, getEnv(cfg.AllowedEnvironment), client, limiter, auth, patterns, cfg.XSDs, log, trace) + wantDump := cfg.FailureDump.enabled() && cfg.FailureDump.Filename != "" + prg, ast, err := newProgram(ctx, cfg.Program, root, getEnv(cfg.AllowedEnvironment), client, limiter, auth, patterns, cfg.XSDs, log, trace, wantDump) if err != nil { return err } @@ -251,12 +253,25 @@ func (i input) run(env v2.Context, src *source, cursor map[string]interface{}, p log.Debugw("request state", logp.Namespace("cel"), "state", redactor{state: state, cfg: cfg.Redact}) metrics.executions.Add(1) start := 
i.now().In(time.UTC) - state, err = evalWith(ctx, prg, ast, state, start) + state, err = evalWith(ctx, prg, ast, state, start, wantDump) log.Debugw("response state", logp.Namespace("cel"), "state", redactor{state: state, cfg: cfg.Redact}) if err != nil { + var dump dumpError switch { case errors.Is(err, context.Canceled), errors.Is(err, context.DeadlineExceeded): return err + case errors.As(err, &dump): + path := strings.ReplaceAll(cfg.FailureDump.Filename, "*", sanitizeFileName(env.IDWithoutName)) + dir := filepath.Dir(path) + base := filepath.Base(path) + ext := filepath.Ext(base) + prefix := strings.TrimSuffix(base, ext) + path = filepath.Join(dir, prefix+"-"+i.now().In(time.UTC).Format("2006-01-02T15-04-05.000")+ext) + log.Debugw("writing failure dump file", "path", path) + err := dump.writeToFile(path) + if err != nil { + log.Errorw("failed to write failure dump", "path", path, "error", err) + } } log.Errorw("failed evaluation", "error", err) env.UpdateStatus(status.Degraded, "failed evaluation: "+err.Error()) @@ -785,6 +800,26 @@ func newClient(ctx context.Context, cfg config, log *logp.Logger, reg *monitorin } } } + if !cfg.FailureDump.enabled() && cfg.FailureDump != nil && cfg.FailureDump.Filename != "" { + // We have a fail-dump name, but we are not enabled, + // so remove all dumps we own. 
+ err = os.Remove(cfg.FailureDump.Filename) + if err != nil && !errors.Is(err, fs.ErrNotExist) { + log.Errorw("failed to remove request trace log", "path", cfg.FailureDump.Filename, "error", err) + } + ext := filepath.Ext(cfg.FailureDump.Filename) + base := strings.TrimSuffix(cfg.FailureDump.Filename, ext) + paths, err := filepath.Glob(base + "-" + lumberjackTimestamp + ext) + if err != nil { + log.Errorw("failed to collect request trace log path names", "error", err) + } + for _, p := range paths { + err = os.Remove(p) + if err != nil && !errors.Is(err, fs.ErrNotExist) { + log.Errorw("failed to remove request trace log", "path", p, "error", err) + } + } + } if reg != nil { c.Transport = httpmon.NewMetricsRoundTripper(c.Transport, reg) @@ -1004,7 +1039,7 @@ func getEnv(allowed []string) map[string]string { return env } -func newProgram(ctx context.Context, src, root string, vars map[string]string, client *http.Client, limiter *rate.Limiter, auth *lib.BasicAuth, patterns map[string]*regexp.Regexp, xsd map[string]string, log *logp.Logger, trace *httplog.LoggingRoundTripper) (cel.Program, *cel.Ast, error) { +func newProgram(ctx context.Context, src, root string, vars map[string]string, client *http.Client, limiter *rate.Limiter, auth *lib.BasicAuth, patterns map[string]*regexp.Regexp, xsd map[string]string, log *logp.Logger, trace *httplog.LoggingRoundTripper, details bool) (cel.Program, *cel.Ast, error) { xml, err := lib.XML(nil, xsd) if err != nil { return nil, nil, fmt.Errorf("failed to build xml type hints: %w", err) @@ -1043,7 +1078,11 @@ func newProgram(ctx context.Context, src, root string, vars map[string]string, c return nil, nil, fmt.Errorf("failed compilation: %w", iss.Err()) } - prg, err := env.Program(ast) + var progOpts []cel.ProgramOption + if details { + progOpts = []cel.ProgramOption{cel.EvalOptions(cel.OptTrackState)} + } + prg, err := env.Program(ast, progOpts...) 
if err != nil { return nil, nil, fmt.Errorf("failed program instantiation: %w", err) } @@ -1065,8 +1104,8 @@ func debug(log *logp.Logger, trace *httplog.LoggingRoundTripper) func(string, an } } -func evalWith(ctx context.Context, prg cel.Program, ast *cel.Ast, state map[string]interface{}, now time.Time) (map[string]interface{}, error) { - out, _, err := prg.ContextEval(ctx, map[string]interface{}{ +func evalWith(ctx context.Context, prg cel.Program, ast *cel.Ast, state map[string]interface{}, now time.Time, details bool) (map[string]interface{}, error) { + out, det, err := prg.ContextEval(ctx, map[string]interface{}{ // Replace global program "now" with current time. This is necessary // as the lib.Time now global is static at program instantiation time // which will persist over multiple evaluations. The lib.Time behaviour @@ -1081,6 +1120,9 @@ func evalWith(ctx context.Context, prg cel.Program, ast *cel.Ast, state map[stri }) if err != nil { err = lib.DecoratedError{AST: ast, Err: err} + if details { + err = dumpError{error: err, dump: lib.NewDump(ast, det)} + } } if e := ctx.Err(); e != nil { err = e @@ -1109,6 +1151,36 @@ func evalWith(ctx context.Context, prg cel.Program, ast *cel.Ast, state map[stri } } +// dumpError is an evaluation state dump associated with an error. +type dumpError struct { + error + dump *lib.Dump +} + +func (e dumpError) writeToFile(path string) (err error) { + err = os.MkdirAll(filepath.Dir(path), 0o700) + if err != nil { + return err + } + f, err := os.Create(path) + if err != nil { + return err + } + defer func() { + err = errors.Join(err, f.Sync(), f.Close()) + }() + enc := json.NewEncoder(f) + enc.SetEscapeHTML(false) + type dump struct { + Error string `json:"error"` + State []lib.NodeValue `json:"state"` + } + return enc.Encode(dump{ + Error: e.Error(), + State: e.dump.NodeValues(), + }) +} + // clearWantMore sets the state to not request additional work in a periodic evaluation. 
// It leaves state intact if there is no "want_more" element, and sets the element to false // if there is. This is necessary instead of just doing delete(state, "want_more") as diff --git a/x-pack/filebeat/input/cel/input_test.go b/x-pack/filebeat/input/cel/input_test.go index 0d91710ca093..143402d98347 100644 --- a/x-pack/filebeat/input/cel/input_test.go +++ b/x-pack/filebeat/input/cel/input_test.go @@ -45,6 +45,7 @@ var inputTests = []struct { want []map[string]interface{} wantCursor []map[string]interface{} wantErr error + prepare func() error wantFile string wantNoFile string }{ @@ -1685,6 +1686,88 @@ var inputTests = []struct { }, }}, }, + { + name: "dump_no_error", + config: map[string]interface{}{ + "interval": 1, + "program": `{"events":[{"message":{"value": try(debug("divide by zero", 0/0))}}]}`, + "state": nil, + "resource": map[string]interface{}{ + "url": "", + }, + "failure_dump": map[string]interface{}{ + "enabled": true, + "filename": "failure_dumps/dump.json", + }, + }, + time: func() time.Time { return time.Date(2010, 2, 8, 0, 0, 0, 0, time.UTC) }, + wantNoFile: filepath.Join("failure_dumps", "dump-2010-02-08T00-00-00.000.json"), + want: []map[string]interface{}{{ + "message": map[string]interface{}{ + "value": "division by zero", + }, + }}, + }, + { + name: "dump_error", + config: map[string]interface{}{ + "interval": 1, + "program": `{"events":[{"message":{"value": debug("divide by zero", 0/0)}}]}`, + "state": nil, + "resource": map[string]interface{}{ + "url": "", + }, + "failure_dump": map[string]interface{}{ + "enabled": true, + "filename": "failure_dumps/dump.json", + }, + }, + time: func() time.Time { return time.Date(2010, 2, 9, 0, 0, 0, 0, time.UTC) }, + wantFile: filepath.Join("failure_dumps", "dump-2010-02-09T00-00-00.000.json"), // One day after the no dump case. 
+ want: []map[string]interface{}{ + { + "error": map[string]interface{}{ + "message": `failed eval: ERROR: :1:58: division by zero + | {"events":[{"message":{"value": debug("divide by zero", 0/0)}}]} + | .........................................................^`, + }, + }, + }, + }, + { + name: "dump_error_delete", + config: map[string]interface{}{ + "interval": 1, + "program": `{"events":[{"message":{"value": debug("divide by zero", 0/0)}}]}`, + "state": nil, + "resource": map[string]interface{}{ + "url": "", + }, + "failure_dump": map[string]interface{}{ + "enabled": false, // We have a name but are disabled, so delete. + "filename": "failure_dumps/dump.json", + }, + }, + time: func() time.Time { return time.Date(2010, 2, 9, 0, 0, 0, 0, time.UTC) }, + prepare: func() error { + // Make a file that the configuration should delete. + err := os.MkdirAll("failure_dumps", 0o700) + if err != nil { + return err + } + return os.WriteFile(filepath.Join("failure_dumps", "dump-2010-02-09T00-00-00.000.json"), nil, 0o600) + }, + wantNoFile: filepath.Join("failure_dumps", "dump-2010-02-09T00-00-00.000.json"), // One day after the no dump case. + want: []map[string]interface{}{ + { + "error": map[string]interface{}{ + "message": `failed eval: ERROR: :1:58: division by zero + | {"events":[{"message":{"value": debug("divide by zero", 0/0)}}]} + | .........................................................^`, + }, + }, + }, + }, // not yet done from httpjson (some are redundant since they are compositional products). 
// @@ -1708,6 +1791,11 @@ func TestInput(t *testing.T) { os.Setenv("CELTESTENVVAR", "TESTVALUE") os.Setenv("DISALLOWEDCELTESTENVVAR", "DISALLOWEDTESTVALUE") + err := os.RemoveAll("failure_dumps") + if err != nil { + t.Fatalf("failed to remove failure_dumps directory: %v", err) + } + logp.TestingSetup() for _, test := range inputTests { t.Run(test.name, func(t *testing.T) { @@ -1718,6 +1806,13 @@ func TestInput(t *testing.T) { t.Skip("skipping remote endpoint test") } + if test.prepare != nil { + err := test.prepare() + if err != nil { + t.Fatalf("unexpected from prepare(): %v", err) + } + } + if test.server != nil { test.server(t, test.handler, test.config) } @@ -1770,6 +1865,20 @@ func TestInput(t *testing.T) { if fmt.Sprint(err) != fmt.Sprint(test.wantErr) { t.Errorf("unexpected error from running input: got:%v want:%v", err, test.wantErr) } + if test.wantFile != "" { + if _, err := os.Stat(filepath.Join(tempDir, test.wantFile)); err != nil { + t.Errorf("expected log file not found: %v", err) + } + } + if test.wantNoFile != "" { + paths, err := filepath.Glob(filepath.Join(tempDir, test.wantNoFile)) + if err != nil { + t.Fatalf("unexpected error calling filepath.Glob(%q): %v", test.wantNoFile, err) + } + if len(paths) != 0 { + t.Errorf("unexpected files found: %v", paths) + } + } if test.wantErr != nil { return } @@ -1802,20 +1911,6 @@ func TestInput(t *testing.T) { t.Errorf("unexpected cursor for event %d: got:- want:+\n%s", i, cmp.Diff(got, test.wantCursor[i])) } } - if test.wantFile != "" { - if _, err := os.Stat(filepath.Join(tempDir, test.wantFile)); err != nil { - t.Errorf("expected log file not found: %v", err) - } - } - if test.wantNoFile != "" { - paths, err := filepath.Glob(filepath.Join(tempDir, test.wantNoFile)) - if err != nil { - t.Fatalf("unexpected error calling filepath.Glob(%q): %v", test.wantNoFile, err) - } - if len(paths) != 0 { - t.Errorf("unexpected files found: %v", paths) - } - } }) } } diff --git a/x-pack/filebeat/input/gcs/client.go 
b/x-pack/filebeat/input/gcs/client.go index 7fd45d2d0a9c..1846e08c5ab0 100644 --- a/x-pack/filebeat/input/gcs/client.go +++ b/x-pack/filebeat/input/gcs/client.go @@ -12,11 +12,9 @@ import ( "cloud.google.com/go/storage" "golang.org/x/oauth2/google" "google.golang.org/api/option" - - "github.com/elastic/elastic-agent-libs/logp" ) -func fetchStorageClient(ctx context.Context, cfg config, log *logp.Logger) (*storage.Client, error) { +func fetchStorageClient(ctx context.Context, cfg config) (*storage.Client, error) { if cfg.AlternativeHost != "" { var h *url.URL h, err := url.Parse(cfg.AlternativeHost) diff --git a/x-pack/filebeat/input/gcs/config.go b/x-pack/filebeat/input/gcs/config.go index 6a7b93d5e479..64f64c69bc5f 100644 --- a/x-pack/filebeat/input/gcs/config.go +++ b/x-pack/filebeat/input/gcs/config.go @@ -28,16 +28,16 @@ type config struct { // Auth - Defines the authentication mechanism to be used for accessing the gcs bucket. Auth authConfig `config:"auth"` // MaxWorkers - Defines the maximum number of go routines that will be spawned. - MaxWorkers *int `config:"max_workers,omitempty" validate:"max=5000"` + MaxWorkers int `config:"max_workers" validate:"max=5000"` // Poll - Defines if polling should be performed on the input bucket source. - Poll *bool `config:"poll,omitempty"` + Poll bool `config:"poll"` // PollInterval - Defines the maximum amount of time to wait before polling for the next batch of objects from the bucket. - PollInterval *time.Duration `config:"poll_interval,omitempty"` + PollInterval time.Duration `config:"poll_interval"` // ParseJSON - Informs the publisher whether to parse & objectify json data or not. By default this is set to // false, since it can get expensive dealing with highly nested json data. - ParseJSON *bool `config:"parse_json,omitempty"` + ParseJSON bool `config:"parse_json"` // BucketTimeOut - Defines the maximum time that the sdk will wait for a bucket api response before timing out. 
- BucketTimeOut *time.Duration `config:"bucket_timeout,omitempty"` + BucketTimeOut time.Duration `config:"bucket_timeout"` // Buckets - Defines a list of buckets that will be polled for objects. Buckets []bucket `config:"buckets" validate:"required"` // FileSelectors - Defines a list of regex patterns that can be used to filter out objects from the bucket. @@ -49,17 +49,17 @@ type config struct { // ExpandEventListFromField - Defines the field name that will be used to expand the event into separate events. ExpandEventListFromField string `config:"expand_event_list_from_field"` // This field is only used for system test purposes, to override the HTTP endpoint. - AlternativeHost string `config:"alternative_host,omitempty"` + AlternativeHost string `config:"alternative_host"` } // bucket contains the config for each specific object storage bucket in the root account type bucket struct { Name string `config:"name" validate:"required"` - MaxWorkers *int `config:"max_workers,omitempty" validate:"max=5000"` - BucketTimeOut *time.Duration `config:"bucket_timeout,omitempty"` - Poll *bool `config:"poll,omitempty"` - PollInterval *time.Duration `config:"poll_interval,omitempty"` - ParseJSON *bool `config:"parse_json,omitempty"` + MaxWorkers *int `config:"max_workers" validate:"max=5000"` + BucketTimeOut *time.Duration `config:"bucket_timeout"` + Poll *bool `config:"poll"` + PollInterval *time.Duration `config:"poll_interval"` + ParseJSON *bool `config:"parse_json"` FileSelectors []fileSelectorConfig `config:"file_selectors"` ReaderConfig readerConfig `config:",inline"` TimeStampEpoch *int64 `config:"timestamp_epoch"` @@ -78,13 +78,15 @@ type readerConfig struct { Decoding decoderConfig `config:"decoding"` } +// authConfig defines the authentication mechanism to be used for accessing the gcs bucket. +// If either is configured the 'omitempty' tag will prevent the other option from being serialized in the config. 
type authConfig struct { CredentialsJSON *jsonCredentialsConfig `config:"credentials_json,omitempty"` CredentialsFile *fileCredentialsConfig `config:"credentials_file,omitempty"` } type fileCredentialsConfig struct { - Path string `config:"path,omitempty"` + Path string `config:"path"` } type jsonCredentialsConfig struct { AccountKey string `config:"account_key"` @@ -115,3 +117,14 @@ func (c authConfig) Validate() error { return fmt.Errorf("no authentication credentials were configured or detected " + "(credentials_file, credentials_json, and application default credentials (ADC))") } + +// defaultConfig returns the default configuration for the input +func defaultConfig() config { + return config{ + MaxWorkers: 1, + Poll: true, + PollInterval: 5 * time.Minute, + BucketTimeOut: 120 * time.Second, + ParseJSON: false, + } +} diff --git a/x-pack/filebeat/input/gcs/input.go b/x-pack/filebeat/input/gcs/input.go index cc0e9ad74bbb..33e46d034d76 100644 --- a/x-pack/filebeat/input/gcs/input.go +++ b/x-pack/filebeat/input/gcs/input.go @@ -50,7 +50,7 @@ func Plugin(log *logp.Logger, store cursor.StateStore) v2.Plugin { } func configure(cfg *conf.C) ([]cursor.Source, cursor.Input, error) { - config := config{} + config := defaultConfig() if err := cfg.Unpack(&config); err != nil { return nil, nil, err } @@ -78,44 +78,22 @@ func configure(cfg *conf.C) ([]cursor.Source, cursor.Input, error) { return sources, &gcsInput{config: config}, nil } -// tryOverrideOrDefault, overrides global values with local -// bucket level values if present. 
If both global & local values -// are absent, assigns default values +// tryOverrideOrDefault, overrides the bucket level values with global values if the bucket fields are not set func tryOverrideOrDefault(cfg config, b bucket) bucket { if b.MaxWorkers == nil { - maxWorkers := 1 - if cfg.MaxWorkers != nil { - maxWorkers = *cfg.MaxWorkers - } - b.MaxWorkers = &maxWorkers + b.MaxWorkers = &cfg.MaxWorkers } if b.Poll == nil { - var poll bool - if cfg.Poll != nil { - poll = *cfg.Poll - } - b.Poll = &poll + b.Poll = &cfg.Poll } if b.PollInterval == nil { - interval := time.Second * 300 - if cfg.PollInterval != nil { - interval = *cfg.PollInterval - } - b.PollInterval = &interval + b.PollInterval = &cfg.PollInterval } if b.ParseJSON == nil { - parse := false - if cfg.ParseJSON != nil { - parse = *cfg.ParseJSON - } - b.ParseJSON = &parse + b.ParseJSON = &cfg.ParseJSON } if b.BucketTimeOut == nil { - timeOut := time.Second * 50 - if cfg.BucketTimeOut != nil { - timeOut = *cfg.BucketTimeOut - } - b.BucketTimeOut = &timeOut + b.BucketTimeOut = &cfg.BucketTimeOut } if b.TimeStampEpoch == nil { b.TimeStampEpoch = cfg.TimeStampEpoch @@ -173,11 +151,12 @@ func (input *gcsInput) Run(inputCtx v2.Context, src cursor.Source, cancel() }() - client, err := fetchStorageClient(ctx, input.config, log) + client, err := fetchStorageClient(ctx, input.config) if err != nil { metrics.errorsTotal.Inc() return err } + bucket := client.Bucket(currentSource.BucketName).Retryer( // Use WithBackoff to change the timing of the exponential backoff. 
storage.WithBackoff(gax.Backoff{ diff --git a/x-pack/filebeat/input/gcs/input_stateless.go b/x-pack/filebeat/input/gcs/input_stateless.go index f56f7f35bc55..c0038bf31dce 100644 --- a/x-pack/filebeat/input/gcs/input_stateless.go +++ b/x-pack/filebeat/input/gcs/input_stateless.go @@ -88,7 +88,6 @@ func (in *statelessInput) Run(inputCtx v2.Context, publisher stateless.Publisher // Since we are only reading, the operation is always idempotent storage.WithPolicy(storage.RetryAlways), ) - scheduler := newScheduler(pub, bkt, currentSource, &in.config, st, metrics, log) // allows multiple containers to be scheduled concurrently while testing // the stateless input is triggered only while testing and till now it did not mimic diff --git a/x-pack/filebeat/input/gcs/input_test.go b/x-pack/filebeat/input/gcs/input_test.go index 8accb774f384..5595622c93e5 100644 --- a/x-pack/filebeat/input/gcs/input_test.go +++ b/x-pack/filebeat/input/gcs/input_test.go @@ -535,7 +535,7 @@ func Test_StorageClient(t *testing.T) { client, _ := storage.NewClient(context.Background(), option.WithEndpoint(serv.URL), option.WithoutAuthentication(), option.WithHTTPClient(&httpclient)) cfg := conf.MustNewConfigFrom(tt.baseConfig) - conf := config{} + conf := defaultConfig() err := cfg.Unpack(&conf) if err != nil { assert.EqualError(t, err, fmt.Sprint(tt.isError)) @@ -558,8 +558,8 @@ func Test_StorageClient(t *testing.T) { }) var timeout *time.Timer - if conf.PollInterval != nil { - timeout = time.NewTimer(1*time.Second + *conf.PollInterval) + if conf.PollInterval != 0 { + timeout = time.NewTimer(1*time.Second + conf.PollInterval) } else { timeout = time.NewTimer(5 * time.Second) } diff --git a/x-pack/filebeat/input/http_endpoint/config.go b/x-pack/filebeat/input/http_endpoint/config.go index 5e1c93d2bc36..977e7b5d7d26 100644 --- a/x-pack/filebeat/input/http_endpoint/config.go +++ b/x-pack/filebeat/input/http_endpoint/config.go @@ -37,6 +37,8 @@ type config struct { URL string `config:"url" 
validate:"required"` Prefix string `config:"prefix"` ContentType string `config:"content_type"` + MaxInFlight int64 `config:"max_in_flight_bytes"` + RetryAfter int `config:"retry_after"` Program string `config:"program"` SecretHeader string `config:"secret.header"` SecretValue string `config:"secret.value"` @@ -66,6 +68,7 @@ func defaultConfig() config { BasicAuth: false, ResponseCode: 200, ResponseBody: `{"message": "success"}`, + RetryAfter: 10, ListenAddress: "127.0.0.1", ListenPort: "8000", URL: "/", diff --git a/x-pack/filebeat/input/http_endpoint/handler.go b/x-pack/filebeat/input/http_endpoint/handler.go index 27f4d12253ef..388287a7e582 100644 --- a/x-pack/filebeat/input/http_endpoint/handler.go +++ b/x-pack/filebeat/input/http_endpoint/handler.go @@ -56,6 +56,20 @@ type handler struct { txBaseID string // Random value to make transaction IDs unique. txIDCounter atomic.Uint64 // Transaction ID counter that is incremented for each request. + // inFlight is the sum of message body lengths + // that have been received but not yet ACKed + // or timed out or otherwise handled. + // + // Requests that do not request a timeout do + // not contribute to this value. + inFlight atomic.Int64 + // maxInFlight is the maximum value of inFlight + // that will be allowed for any messages received + // by the handler. If non-zero, inFlight may + // not exceed this value. + maxInFlight int64 + retryAfter int + reqLogger *zap.Logger host, scheme string @@ -86,9 +100,38 @@ func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { acked chan struct{} timeout *time.Timer ) + if h.maxInFlight != 0 { + // Consider non-ACKing messages as well. These do not add + // to the sum of in-flight bytes, but we can still assess + // whether a message would take us over the limit.
+ inFlight := h.inFlight.Load() + r.ContentLength + if inFlight > h.maxInFlight { + w.Header().Set(headerContentEncoding, "application/json") + w.Header().Set("Retry-After", strconv.Itoa(h.retryAfter)) + w.WriteHeader(http.StatusServiceUnavailable) + _, err := fmt.Fprintf(w, + `{"warn":"max in flight message memory exceeded","max_in_flight":%d,"in_flight":%d}`, + h.maxInFlight, inFlight, + ) + if err != nil { + h.log.Errorw("failed to write 503", "error", err) + } + return + } + } if wait != 0 { acked = make(chan struct{}) timeout = time.NewTimer(wait) + h.inFlight.Add(r.ContentLength) + defer func() { + // Any return will be a message handling completion and the + // the removal of the allocation from the queue assuming that + // the client has requested a timeout. Either we have an early + // error condition or timeout and the message is dropped, we + // have ACKed all the events in the request, or the input has + // been cancelled. + h.inFlight.Add(-r.ContentLength) + }() } start := time.Now() acker := newBatchACKTracker(func() { diff --git a/x-pack/filebeat/input/http_endpoint/handler_test.go b/x-pack/filebeat/input/http_endpoint/handler_test.go index 2ac763f052b1..623c9542cc15 100644 --- a/x-pack/filebeat/input/http_endpoint/handler_test.go +++ b/x-pack/filebeat/input/http_endpoint/handler_test.go @@ -192,6 +192,9 @@ type publisher struct { func (p *publisher) Publish(e beat.Event) { p.mu.Lock() p.events = append(p.events, e) + if ack, ok := e.Private.(*batchACKTracker); ok { + ack.ACK() + } p.mu.Unlock() } diff --git a/x-pack/filebeat/input/http_endpoint/input.go b/x-pack/filebeat/input/http_endpoint/input.go index b4ad07e7626b..6d6b0cbc3f41 100644 --- a/x-pack/filebeat/input/http_endpoint/input.go +++ b/x-pack/filebeat/input/http_endpoint/input.go @@ -347,6 +347,8 @@ func newHandler(ctx context.Context, c config, prg *program, pub func(beat.Event hmacType: c.HMACType, hmacPrefix: c.HMACPrefix, }, + maxInFlight: c.MaxInFlight, + retryAfter: c.RetryAfter, 
program: prg, messageField: c.Prefix, responseCode: c.ResponseCode, diff --git a/x-pack/filebeat/input/http_endpoint/input_test.go b/x-pack/filebeat/input/http_endpoint/input_test.go index 3f530454e1d8..9a3a2368a74a 100644 --- a/x-pack/filebeat/input/http_endpoint/input_test.go +++ b/x-pack/filebeat/input/http_endpoint/input_test.go @@ -10,6 +10,7 @@ import ( "errors" "io" "net/http" + "slices" "strings" "sync" "testing" @@ -24,19 +25,20 @@ import ( ) var serverPoolTests = []struct { - name string - method string - cfgs []*httpEndpoint - events []target - want []mapstr.M - wantErr error + name string + method string + cfgs []*httpEndpoint + events []target + want []mapstr.M + wantStatus int + wantErr error }{ { name: "single", cfgs: []*httpEndpoint{{ addr: "127.0.0.1:9001", config: config{ - ResponseCode: 200, + ResponseCode: http.StatusOK, ResponseBody: `{"message": "success"}`, ListenAddress: "127.0.0.1", ListenPort: "9001", @@ -50,6 +52,7 @@ var serverPoolTests = []struct { {url: "http://127.0.0.1:9001/", event: `{"b":2}`}, {url: "http://127.0.0.1:9001/", event: `{"c":3}`}, }, + wantStatus: http.StatusOK, want: []mapstr.M{ {"json": mapstr.M{"a": int64(1)}}, {"json": mapstr.M{"b": int64(2)}}, @@ -63,7 +66,7 @@ var serverPoolTests = []struct { addr: "127.0.0.1:9001", config: config{ Method: http.MethodPut, - ResponseCode: 200, + ResponseCode: http.StatusOK, ResponseBody: `{"message": "success"}`, ListenAddress: "127.0.0.1", ListenPort: "9001", @@ -77,6 +80,7 @@ var serverPoolTests = []struct { {url: "http://127.0.0.1:9001/", event: `{"b":2}`}, {url: "http://127.0.0.1:9001/", event: `{"c":3}`}, }, + wantStatus: http.StatusOK, want: []mapstr.M{ {"json": mapstr.M{"a": int64(1)}}, {"json": mapstr.M{"b": int64(2)}}, @@ -90,7 +94,7 @@ var serverPoolTests = []struct { addr: "127.0.0.1:9001", config: config{ Method: http.MethodPatch, - ResponseCode: 200, + ResponseCode: http.StatusOK, ResponseBody: `{"message": "success"}`, ListenAddress: "127.0.0.1", ListenPort: "9001", 
@@ -104,6 +108,7 @@ var serverPoolTests = []struct { {url: "http://127.0.0.1:9001/", event: `{"b":2}`}, {url: "http://127.0.0.1:9001/", event: `{"c":3}`}, }, + wantStatus: http.StatusOK, want: []mapstr.M{ {"json": mapstr.M{"a": int64(1)}}, {"json": mapstr.M{"b": int64(2)}}, @@ -116,7 +121,7 @@ var serverPoolTests = []struct { { addr: "127.0.0.1:9001", config: config{ - ResponseCode: 200, + ResponseCode: http.StatusOK, ResponseBody: `{"message": "success"}`, ListenAddress: "127.0.0.1", ListenPort: "9001", @@ -128,7 +133,7 @@ var serverPoolTests = []struct { { addr: "127.0.0.1:9002", config: config{ - ResponseCode: 200, + ResponseCode: http.StatusOK, ResponseBody: `{"message": "success"}`, ListenAddress: "127.0.0.1", ListenPort: "9002", @@ -143,6 +148,7 @@ var serverPoolTests = []struct { {url: "http://127.0.0.1:9002/b/", event: `{"b":2}`}, {url: "http://127.0.0.1:9001/a/", event: `{"c":3}`}, }, + wantStatus: http.StatusOK, want: []mapstr.M{ {"json": mapstr.M{"a": int64(1)}}, {"json": mapstr.M{"b": int64(2)}}, @@ -155,7 +161,7 @@ var serverPoolTests = []struct { { addr: "127.0.0.1:9001", config: config{ - ResponseCode: 200, + ResponseCode: http.StatusOK, ResponseBody: `{"message": "success"}`, ListenAddress: "127.0.0.1", ListenPort: "9001", @@ -167,7 +173,7 @@ var serverPoolTests = []struct { { addr: "127.0.0.1:9001", config: config{ - ResponseCode: 200, + ResponseCode: http.StatusOK, ResponseBody: `{"message": "success"}`, ListenAddress: "127.0.0.1", ListenPort: "9001", @@ -182,6 +188,7 @@ var serverPoolTests = []struct { {url: "http://127.0.0.1:9001/b/", event: `{"b":2}`}, {url: "http://127.0.0.1:9001/a/", event: `{"c":3}`}, }, + wantStatus: http.StatusOK, want: []mapstr.M{ {"json": mapstr.M{"a": int64(1)}}, {"json": mapstr.M{"b": int64(2)}}, @@ -194,7 +201,7 @@ var serverPoolTests = []struct { { addr: "127.0.0.1:9001", config: config{ - ResponseCode: 200, + ResponseCode: http.StatusOK, ResponseBody: `{"message": "success"}`, ListenAddress: "127.0.0.1", ListenPort: 
"9001", @@ -207,7 +214,7 @@ var serverPoolTests = []struct { addr: "127.0.0.1:9001", config: config{ TLS: &tlscommon.ServerConfig{}, - ResponseCode: 200, + ResponseCode: http.StatusOK, ResponseBody: `{"message": "success"}`, ListenAddress: "127.0.0.1", ListenPort: "9001", @@ -228,7 +235,7 @@ var serverPoolTests = []struct { TLS: &tlscommon.ServerConfig{ VerificationMode: tlscommon.VerifyStrict, }, - ResponseCode: 200, + ResponseCode: http.StatusOK, ResponseBody: `{"message": "success"}`, ListenAddress: "127.0.0.1", ListenPort: "9001", @@ -243,7 +250,7 @@ var serverPoolTests = []struct { TLS: &tlscommon.ServerConfig{ VerificationMode: tlscommon.VerifyNone, }, - ResponseCode: 200, + ResponseCode: http.StatusOK, ResponseBody: `{"message": "success"}`, ListenAddress: "127.0.0.1", ListenPort: "9001", @@ -255,11 +262,87 @@ var serverPoolTests = []struct { }, wantErr: invalidTLSStateErr{addr: "127.0.0.1:9001", reason: "configuration options do not agree"}, }, + { + name: "exceed_max_in_flight", + method: http.MethodPost, + cfgs: []*httpEndpoint{{ + addr: "127.0.0.1:9001", + config: config{ + Method: http.MethodPost, + ResponseCode: http.StatusOK, + ResponseBody: `{"message": "success"}`, + ListenAddress: "127.0.0.1", + ListenPort: "9001", + URL: "/", + Prefix: "json", + MaxInFlight: 2, + RetryAfter: 10, + ContentType: "application/json", + }, + }}, + events: []target{ + {url: "http://127.0.0.1:9001/?wait_for_completion_timeout=1s", event: `{"a":1}`, wantBody: `{"warn":"max in flight message memory exceeded","max_in_flight":2,"in_flight":7}`, wantHeader: http.Header{"Retry-After": {"10"}}}, + {url: "http://127.0.0.1:9001/?wait_for_completion_timeout=1s", event: `{"b":2}`, wantBody: `{"warn":"max in flight message memory exceeded","max_in_flight":2,"in_flight":7}`, wantHeader: http.Header{"Retry-After": {"10"}}}, + {url: "http://127.0.0.1:9001/?wait_for_completion_timeout=1s", event: `{"c":3}`, wantBody: `{"warn":"max in flight message memory 
exceeded","max_in_flight":2,"in_flight":7}`, wantHeader: http.Header{"Retry-After": {"10"}}}, + }, + wantStatus: http.StatusServiceUnavailable, + want: nil, + }, + { + name: "not_exceed_max_in_flight", + method: http.MethodPost, + cfgs: []*httpEndpoint{{ + addr: "127.0.0.1:9001", + config: config{ + Method: http.MethodPost, + ResponseCode: http.StatusOK, + ResponseBody: `{"message": "success"}`, + ListenAddress: "127.0.0.1", + ListenPort: "9001", + URL: "/", + Prefix: "json", + MaxInFlight: 20, + RetryAfter: 10, + ContentType: "application/json", + }, + }}, + events: []target{ + {url: "http://127.0.0.1:9001/?wait_for_completion_timeout=1s", event: `{"a":1}`, wantBody: `{"message": "success"}`, wantHeader: http.Header{"Retry-After": nil}}, + {url: "http://127.0.0.1:9001/?wait_for_completion_timeout=1s", event: `{"b":2}`, wantBody: `{"message": "success"}`, wantHeader: http.Header{"Retry-After": nil}}, + {url: "http://127.0.0.1:9001/?wait_for_completion_timeout=1s", event: `{"c":3}`, wantBody: `{"message": "success"}`, wantHeader: http.Header{"Retry-After": nil}}, + }, + wantStatus: http.StatusOK, + want: []mapstr.M{ + {"json": mapstr.M{"a": int64(1)}}, + {"json": mapstr.M{"b": int64(2)}}, + {"json": mapstr.M{"c": int64(3)}}, + }, + }, } type target struct { - url string - event string + url string + event string + wantBody string + wantHeader http.Header +} + +// isWantedHeader returns whether got includes the wanted header and that +// the values match. A nil value for a header in the receiver matches absence +// of that header in the got parameter. 
+func (t target) isWantedHeader(got http.Header) bool { + for h, v := range t.wantHeader { + if v == nil { + if _, ok := got[h]; ok { + return false + } + continue + } + if !slices.Equal(got[h], v) { + return false + } + } + return true } func TestServerPool(t *testing.T) { @@ -309,9 +392,15 @@ func TestServerPool(t *testing.T) { t.Fatalf("failed to post event #%d: %v", i, err) } body := dump(resp.Body) - if resp.StatusCode != http.StatusOK { - t.Errorf("unexpected response status code: %s (%d)\nresp: %s", - resp.Status, resp.StatusCode, body) + if resp.StatusCode != test.wantStatus { + t.Errorf("unexpected response status code: %s (%d), want: %d\nresp: %s", + resp.Status, resp.StatusCode, test.wantStatus, body) + } + if len(e.wantBody) != 0 && string(body) != e.wantBody { + t.Errorf("unexpected response body:\ngot: %s\nwant:%s", body, e.wantBody) + } + if !e.isWantedHeader(resp.Header) { + t.Errorf("unexpected header:\n--- want\n+++ got\n%s", cmp.Diff(e.wantHeader, resp.Header)) } } cancel() @@ -320,8 +409,8 @@ func TestServerPool(t *testing.T) { for _, e := range pub.events { got = append(got, e.Fields) } - if !cmp.Equal(got, test.want) { - t.Errorf("unexpected result:\n--- got\n--- want\n%s", cmp.Diff(got, test.want)) + if !cmp.Equal(test.want, got) { + t.Errorf("unexpected result:\n--- want\n+++ got\n%s", cmp.Diff(test.want, got)) } // Try to re-register the same addresses. 
diff --git a/x-pack/libbeat/management/tests/mbtest/metricbeat_v2_test.go b/x-pack/libbeat/management/tests/mbtest/metricbeat_v2_test.go index fb8542514c2c..4ffa583120b4 100644 --- a/x-pack/libbeat/management/tests/mbtest/metricbeat_v2_test.go +++ b/x-pack/libbeat/management/tests/mbtest/metricbeat_v2_test.go @@ -34,8 +34,9 @@ var expectedMBStreams = &proto.UnitExpectedConfig{ } func TestSingleMetricbeatMetricsetWithProcessors(t *testing.T) { - tests.InitBeatsForTest(t, cmd.RootCmd) - var mbStreams = []*proto.Stream{ + mbCmd := cmd.Initialize() + tests.InitBeatsForTest(t, mbCmd) + mbStreams := []*proto.Stream{ { Id: "system/metrics-system.cpu-default-system", DataStream: &proto.DataStream{ @@ -79,7 +80,7 @@ func TestSingleMetricbeatMetricsetWithProcessors(t *testing.T) { go func() { t.Logf("Running beats...") - err := cmd.RootCmd.Execute() + err := mbCmd.Execute() require.NoError(t, err) }() diff --git a/x-pack/libbeat/management/tests/mbtest/system/process_integration_test.go b/x-pack/libbeat/management/tests/mbtest/system/process_integration_test.go index 660e95255582..3f9361823337 100644 --- a/x-pack/libbeat/management/tests/mbtest/system/process_integration_test.go +++ b/x-pack/libbeat/management/tests/mbtest/system/process_integration_test.go @@ -37,7 +37,8 @@ func TestProcessStatusReporter(t *testing.T) { unitOutID := mock.NewID() token := mock.NewID() - tests.InitBeatsForTest(t, cmd.RootCmd) + mbCmd := cmd.Initialize() + tests.InitBeatsForTest(t, mbCmd) filename := fmt.Sprintf("test-%d", time.Now().Unix()) outPath := filepath.Join(t.TempDir(), filename) @@ -122,7 +123,7 @@ func TestProcessStatusReporter(t *testing.T) { go func() { t.Logf("Running beats...") - err := cmd.RootCmd.Execute() + err := mbCmd.Execute() require.NoError(t, err) }() diff --git a/x-pack/metricbeat/cmd/root.go b/x-pack/metricbeat/cmd/root.go index 47e169b1c956..76ca40ddf131 100644 --- a/x-pack/metricbeat/cmd/root.go +++ b/x-pack/metricbeat/cmd/root.go @@ -33,9 +33,6 @@ const ( Name = 
"metricbeat" ) -// RootCmd to handle beats cli -var RootCmd *cmd.BeatsRootCmd - // withECSVersion is a modifier that adds ecs.version to events. var withECSVersion = processing.WithFields(mapstr.M{ "ecs": mapstr.M{ @@ -43,7 +40,7 @@ var withECSVersion = processing.WithFields(mapstr.M{ }, }) -func init() { +func Initialize() *cmd.BeatsRootCmd { globalProcs, err := processors.NewPluginConfigFromList(defaultProcessors()) if err != nil { // these are hard-coded, shouldn't fail panic(fmt.Errorf("error creating global processors: %w", err)) @@ -51,12 +48,13 @@ func init() { settings := mbcmd.MetricbeatSettings("") settings.ElasticLicensed = true settings.Processing = processing.MakeDefaultSupport(true, globalProcs, withECSVersion, processing.WithHost, processing.WithAgentMeta()) - RootCmd = cmd.GenRootCmdWithSettings(beater.DefaultCreator(), settings) - RootCmd.AddCommand(cmd.GenModulesCmd(Name, "", mbcmd.BuildModulesManager)) - RootCmd.TestCmd.AddCommand(test.GenTestModulesCmd(Name, "", beater.DefaultTestModulesCreator())) - RootCmd.PersistentPreRun = func(cmd *cobra.Command, args []string) { + rootCmd := cmd.GenRootCmdWithSettings(beater.DefaultCreator(), settings) + rootCmd.AddCommand(cmd.GenModulesCmd(Name, "", mbcmd.BuildModulesManager)) + rootCmd.TestCmd.AddCommand(test.GenTestModulesCmd(Name, "", beater.DefaultTestModulesCreator())) + rootCmd.PersistentPreRun = func(cmd *cobra.Command, args []string) { management.ConfigTransform.SetTransform(metricbeatCfg) } + return rootCmd } func defaultProcessors() []mapstr.M { diff --git a/x-pack/metricbeat/main.go b/x-pack/metricbeat/main.go index 92469da9c174..08afded3254f 100644 --- a/x-pack/metricbeat/main.go +++ b/x-pack/metricbeat/main.go @@ -19,7 +19,7 @@ import ( ) func main() { - if err := cmd.RootCmd.Execute(); err != nil { + if err := cmd.Initialize().Execute(); err != nil { os.Exit(1) } } diff --git a/x-pack/metricbeat/main_test.go b/x-pack/metricbeat/main_test.go index e96a9932765c..906782e1f5a5 100644 --- 
a/x-pack/metricbeat/main_test.go +++ b/x-pack/metricbeat/main_test.go @@ -6,21 +6,27 @@ package main // This file is mandatory as otherwise the metricbeat.test binary is not generated correctly. import ( "flag" + "os" "testing" "github.com/elastic/beats/v7/libbeat/cfgfile" + cmd "github.com/elastic/beats/v7/libbeat/cmd" "github.com/elastic/beats/v7/libbeat/tests/system/template" - "github.com/elastic/beats/v7/x-pack/metricbeat/cmd" + mbcmd "github.com/elastic/beats/v7/x-pack/metricbeat/cmd" ) -var systemTest *bool +var ( + systemTest *bool + mbCommand *cmd.BeatsRootCmd +) func init() { testing.Init() systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") - cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) + mbCommand = mbcmd.Initialize() + mbCommand.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) cfgfile.AddAllowedBackwardsCompatibleFlag("systemTest") - cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) + mbCommand.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) cfgfile.AddAllowedBackwardsCompatibleFlag("test.coverprofile") } @@ -28,10 +34,12 @@ func init() { func TestSystem(t *testing.T) { cfgfile.ConvertFlagsForBackwardsCompatibility() if *systemTest { - main() + if err := mbCommand.Execute(); err != nil { + os.Exit(1) + } } } func TestTemplate(t *testing.T) { - template.TestTemplate(t, cmd.Name, true) + template.TestTemplate(t, mbCommand.Name(), true) } diff --git a/x-pack/metricbeat/metricbeat.reference.yml b/x-pack/metricbeat/metricbeat.reference.yml index 7a32f16c19c8..f7f048da84bd 100644 --- a/x-pack/metricbeat/metricbeat.reference.yml +++ b/x-pack/metricbeat/metricbeat.reference.yml @@ -618,6 +618,7 @@ metricbeat.modules: credentials_file_path: "your JSON credentials file path" exclude_labels: false period: 1m + location_label: "resource.labels.zone" metrics: - aligner: ALIGN_NONE service: compute diff --git 
a/x-pack/metricbeat/module/gcp/_meta/config.yml b/x-pack/metricbeat/module/gcp/_meta/config.yml index ad0c7da852fa..55001654db98 100644 --- a/x-pack/metricbeat/module/gcp/_meta/config.yml +++ b/x-pack/metricbeat/module/gcp/_meta/config.yml @@ -34,6 +34,7 @@ credentials_file_path: "your JSON credentials file path" exclude_labels: false period: 1m + location_label: "resource.labels.zone" metrics: - aligner: ALIGN_NONE service: compute diff --git a/x-pack/metricbeat/module/gcp/distribution.go b/x-pack/metricbeat/module/gcp/distribution.go index aae21e1d58e5..f77b3afc7112 100644 --- a/x-pack/metricbeat/module/gcp/distribution.go +++ b/x-pack/metricbeat/module/gcp/distribution.go @@ -53,7 +53,7 @@ func calcLinearUpperBound(bucket *distribution.Distribution_BucketOptions_Linear return bucket.Offset + (bucket.Width * float64(i)) } -func createHistogram(values []float64, counts []uint64) mapstr.M { +func createHistogram(values []float64, counts []int64) mapstr.M { return mapstr.M{ "values": values, "counts": counts, @@ -62,11 +62,11 @@ func createHistogram(values []float64, counts []uint64) mapstr.M { func DistributionHistogramToES(d *distribution.Distribution) mapstr.M { if !containsHistogram(d) { - return createHistogram([]float64{}, []uint64{}) + return createHistogram([]float64{}, []int64{}) } values := make([]float64, 0, len(d.BucketCounts)) - counts := make([]uint64, 0, len(d.BucketCounts)) + counts := make([]int64, 0, len(d.BucketCounts)) switch { case d.BucketOptions.GetExplicitBuckets() != nil: @@ -79,19 +79,17 @@ func DistributionHistogramToES(d *distribution.Distribution) mapstr.M { bucket := d.BucketOptions.GetExponentialBuckets() for i := range d.BucketCounts { - values = append(values, calcExponentialUpperBound(bucket, i+1)) + values = append(values, calcExponentialUpperBound(bucket, i)) } case d.BucketOptions.GetLinearBuckets() != nil: bucket := d.BucketOptions.GetLinearBuckets() for i := range d.BucketCounts { - values = append(values, 
calcLinearUpperBound(bucket, i+1)) + values = append(values, calcLinearUpperBound(bucket, i)) } } - for i := range d.BucketCounts { - counts = append(counts, uint64(d.BucketCounts[i])) - } + counts = append(counts, d.BucketCounts...) return createHistogram(values, counts) } diff --git a/x-pack/metricbeat/module/gcp/distribution_test.go b/x-pack/metricbeat/module/gcp/distribution_test.go index e2f582653beb..3838a9e9dd3a 100644 --- a/x-pack/metricbeat/module/gcp/distribution_test.go +++ b/x-pack/metricbeat/module/gcp/distribution_test.go @@ -40,7 +40,7 @@ func TestDistributionHistogramToES(t *testing.T) { }, }, expected: mapstr.M{ - "counts": []uint64{0, 0, 0, 6, 1, 1}, + "counts": []int64{0, 0, 0, 6, 1, 1}, "values": []float64{0, 1, 2, 5, 10, 20}, }, }, @@ -63,8 +63,8 @@ func TestDistributionHistogramToES(t *testing.T) { }, }, expected: mapstr.M{ - "counts": []uint64{0, 0, 3, 1}, - "values": []float64{6, 12, 24, 48}, + "counts": []int64{0, 0, 3, 1}, + "values": []float64{3, 6, 12, 24}, }, }, }, @@ -86,8 +86,8 @@ func TestDistributionHistogramToES(t *testing.T) { }, }, expected: mapstr.M{ - "counts": []uint64{0, 1, 2, 0}, - "values": []float64{20, 35, 50, 65}, + "counts": []int64{0, 1, 2, 0}, + "values": []float64{5, 20, 35, 50}, }, }, }, @@ -107,7 +107,7 @@ func TestDistributionHistogramToES(t *testing.T) { }, }, expected: mapstr.M{ - "counts": []uint64{}, + "counts": []int64{}, "values": []float64{}, }, }, diff --git a/x-pack/metricbeat/module/gcp/metrics/_meta/docs.asciidoc b/x-pack/metricbeat/module/gcp/metrics/_meta/docs.asciidoc index 642df10f4962..40dc27e80e8f 100644 --- a/x-pack/metricbeat/module/gcp/metrics/_meta/docs.asciidoc +++ b/x-pack/metricbeat/module/gcp/metrics/_meta/docs.asciidoc @@ -45,6 +45,8 @@ services under "Google Cloud metrics", but does not work for other services (`kubernetes` aka GKE for example). This option allow to override the default and specify an arbitrary metric prefix. 
+* *location_label*: Use this option to specify the resource label that identifies the location (such as zone or region) for a Google Cloud service when filtering metrics. For example, labels like `resource.label.location` or `resource.label.zone` are used by Google Cloud to represent the region or zone of a resource. This is an optional configuration for the user. + [float] === Example Configuration * `metrics` metricset is enabled to collect metrics from all zones under @@ -134,3 +136,28 @@ metric prefix, as for GKE metrics the required prefix is `kubernetes.io/` metric_types: - "container/cpu/core_usage_time" ---- + +* `metrics` metricset is enabled to collect metrics from region +`us-east4` in the `elastic-observability` project. The metric, the number of replicas of the prediction model, is +collected from a new GCP service `aiplatform`. Since it's a new service that is not supported by +default in this metricset, the user provides the service label (`resource.label.location`) with which +the user wants to filter the incoming data. + ++ +[source,yaml] +---- +- module: gcp + metricsets: + - metrics + project_id: "elastic-observability" + credentials_json: "your JSON credentials" + exclude_labels: false + period: 1m + location_label: "resource.label.location" # This is an optional configuration + regions: + - us-east4 + metrics: + - service: aiplatform + metric_types: + - "prediction/online/replicas" +---- \ No newline at end of file diff --git a/x-pack/metricbeat/module/gcp/metrics/metrics_requester.go b/x-pack/metricbeat/module/gcp/metrics/metrics_requester.go index ad0632e6c852..915ff63190f0 100644 --- a/x-pack/metricbeat/module/gcp/metrics/metrics_requester.go +++ b/x-pack/metricbeat/module/gcp/metrics/metrics_requester.go @@ -251,7 +251,7 @@ func (r *metricsRequester) getFilterForMetric(serviceName, m string) string { // NOTE: some GCP services are global, not regional or zonal. To these services we don't need // to apply any additional filters.
if locationsConfigsAvailable && !isAGlobalService(serviceName) { - serviceLabel := getServiceLabelFor(serviceName) + serviceLabel := r.getServiceLabel(serviceName) f = r.buildLocationFilter(serviceLabel, f) } @@ -261,6 +261,16 @@ func (r *metricsRequester) getFilterForMetric(serviceName, m string) string { return f } +// getServiceLabel determines the service label to be used for the given service name. If a custom +// location label is configured, it will be used. Otherwise, the default service label for the +// given service name will be returned. +func (r *metricsRequester) getServiceLabel(serviceName string) string { + if r.config.LocationLabel != "" { + return r.config.LocationLabel + } + return getServiceLabelFor(serviceName) +} + // Returns a GCP TimeInterval based on the ingestDelay and samplePeriod from ListMetricDescriptor func getTimeIntervalAligner(ingestDelay time.Duration, samplePeriod time.Duration, collectionPeriod *durationpb.Duration, inputAligner string) (*monitoringpb.TimeInterval, string) { var startTime, endTime, currentTime time.Time diff --git a/x-pack/metricbeat/module/gcp/metrics/metrics_requester_test.go b/x-pack/metricbeat/module/gcp/metrics/metrics_requester_test.go index 9fb044e39e5f..20f8a5d9e366 100644 --- a/x-pack/metricbeat/module/gcp/metrics/metrics_requester_test.go +++ b/x-pack/metricbeat/module/gcp/metrics/metrics_requester_test.go @@ -128,6 +128,13 @@ func TestGetFilterForMetric(t *testing.T) { metricsRequester{config: config{Region: "foobar", Regions: []string{"foo", "bar"}}, logger: logger}, "metric.type=\"dummy\" AND resource.labels.zone = starts_with(\"foobar\")", }, + { + "aiplatform service with configured region and zone", + "aiplatform", + "", + metricsRequester{config: config{Region: "foo", Zone: "bar", LocationLabel: "resource.label.location"}, logger: logger}, + "metric.type=\"dummy\" AND resource.label.location = starts_with(\"foo\")", + }, } for _, c := range cases { diff --git 
a/x-pack/metricbeat/module/gcp/metrics/metricset.go b/x-pack/metricbeat/module/gcp/metrics/metricset.go index f5b15d68fb3e..660f1936f893 100644 --- a/x-pack/metricbeat/module/gcp/metrics/metricset.go +++ b/x-pack/metricbeat/module/gcp/metrics/metricset.go @@ -103,6 +103,7 @@ type config struct { Zone string `config:"zone"` Region string `config:"region"` Regions []string `config:"regions"` + LocationLabel string `config:"location_label"` ProjectID string `config:"project_id" validate:"required"` ExcludeLabels bool `config:"exclude_labels"` CredentialsFilePath string `config:"credentials_file_path"` diff --git a/x-pack/metricbeat/module/gcp/pubsub/_meta/data_topic.json b/x-pack/metricbeat/module/gcp/pubsub/_meta/data_topic.json index 1453f9f379c7..cb6585606dfd 100644 --- a/x-pack/metricbeat/module/gcp/pubsub/_meta/data_topic.json +++ b/x-pack/metricbeat/module/gcp/pubsub/_meta/data_topic.json @@ -12,25 +12,112 @@ "module": "gcp" }, "gcp": { - "labels": { - "resource": { - "topic_id": "test-ks" - } - }, "pubsub": { "topic": { "message_sizes": { - "bucket_options": { - "Options": { - "ExponentialBuckets": { - "num_finite_buckets": 16, - "growth_factor": 4, - "scale": 1 - } + "bytes": { + "histogram": { + "values": [ + 1, + 1.2, + 1.44, + 1.728, + 2.0736, + 2.48832, + 2.9859839999999997, + 3.5831807999999996, + 4.299816959999999, + 5.159780351999999, + 6.191736422399999, + 7.430083706879999, + 8.916100448255998, + 10.699320537907198, + 12.839184645488636, + 15.407021574586365, + 18.488425889503635, + 22.18611106740436, + 26.623333280885234, + 31.94799993706228, + 38.33759992447474, + 46.00511990936968, + 55.20614389124361, + 66.24737266949234, + 79.4968472033908, + 95.39621664406897, + 114.47545997288276, + 137.3705519674593, + 164.84466236095116, + 197.8135948331414, + 237.37631379976966, + 284.8515765597236, + 341.82189187166824, + 410.18627024600187, + 492.22352429520225, + 590.6682291542427, + 708.8018749850912, + 850.5622499821095, + 1020.6746999785313, + 
1224.8096399742376, + 1469.771567969085, + 1763.725881562902, + 2116.4710578754825, + 2539.7652694505787 + ], + "counts": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 1, + 10, + 3, + 2 + ] } } } } + }, + "labels": { + "resource": { + "topic_id": "test-topic" + } } }, "metricset": { diff --git a/x-pack/metricbeat/module/mssql/connection.go b/x-pack/metricbeat/module/mssql/connection.go index ac328491ddb2..cc8b0683e5ac 100644 --- a/x-pack/metricbeat/module/mssql/connection.go +++ b/x-pack/metricbeat/module/mssql/connection.go @@ -9,7 +9,7 @@ import ( "fmt" // Register driver. - _ "github.com/denisenkom/go-mssqldb" + _ "github.com/microsoft/go-mssqldb" ) // NewConnection returns a connection already established with MSSQL diff --git a/x-pack/metricbeat/module/mssql/performance/data_integration_test.go b/x-pack/metricbeat/module/mssql/performance/data_integration_test.go index 0d3d1d8fc781..0e351805d80f 100644 --- a/x-pack/metricbeat/module/mssql/performance/data_integration_test.go +++ b/x-pack/metricbeat/module/mssql/performance/data_integration_test.go @@ -5,11 +5,9 @@ package performance import ( - "errors" - "net/url" "testing" - _ "github.com/denisenkom/go-mssqldb" + _ "github.com/microsoft/go-mssqldb" "github.com/stretchr/testify/assert" mbtest "github.com/elastic/beats/v7/metricbeat/mb/testing" @@ -24,30 +22,3 @@ func TestData(t *testing.T) { err := mbtest.WriteEventsReporterV2(f, t, "") assert.NoError(t, err) } - -func getHostURI() (string, map[string]interface{}, error) { - config := mtest.GetConfig("performance") - - host, ok := config["hosts"].([]string) - if !ok { - return "", nil, errors.New("error getting host name information") - } - - username, ok := config["username"].(string) - if !ok { - return "", nil, errors.New("error getting username information") - } - - password, ok := 
config["password"].(string) - if !ok { - return "", nil, errors.New("error getting password information") - } - - u := &url.URL{ - Scheme: "sqlserver", - User: url.UserPassword(username, password), - Host: host[0], - } - - return u.String(), config, nil -} diff --git a/x-pack/metricbeat/modules.d/gcp.yml.disabled b/x-pack/metricbeat/modules.d/gcp.yml.disabled index c8cddd198ce3..6bb0c9bad2a8 100644 --- a/x-pack/metricbeat/modules.d/gcp.yml.disabled +++ b/x-pack/metricbeat/modules.d/gcp.yml.disabled @@ -37,6 +37,7 @@ credentials_file_path: "your JSON credentials file path" exclude_labels: false period: 1m + location_label: "resource.labels.zone" metrics: - aligner: ALIGN_NONE service: compute