From 4ece05165c4b6e6b46120cdf191e9a4db958a154 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 28 Aug 2024 14:12:08 -0700 Subject: [PATCH 01/10] fix(deps): update all github.com/aws packages (#34853) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [![Mend Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com) This PR contains the following updates: | Package | Change | Age | Adoption | Passing | Confidence | |---|---|---|---|---|---| | [github.com/aws/aws-sdk-go-v2/config](https://togithub.com/aws/aws-sdk-go-v2) | `v1.27.28` -> `v1.27.31` | [![age](https://developer.mend.io/api/mc/badges/age/go/github.com%2faws%2faws-sdk-go-v2%2fconfig/v1.27.31?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/go/github.com%2faws%2faws-sdk-go-v2%2fconfig/v1.27.31?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/go/github.com%2faws%2faws-sdk-go-v2%2fconfig/v1.27.28/v1.27.31?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/go/github.com%2faws%2faws-sdk-go-v2%2fconfig/v1.27.28/v1.27.31?slim=true)](https://docs.renovatebot.com/merge-confidence/) | | [github.com/aws/aws-sdk-go-v2/credentials](https://togithub.com/aws/aws-sdk-go-v2) | `v1.17.28` -> `v1.17.30` | [![age](https://developer.mend.io/api/mc/badges/age/go/github.com%2faws%2faws-sdk-go-v2%2fcredentials/v1.17.30?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/go/github.com%2faws%2faws-sdk-go-v2%2fcredentials/v1.17.30?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/go/github.com%2faws%2faws-sdk-go-v2%2fcredentials/v1.17.28/v1.17.30?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/go/github.com%2faws%2faws-sdk-go-v2%2fcredentials/v1.17.28/v1.17.30?slim=true)](https://docs.renovatebot.com/merge-confidence/) | | [github.com/aws/aws-sdk-go-v2/feature/s3/manager](https://togithub.com/aws/aws-sdk-go-v2) | `v1.17.11` -> `v1.17.15` | [![age](https://developer.mend.io/api/mc/badges/age/go/github.com%2faws%2faws-sdk-go-v2%2ffeature%2fs3%2fmanager/v1.17.15?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/go/github.com%2faws%2faws-sdk-go-v2%2ffeature%2fs3%2fmanager/v1.17.15?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/go/github.com%2faws%2faws-sdk-go-v2%2ffeature%2fs3%2fmanager/v1.17.11/v1.17.15?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/go/github.com%2faws%2faws-sdk-go-v2%2ffeature%2fs3%2fmanager/v1.17.11/v1.17.15?slim=true)](https://docs.renovatebot.com/merge-confidence/) | | [github.com/aws/aws-sdk-go-v2/service/kinesis](https://togithub.com/aws/aws-sdk-go-v2) | `v1.29.4` -> `v1.29.5` | [![age](https://developer.mend.io/api/mc/badges/age/go/github.com%2faws%2faws-sdk-go-v2%2fservice%2fkinesis/v1.29.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | 
[![adoption](https://developer.mend.io/api/mc/badges/adoption/go/github.com%2faws%2faws-sdk-go-v2%2fservice%2fkinesis/v1.29.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/go/github.com%2faws%2faws-sdk-go-v2%2fservice%2fkinesis/v1.29.4/v1.29.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/go/github.com%2faws%2faws-sdk-go-v2%2fservice%2fkinesis/v1.29.4/v1.29.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | | [github.com/aws/aws-sdk-go-v2/service/s3](https://togithub.com/aws/aws-sdk-go-v2) | `v1.59.0` -> `v1.60.1` | [![age](https://developer.mend.io/api/mc/badges/age/go/github.com%2faws%2faws-sdk-go-v2%2fservice%2fs3/v1.60.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/go/github.com%2faws%2faws-sdk-go-v2%2fservice%2fs3/v1.60.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/go/github.com%2faws%2faws-sdk-go-v2%2fservice%2fs3/v1.59.0/v1.60.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/go/github.com%2faws%2faws-sdk-go-v2%2fservice%2fs3/v1.59.0/v1.60.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | | [github.com/aws/aws-sdk-go-v2/service/secretsmanager](https://togithub.com/aws/aws-sdk-go-v2) | `v1.32.5` -> `v1.32.6` | [![age](https://developer.mend.io/api/mc/badges/age/go/github.com%2faws%2faws-sdk-go-v2%2fservice%2fsecretsmanager/v1.32.6?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/go/github.com%2faws%2faws-sdk-go-v2%2fservice%2fsecretsmanager/v1.32.6?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/go/github.com%2faws%2faws-sdk-go-v2%2fservice%2fsecretsmanager/v1.32.5/v1.32.6?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/go/github.com%2faws%2faws-sdk-go-v2%2fservice%2fsecretsmanager/v1.32.5/v1.32.6?slim=true)](https://docs.renovatebot.com/merge-confidence/) | | [github.com/aws/aws-sdk-go-v2/service/servicediscovery](https://togithub.com/aws/aws-sdk-go-v2) | `v1.31.4` -> `v1.31.5` | [![age](https://developer.mend.io/api/mc/badges/age/go/github.com%2faws%2faws-sdk-go-v2%2fservice%2fservicediscovery/v1.31.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/go/github.com%2faws%2faws-sdk-go-v2%2fservice%2fservicediscovery/v1.31.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/go/github.com%2faws%2faws-sdk-go-v2%2fservice%2fservicediscovery/v1.31.4/v1.31.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/go/github.com%2faws%2faws-sdk-go-v2%2fservice%2fservicediscovery/v1.31.4/v1.31.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | | [github.com/aws/aws-sdk-go-v2/service/sts](https://togithub.com/aws/aws-sdk-go-v2) | `v1.30.4` -> `v1.30.5` | 
[![age](https://developer.mend.io/api/mc/badges/age/go/github.com%2faws%2faws-sdk-go-v2%2fservice%2fsts/v1.30.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/go/github.com%2faws%2faws-sdk-go-v2%2fservice%2fsts/v1.30.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/go/github.com%2faws%2faws-sdk-go-v2%2fservice%2fsts/v1.30.4/v1.30.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/go/github.com%2faws%2faws-sdk-go-v2%2fservice%2fsts/v1.30.4/v1.30.5?slim=true)](https://docs.renovatebot.com/merge-confidence/) | --- > [!WARNING] > Some dependencies could not be looked up. Check the Dependency Dashboard for more information. --- ### Configuration 📅 **Schedule**: Branch creation - "on tuesday" (UTC), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 👻 **Immortal**: This PR will be recreated if closed unmerged. Get [config help](https://togithub.com/renovatebot/renovate/discussions) if that's undesired. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://www.mend.io/free-developer-tools/renovate/). View the [repository job log](https://developer.mend.io/github/open-telemetry/opentelemetry-collector-contrib). --------- Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Co-authored-by: opentelemetrybot <107717825+opentelemetrybot@users.noreply.github.com> Co-authored-by: Yang Song --- cmd/otelcontribcol/go.mod | 14 +++++----- cmd/otelcontribcol/go.sum | 28 +++++++++---------- confmap/provider/s3provider/go.mod | 8 +++--- confmap/provider/s3provider/go.sum | 16 +++++------ .../provider/secretsmanagerprovider/go.mod | 8 +++--- .../provider/secretsmanagerprovider/go.sum | 16 +++++------ exporter/awskinesisexporter/go.mod | 8 +++--- exporter/awskinesisexporter/go.sum | 16 +++++------ exporter/loadbalancingexporter/go.mod | 8 +++--- exporter/loadbalancingexporter/go.sum | 16 +++++------ extension/sigv4authextension/go.mod | 6 ++-- extension/sigv4authextension/go.sum | 12 ++++---- receiver/awss3receiver/go.mod | 10 +++---- receiver/awss3receiver/go.sum | 20 ++++++------- 14 files changed, 93 insertions(+), 93 deletions(-) diff --git a/cmd/otelcontribcol/go.mod b/cmd/otelcontribcol/go.mod index bb1b892a41e4..5363417feba6 100644 --- a/cmd/otelcontribcol/go.mod +++ b/cmd/otelcontribcol/go.mod @@ -395,8 +395,8 @@ require ( github.com/aws/aws-sdk-go v1.55.5 // indirect github.com/aws/aws-sdk-go-v2 v1.30.4 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 // indirect - github.com/aws/aws-sdk-go-v2/config v1.27.28 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.28 // indirect + github.com/aws/aws-sdk-go-v2/config v1.27.31 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.30 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12 // indirect github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.15 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 // indirect @@ -407,13 +407,13 @@ require ( github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.18 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18 
// indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16 // indirect - github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.4 // indirect - github.com/aws/aws-sdk-go-v2/service/s3 v1.59.0 // indirect - github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.5 // indirect - github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.31.4 // indirect + github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.5 // indirect + github.com/aws/aws-sdk-go-v2/service/s3 v1.60.1 // indirect + github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.6 // indirect + github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.31.5 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.22.5 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.30.4 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.30.5 // indirect github.com/aws/smithy-go v1.20.4 // indirect github.com/axiomhq/hyperloglog v0.0.0-20230201085229-3ddf4bad03dc // indirect github.com/bahlo/generic-list-go v0.2.0 // indirect diff --git a/cmd/otelcontribcol/go.sum b/cmd/otelcontribcol/go.sum index c4a23f4000f3..aa35c621052b 100644 --- a/cmd/otelcontribcol/go.sum +++ b/cmd/otelcontribcol/go.sum @@ -1013,11 +1013,11 @@ github.com/aws/aws-sdk-go-v2 v1.30.4/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6Wtu github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 h1:70PVAiL15/aBMh5LThwgXdSQorVr91L127ttckI9QQU= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4/go.mod h1:/MQxMqci8tlqDH+pjmoLu1i0tbWCUP1hhyMRuFxpQCw= github.com/aws/aws-sdk-go-v2/config v1.18.25/go.mod h1:dZnYpD5wTW/dQF0rRNLVypB396zWCcPiBIvdvSWHEg4= -github.com/aws/aws-sdk-go-v2/config v1.27.28 h1:OTxWGW/91C61QlneCtnD62NLb4W616/NM1jA8LhJqbg= -github.com/aws/aws-sdk-go-v2/config v1.27.28/go.mod h1:uzVRVtJSU5EFv6Fu82AoVFKozJi2ZCY6WRCXj06rbvs= +github.com/aws/aws-sdk-go-v2/config v1.27.31 h1:kxBoRsjhT3pq0cKthgj6RU6bXTm/2SgdoUMyrVw0rAI= +github.com/aws/aws-sdk-go-v2/config v1.27.31/go.mod h1:z04nZdSWFPaDwK3DdJOG2r+scLQzMYuJeW0CujEm9FM= github.com/aws/aws-sdk-go-v2/credentials v1.13.24/go.mod h1:jYPYi99wUOPIFi0rhiOvXeSEReVOzBqFNOX5bXYoG2o= -github.com/aws/aws-sdk-go-v2/credentials v1.17.28 h1:m8+AHY/ND8CMHJnPoH7PJIRakWGa4gbfbxuY9TGTUXM= -github.com/aws/aws-sdk-go-v2/credentials v1.17.28/go.mod h1:6TF7dSc78ehD1SL6KpRIPKMA1GyyWflIkjqg+qmf4+c= +github.com/aws/aws-sdk-go-v2/credentials v1.17.30 h1:aau/oYFtibVovr2rDt8FHlU17BTicFEMAi29V1U+L5Q= +github.com/aws/aws-sdk-go-v2/credentials v1.17.30/go.mod h1:BPJ/yXV92ZVq6G8uYvbU0gSl8q94UB63nMT5ctNO38g= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.3/go.mod h1:4Q0UFP0YJf0NrsEuEYHpM9fTSEVnD16Z3uyEF7J9JGM= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12 h1:yjwoSyDZF8Jth+mUk5lSPJCkMC0lMy6FaCD51jm6ayE= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12/go.mod h1:fuR57fAgMk7ot3WcNQfb6rSEn+SUffl7ri+aa8uKysI= @@ -1043,14 +1043,14 @@ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18 h1:tJ5RnkHC github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18/go.mod h1:++NHzT+nAF7ZPrHPsA+ENvsXkOO8wEu+C6RXltAG4/c= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16 h1:jg16PhLPUiHIj8zYIW6bqzeQSuHVEiWnGA0Brz5Xv2I= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16/go.mod h1:Uyk1zE1VVdsHSU7096h/rwnXDzOzYQVl+FNPhPw7ShY= -github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.4 h1:Tu/EuXZH2pJD4Fcz1UmkLXcquc5xTvVLjhCQeBhTnQ4= -github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.4/go.mod 
h1:pKTvEQz1PcNd+gKArVyeHpVM63AWnFqYyg07WAQQANQ= -github.com/aws/aws-sdk-go-v2/service/s3 v1.59.0 h1:Cso4Ev/XauMVsbwdhYEoxg8rxZWw43CFqqaPB5w3W2c= -github.com/aws/aws-sdk-go-v2/service/s3 v1.59.0/go.mod h1:BSPI0EfnYUuNHPS0uqIo5VrRwzie+Fp+YhQOUs16sKI= -github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.5 h1:UDXu9dqpCZYonj7poM4kFISjzTdWI0v3WUusM+w+Gfc= -github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.5/go.mod h1:5NPkI3RsTOhwz1CuG7VVSgJCm3CINKkoIaUbUZWQ67w= -github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.31.4 h1:YEY+Y4Lf3TuFrw8keb8NZ5nsbo/YplxEgZWbqnDlq+Y= -github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.31.4/go.mod h1:5autx6GwAtQVv8S/qTwBKfxzAAwe8hOlzVuTtLdliVw= +github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.5 h1:iirGMva2IXw4kcqsvuF+uc8ARweuVqoQJjzRZGaiV1E= +github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.5/go.mod h1:pKTvEQz1PcNd+gKArVyeHpVM63AWnFqYyg07WAQQANQ= +github.com/aws/aws-sdk-go-v2/service/s3 v1.60.1 h1:mx2ucgtv+MWzJesJY9Ig/8AFHgoE5FwLXwUVgW/FGdI= +github.com/aws/aws-sdk-go-v2/service/s3 v1.60.1/go.mod h1:BSPI0EfnYUuNHPS0uqIo5VrRwzie+Fp+YhQOUs16sKI= +github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.6 h1:3TZlWvCC813uhS1Z4fVTmBhg41OYUrgSlvXqIDDkurw= +github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.6/go.mod h1:5NPkI3RsTOhwz1CuG7VVSgJCm3CINKkoIaUbUZWQ67w= +github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.31.5 h1:z7nPig/pFU+TAAKouI51pCVQPEeQHZC2mZXSK+g0Av8= +github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.31.5/go.mod h1:5autx6GwAtQVv8S/qTwBKfxzAAwe8hOlzVuTtLdliVw= github.com/aws/aws-sdk-go-v2/service/sso v1.12.10/go.mod h1:ouy2P4z6sJN70fR3ka3wD3Ro3KezSxU6eKGQI2+2fjI= github.com/aws/aws-sdk-go-v2/service/sso v1.22.5 h1:zCsFCKvbj25i7p1u94imVoO447I/sFv8qq+lGJhRN0c= github.com/aws/aws-sdk-go-v2/service/sso v1.22.5/go.mod h1:ZeDX1SnKsVlejeuz41GiajjZpRSWR7/42q/EyA/QEiM= @@ -1058,8 +1058,8 @@ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.10/go.mod h1:AFvkxc8xfBe8XA+5 github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5 h1:SKvPgvdvmiTWoi0GAJ7AsJfOz3ngVkD/ERbs5pUnHNI= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5/go.mod h1:20sz31hv/WsPa3HhU3hfrIet2kxM4Pe0r20eBZ20Tac= github.com/aws/aws-sdk-go-v2/service/sts v1.19.0/go.mod h1:BgQOMsg8av8jset59jelyPW7NoZcZXLVpDsXunGDrk8= -github.com/aws/aws-sdk-go-v2/service/sts v1.30.4 h1:iAckBT2OeEK/kBDyN/jDtpEExhjeeA/Im2q4X0rJZT8= -github.com/aws/aws-sdk-go-v2/service/sts v1.30.4/go.mod h1:vmSqFK+BVIwVpDAGZB3CoCXHzurt4qBE8lf+I/kRTh0= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.5 h1:OMsEmCyz2i89XwRwPouAJvhj81wINh+4UK+k/0Yo/q8= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.5/go.mod h1:vmSqFK+BVIwVpDAGZB3CoCXHzurt4qBE8lf+I/kRTh0= github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= github.com/aws/smithy-go v1.20.4 h1:2HK1zBdPgRbjFOHlfeQZfpC4r72MOb9bZkiFwggKO+4= github.com/aws/smithy-go v1.20.4/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= diff --git a/confmap/provider/s3provider/go.mod b/confmap/provider/s3provider/go.mod index 9bc2265730ed..827415b644b4 100644 --- a/confmap/provider/s3provider/go.mod +++ b/confmap/provider/s3provider/go.mod @@ -4,8 +4,8 @@ go 1.22.0 require ( github.com/aws/aws-sdk-go-v2 v1.30.4 - github.com/aws/aws-sdk-go-v2/config v1.27.28 - github.com/aws/aws-sdk-go-v2/service/s3 v1.59.0 + github.com/aws/aws-sdk-go-v2/config v1.27.31 + github.com/aws/aws-sdk-go-v2/service/s3 v1.60.1 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/confmap v1.14.1 go.uber.org/goleak v1.3.0 @@ -14,7 +14,7 @@ 
require ( require ( github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.28 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.30 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 // indirect github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16 // indirect @@ -26,7 +26,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.22.5 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.30.4 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.30.5 // indirect github.com/aws/smithy-go v1.20.4 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/go-viper/mapstructure/v2 v2.1.0 // indirect diff --git a/confmap/provider/s3provider/go.sum b/confmap/provider/s3provider/go.sum index 44a41f8ac4f2..81fac451d7cc 100644 --- a/confmap/provider/s3provider/go.sum +++ b/confmap/provider/s3provider/go.sum @@ -2,10 +2,10 @@ github.com/aws/aws-sdk-go-v2 v1.30.4 h1:frhcagrVNrzmT95RJImMHgabt99vkXGslubDaDag github.com/aws/aws-sdk-go-v2 v1.30.4/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 h1:70PVAiL15/aBMh5LThwgXdSQorVr91L127ttckI9QQU= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4/go.mod h1:/MQxMqci8tlqDH+pjmoLu1i0tbWCUP1hhyMRuFxpQCw= -github.com/aws/aws-sdk-go-v2/config v1.27.28 h1:OTxWGW/91C61QlneCtnD62NLb4W616/NM1jA8LhJqbg= -github.com/aws/aws-sdk-go-v2/config v1.27.28/go.mod h1:uzVRVtJSU5EFv6Fu82AoVFKozJi2ZCY6WRCXj06rbvs= -github.com/aws/aws-sdk-go-v2/credentials v1.17.28 h1:m8+AHY/ND8CMHJnPoH7PJIRakWGa4gbfbxuY9TGTUXM= -github.com/aws/aws-sdk-go-v2/credentials v1.17.28/go.mod h1:6TF7dSc78ehD1SL6KpRIPKMA1GyyWflIkjqg+qmf4+c= +github.com/aws/aws-sdk-go-v2/config v1.27.31 h1:kxBoRsjhT3pq0cKthgj6RU6bXTm/2SgdoUMyrVw0rAI= +github.com/aws/aws-sdk-go-v2/config v1.27.31/go.mod h1:z04nZdSWFPaDwK3DdJOG2r+scLQzMYuJeW0CujEm9FM= +github.com/aws/aws-sdk-go-v2/credentials v1.17.30 h1:aau/oYFtibVovr2rDt8FHlU17BTicFEMAi29V1U+L5Q= +github.com/aws/aws-sdk-go-v2/credentials v1.17.30/go.mod h1:BPJ/yXV92ZVq6G8uYvbU0gSl8q94UB63nMT5ctNO38g= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12 h1:yjwoSyDZF8Jth+mUk5lSPJCkMC0lMy6FaCD51jm6ayE= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12/go.mod h1:fuR57fAgMk7ot3WcNQfb6rSEn+SUffl7ri+aa8uKysI= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 h1:TNyt/+X43KJ9IJJMjKfa3bNTiZbUP7DeCxfbTROESwY= @@ -24,14 +24,14 @@ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18 h1:tJ5RnkHC github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18/go.mod h1:++NHzT+nAF7ZPrHPsA+ENvsXkOO8wEu+C6RXltAG4/c= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16 h1:jg16PhLPUiHIj8zYIW6bqzeQSuHVEiWnGA0Brz5Xv2I= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16/go.mod h1:Uyk1zE1VVdsHSU7096h/rwnXDzOzYQVl+FNPhPw7ShY= -github.com/aws/aws-sdk-go-v2/service/s3 v1.59.0 h1:Cso4Ev/XauMVsbwdhYEoxg8rxZWw43CFqqaPB5w3W2c= -github.com/aws/aws-sdk-go-v2/service/s3 v1.59.0/go.mod h1:BSPI0EfnYUuNHPS0uqIo5VrRwzie+Fp+YhQOUs16sKI= +github.com/aws/aws-sdk-go-v2/service/s3 v1.60.1 h1:mx2ucgtv+MWzJesJY9Ig/8AFHgoE5FwLXwUVgW/FGdI= +github.com/aws/aws-sdk-go-v2/service/s3 v1.60.1/go.mod h1:BSPI0EfnYUuNHPS0uqIo5VrRwzie+Fp+YhQOUs16sKI= 
github.com/aws/aws-sdk-go-v2/service/sso v1.22.5 h1:zCsFCKvbj25i7p1u94imVoO447I/sFv8qq+lGJhRN0c= github.com/aws/aws-sdk-go-v2/service/sso v1.22.5/go.mod h1:ZeDX1SnKsVlejeuz41GiajjZpRSWR7/42q/EyA/QEiM= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5 h1:SKvPgvdvmiTWoi0GAJ7AsJfOz3ngVkD/ERbs5pUnHNI= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5/go.mod h1:20sz31hv/WsPa3HhU3hfrIet2kxM4Pe0r20eBZ20Tac= -github.com/aws/aws-sdk-go-v2/service/sts v1.30.4 h1:iAckBT2OeEK/kBDyN/jDtpEExhjeeA/Im2q4X0rJZT8= -github.com/aws/aws-sdk-go-v2/service/sts v1.30.4/go.mod h1:vmSqFK+BVIwVpDAGZB3CoCXHzurt4qBE8lf+I/kRTh0= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.5 h1:OMsEmCyz2i89XwRwPouAJvhj81wINh+4UK+k/0Yo/q8= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.5/go.mod h1:vmSqFK+BVIwVpDAGZB3CoCXHzurt4qBE8lf+I/kRTh0= github.com/aws/smithy-go v1.20.4 h1:2HK1zBdPgRbjFOHlfeQZfpC4r72MOb9bZkiFwggKO+4= github.com/aws/smithy-go v1.20.4/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= diff --git a/confmap/provider/secretsmanagerprovider/go.mod b/confmap/provider/secretsmanagerprovider/go.mod index a345b36064c3..e367ce1afc8b 100644 --- a/confmap/provider/secretsmanagerprovider/go.mod +++ b/confmap/provider/secretsmanagerprovider/go.mod @@ -3,15 +3,15 @@ module github.com/open-telemetry/opentelemetry-collector-contrib/confmap/provide go 1.22.0 require ( - github.com/aws/aws-sdk-go-v2/config v1.27.28 - github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.5 + github.com/aws/aws-sdk-go-v2/config v1.27.31 + github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.6 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/confmap v1.14.1 ) require ( github.com/aws/aws-sdk-go-v2 v1.30.4 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.28 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.30 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 // indirect github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16 // indirect @@ -20,7 +20,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.22.5 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.30.4 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.30.5 // indirect github.com/aws/smithy-go v1.20.4 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/go-viper/mapstructure/v2 v2.1.0 // indirect diff --git a/confmap/provider/secretsmanagerprovider/go.sum b/confmap/provider/secretsmanagerprovider/go.sum index 4da0ed9586ce..be466e8a90dc 100644 --- a/confmap/provider/secretsmanagerprovider/go.sum +++ b/confmap/provider/secretsmanagerprovider/go.sum @@ -15,11 +15,11 @@ github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVj github.com/aws/aws-sdk-go-v2 v1.30.4 h1:frhcagrVNrzmT95RJImMHgabt99vkXGslubDaDagTk8= github.com/aws/aws-sdk-go-v2 v1.30.4/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0= github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw= -github.com/aws/aws-sdk-go-v2/config v1.27.28 h1:OTxWGW/91C61QlneCtnD62NLb4W616/NM1jA8LhJqbg= -github.com/aws/aws-sdk-go-v2/config v1.27.28/go.mod h1:uzVRVtJSU5EFv6Fu82AoVFKozJi2ZCY6WRCXj06rbvs= +github.com/aws/aws-sdk-go-v2/config v1.27.31 
h1:kxBoRsjhT3pq0cKthgj6RU6bXTm/2SgdoUMyrVw0rAI= +github.com/aws/aws-sdk-go-v2/config v1.27.31/go.mod h1:z04nZdSWFPaDwK3DdJOG2r+scLQzMYuJeW0CujEm9FM= github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ= -github.com/aws/aws-sdk-go-v2/credentials v1.17.28 h1:m8+AHY/ND8CMHJnPoH7PJIRakWGa4gbfbxuY9TGTUXM= -github.com/aws/aws-sdk-go-v2/credentials v1.17.28/go.mod h1:6TF7dSc78ehD1SL6KpRIPKMA1GyyWflIkjqg+qmf4+c= +github.com/aws/aws-sdk-go-v2/credentials v1.17.30 h1:aau/oYFtibVovr2rDt8FHlU17BTicFEMAi29V1U+L5Q= +github.com/aws/aws-sdk-go-v2/credentials v1.17.30/go.mod h1:BPJ/yXV92ZVq6G8uYvbU0gSl8q94UB63nMT5ctNO38g= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0/go.mod h1:gqlclDEZp4aqJOancXK6TN24aKhT0W0Ae9MHk3wzTMM= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12 h1:yjwoSyDZF8Jth+mUk5lSPJCkMC0lMy6FaCD51jm6ayE= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12/go.mod h1:fuR57fAgMk7ot3WcNQfb6rSEn+SUffl7ri+aa8uKysI= @@ -36,16 +36,16 @@ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4/go.mod h1: github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2/go.mod h1:72HRZDLMtmVQiLG2tLfQcaWLCssELvGl+Zf2WVxMmR8= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18 h1:tJ5RnkHCiSH0jyd6gROjlJtNwov0eGYNz8s8nFcR0jQ= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18/go.mod h1:++NHzT+nAF7ZPrHPsA+ENvsXkOO8wEu+C6RXltAG4/c= -github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.5 h1:UDXu9dqpCZYonj7poM4kFISjzTdWI0v3WUusM+w+Gfc= -github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.5/go.mod h1:5NPkI3RsTOhwz1CuG7VVSgJCm3CINKkoIaUbUZWQ67w= +github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.6 h1:3TZlWvCC813uhS1Z4fVTmBhg41OYUrgSlvXqIDDkurw= +github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.6/go.mod h1:5NPkI3RsTOhwz1CuG7VVSgJCm3CINKkoIaUbUZWQ67w= github.com/aws/aws-sdk-go-v2/service/sso v1.4.2/go.mod h1:NBvT9R1MEF+Ud6ApJKM0G+IkPchKS7p7c2YPKwHmBOk= github.com/aws/aws-sdk-go-v2/service/sso v1.22.5 h1:zCsFCKvbj25i7p1u94imVoO447I/sFv8qq+lGJhRN0c= github.com/aws/aws-sdk-go-v2/service/sso v1.22.5/go.mod h1:ZeDX1SnKsVlejeuz41GiajjZpRSWR7/42q/EyA/QEiM= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5 h1:SKvPgvdvmiTWoi0GAJ7AsJfOz3ngVkD/ERbs5pUnHNI= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5/go.mod h1:20sz31hv/WsPa3HhU3hfrIet2kxM4Pe0r20eBZ20Tac= github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod h1:8EzeIqfWt2wWT4rJVu3f21TfrhJ8AEMzVybRNSb/b4g= -github.com/aws/aws-sdk-go-v2/service/sts v1.30.4 h1:iAckBT2OeEK/kBDyN/jDtpEExhjeeA/Im2q4X0rJZT8= -github.com/aws/aws-sdk-go-v2/service/sts v1.30.4/go.mod h1:vmSqFK+BVIwVpDAGZB3CoCXHzurt4qBE8lf+I/kRTh0= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.5 h1:OMsEmCyz2i89XwRwPouAJvhj81wINh+4UK+k/0Yo/q8= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.5/go.mod h1:vmSqFK+BVIwVpDAGZB3CoCXHzurt4qBE8lf+I/kRTh0= github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/aws/smithy-go v1.20.4 h1:2HK1zBdPgRbjFOHlfeQZfpC4r72MOb9bZkiFwggKO+4= github.com/aws/smithy-go v1.20.4/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= diff --git a/exporter/awskinesisexporter/go.mod b/exporter/awskinesisexporter/go.mod index 3b1a761b3bc9..5a334df0192f 100644 --- a/exporter/awskinesisexporter/go.mod +++ b/exporter/awskinesisexporter/go.mod @@ -4,10 +4,10 @@ go 1.22.0 require ( github.com/aws/aws-sdk-go-v2 v1.30.4 - github.com/aws/aws-sdk-go-v2/config v1.27.28 - github.com/aws/aws-sdk-go-v2/credentials 
v1.17.28 - github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.4 - github.com/aws/aws-sdk-go-v2/service/sts v1.30.4 + github.com/aws/aws-sdk-go-v2/config v1.27.31 + github.com/aws/aws-sdk-go-v2/credentials v1.17.30 + github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.5 + github.com/aws/aws-sdk-go-v2/service/sts v1.30.5 github.com/cenkalti/backoff/v4 v4.3.0 github.com/gogo/protobuf v1.3.2 github.com/google/uuid v1.6.0 diff --git a/exporter/awskinesisexporter/go.sum b/exporter/awskinesisexporter/go.sum index f0b5adc78892..59c950cd9f60 100644 --- a/exporter/awskinesisexporter/go.sum +++ b/exporter/awskinesisexporter/go.sum @@ -4,10 +4,10 @@ github.com/aws/aws-sdk-go-v2 v1.30.4 h1:frhcagrVNrzmT95RJImMHgabt99vkXGslubDaDag github.com/aws/aws-sdk-go-v2 v1.30.4/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 h1:70PVAiL15/aBMh5LThwgXdSQorVr91L127ttckI9QQU= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4/go.mod h1:/MQxMqci8tlqDH+pjmoLu1i0tbWCUP1hhyMRuFxpQCw= -github.com/aws/aws-sdk-go-v2/config v1.27.28 h1:OTxWGW/91C61QlneCtnD62NLb4W616/NM1jA8LhJqbg= -github.com/aws/aws-sdk-go-v2/config v1.27.28/go.mod h1:uzVRVtJSU5EFv6Fu82AoVFKozJi2ZCY6WRCXj06rbvs= -github.com/aws/aws-sdk-go-v2/credentials v1.17.28 h1:m8+AHY/ND8CMHJnPoH7PJIRakWGa4gbfbxuY9TGTUXM= -github.com/aws/aws-sdk-go-v2/credentials v1.17.28/go.mod h1:6TF7dSc78ehD1SL6KpRIPKMA1GyyWflIkjqg+qmf4+c= +github.com/aws/aws-sdk-go-v2/config v1.27.31 h1:kxBoRsjhT3pq0cKthgj6RU6bXTm/2SgdoUMyrVw0rAI= +github.com/aws/aws-sdk-go-v2/config v1.27.31/go.mod h1:z04nZdSWFPaDwK3DdJOG2r+scLQzMYuJeW0CujEm9FM= +github.com/aws/aws-sdk-go-v2/credentials v1.17.30 h1:aau/oYFtibVovr2rDt8FHlU17BTicFEMAi29V1U+L5Q= +github.com/aws/aws-sdk-go-v2/credentials v1.17.30/go.mod h1:BPJ/yXV92ZVq6G8uYvbU0gSl8q94UB63nMT5ctNO38g= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12 h1:yjwoSyDZF8Jth+mUk5lSPJCkMC0lMy6FaCD51jm6ayE= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12/go.mod h1:fuR57fAgMk7ot3WcNQfb6rSEn+SUffl7ri+aa8uKysI= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 h1:TNyt/+X43KJ9IJJMjKfa3bNTiZbUP7DeCxfbTROESwY= @@ -20,14 +20,14 @@ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 h1:KypMCbL github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4/go.mod h1:Vz1JQXliGcQktFTN/LN6uGppAIRoLBR2bMvIMP0gOjc= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18 h1:tJ5RnkHCiSH0jyd6gROjlJtNwov0eGYNz8s8nFcR0jQ= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18/go.mod h1:++NHzT+nAF7ZPrHPsA+ENvsXkOO8wEu+C6RXltAG4/c= -github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.4 h1:Tu/EuXZH2pJD4Fcz1UmkLXcquc5xTvVLjhCQeBhTnQ4= -github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.4/go.mod h1:pKTvEQz1PcNd+gKArVyeHpVM63AWnFqYyg07WAQQANQ= +github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.5 h1:iirGMva2IXw4kcqsvuF+uc8ARweuVqoQJjzRZGaiV1E= +github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.5/go.mod h1:pKTvEQz1PcNd+gKArVyeHpVM63AWnFqYyg07WAQQANQ= github.com/aws/aws-sdk-go-v2/service/sso v1.22.5 h1:zCsFCKvbj25i7p1u94imVoO447I/sFv8qq+lGJhRN0c= github.com/aws/aws-sdk-go-v2/service/sso v1.22.5/go.mod h1:ZeDX1SnKsVlejeuz41GiajjZpRSWR7/42q/EyA/QEiM= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5 h1:SKvPgvdvmiTWoi0GAJ7AsJfOz3ngVkD/ERbs5pUnHNI= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5/go.mod h1:20sz31hv/WsPa3HhU3hfrIet2kxM4Pe0r20eBZ20Tac= -github.com/aws/aws-sdk-go-v2/service/sts v1.30.4 
h1:iAckBT2OeEK/kBDyN/jDtpEExhjeeA/Im2q4X0rJZT8= -github.com/aws/aws-sdk-go-v2/service/sts v1.30.4/go.mod h1:vmSqFK+BVIwVpDAGZB3CoCXHzurt4qBE8lf+I/kRTh0= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.5 h1:OMsEmCyz2i89XwRwPouAJvhj81wINh+4UK+k/0Yo/q8= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.5/go.mod h1:vmSqFK+BVIwVpDAGZB3CoCXHzurt4qBE8lf+I/kRTh0= github.com/aws/smithy-go v1.20.4 h1:2HK1zBdPgRbjFOHlfeQZfpC4r72MOb9bZkiFwggKO+4= github.com/aws/smithy-go v1.20.4/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= diff --git a/exporter/loadbalancingexporter/go.mod b/exporter/loadbalancingexporter/go.mod index f374c7a8df51..c2f47d03231c 100644 --- a/exporter/loadbalancingexporter/go.mod +++ b/exporter/loadbalancingexporter/go.mod @@ -3,8 +3,8 @@ module github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadba go 1.22.0 require ( - github.com/aws/aws-sdk-go-v2/config v1.27.28 - github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.31.4 + github.com/aws/aws-sdk-go-v2/config v1.27.31 + github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.31.5 github.com/aws/smithy-go v1.20.4 github.com/json-iterator/go v1.1.12 github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.108.0 @@ -39,7 +39,7 @@ require ( require ( github.com/aws/aws-sdk-go-v2 v1.30.4 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.28 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.30 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 // indirect github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16 // indirect @@ -48,7 +48,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.22.5 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.30.4 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.30.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect diff --git a/exporter/loadbalancingexporter/go.sum b/exporter/loadbalancingexporter/go.sum index 2a88ed644cd8..03673df1f25b 100644 --- a/exporter/loadbalancingexporter/go.sum +++ b/exporter/loadbalancingexporter/go.sum @@ -1,9 +1,9 @@ github.com/aws/aws-sdk-go-v2 v1.30.4 h1:frhcagrVNrzmT95RJImMHgabt99vkXGslubDaDagTk8= github.com/aws/aws-sdk-go-v2 v1.30.4/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0= -github.com/aws/aws-sdk-go-v2/config v1.27.28 h1:OTxWGW/91C61QlneCtnD62NLb4W616/NM1jA8LhJqbg= -github.com/aws/aws-sdk-go-v2/config v1.27.28/go.mod h1:uzVRVtJSU5EFv6Fu82AoVFKozJi2ZCY6WRCXj06rbvs= -github.com/aws/aws-sdk-go-v2/credentials v1.17.28 h1:m8+AHY/ND8CMHJnPoH7PJIRakWGa4gbfbxuY9TGTUXM= -github.com/aws/aws-sdk-go-v2/credentials v1.17.28/go.mod h1:6TF7dSc78ehD1SL6KpRIPKMA1GyyWflIkjqg+qmf4+c= +github.com/aws/aws-sdk-go-v2/config v1.27.31 h1:kxBoRsjhT3pq0cKthgj6RU6bXTm/2SgdoUMyrVw0rAI= +github.com/aws/aws-sdk-go-v2/config v1.27.31/go.mod h1:z04nZdSWFPaDwK3DdJOG2r+scLQzMYuJeW0CujEm9FM= +github.com/aws/aws-sdk-go-v2/credentials v1.17.30 h1:aau/oYFtibVovr2rDt8FHlU17BTicFEMAi29V1U+L5Q= +github.com/aws/aws-sdk-go-v2/credentials v1.17.30/go.mod h1:BPJ/yXV92ZVq6G8uYvbU0gSl8q94UB63nMT5ctNO38g= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12 
h1:yjwoSyDZF8Jth+mUk5lSPJCkMC0lMy6FaCD51jm6ayE= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12/go.mod h1:fuR57fAgMk7ot3WcNQfb6rSEn+SUffl7ri+aa8uKysI= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 h1:TNyt/+X43KJ9IJJMjKfa3bNTiZbUP7DeCxfbTROESwY= @@ -16,14 +16,14 @@ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 h1:KypMCbL github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4/go.mod h1:Vz1JQXliGcQktFTN/LN6uGppAIRoLBR2bMvIMP0gOjc= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18 h1:tJ5RnkHCiSH0jyd6gROjlJtNwov0eGYNz8s8nFcR0jQ= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18/go.mod h1:++NHzT+nAF7ZPrHPsA+ENvsXkOO8wEu+C6RXltAG4/c= -github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.31.4 h1:YEY+Y4Lf3TuFrw8keb8NZ5nsbo/YplxEgZWbqnDlq+Y= -github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.31.4/go.mod h1:5autx6GwAtQVv8S/qTwBKfxzAAwe8hOlzVuTtLdliVw= +github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.31.5 h1:z7nPig/pFU+TAAKouI51pCVQPEeQHZC2mZXSK+g0Av8= +github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.31.5/go.mod h1:5autx6GwAtQVv8S/qTwBKfxzAAwe8hOlzVuTtLdliVw= github.com/aws/aws-sdk-go-v2/service/sso v1.22.5 h1:zCsFCKvbj25i7p1u94imVoO447I/sFv8qq+lGJhRN0c= github.com/aws/aws-sdk-go-v2/service/sso v1.22.5/go.mod h1:ZeDX1SnKsVlejeuz41GiajjZpRSWR7/42q/EyA/QEiM= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5 h1:SKvPgvdvmiTWoi0GAJ7AsJfOz3ngVkD/ERbs5pUnHNI= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5/go.mod h1:20sz31hv/WsPa3HhU3hfrIet2kxM4Pe0r20eBZ20Tac= -github.com/aws/aws-sdk-go-v2/service/sts v1.30.4 h1:iAckBT2OeEK/kBDyN/jDtpEExhjeeA/Im2q4X0rJZT8= -github.com/aws/aws-sdk-go-v2/service/sts v1.30.4/go.mod h1:vmSqFK+BVIwVpDAGZB3CoCXHzurt4qBE8lf+I/kRTh0= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.5 h1:OMsEmCyz2i89XwRwPouAJvhj81wINh+4UK+k/0Yo/q8= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.5/go.mod h1:vmSqFK+BVIwVpDAGZB3CoCXHzurt4qBE8lf+I/kRTh0= github.com/aws/smithy-go v1.20.4 h1:2HK1zBdPgRbjFOHlfeQZfpC4r72MOb9bZkiFwggKO+4= github.com/aws/smithy-go v1.20.4/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= diff --git a/extension/sigv4authextension/go.mod b/extension/sigv4authextension/go.mod index 1cdf344f6b1e..18f033e930ff 100644 --- a/extension/sigv4authextension/go.mod +++ b/extension/sigv4authextension/go.mod @@ -4,9 +4,9 @@ go 1.22.0 require ( github.com/aws/aws-sdk-go-v2 v1.30.4 - github.com/aws/aws-sdk-go-v2/config v1.27.28 - github.com/aws/aws-sdk-go-v2/credentials v1.17.28 - github.com/aws/aws-sdk-go-v2/service/sts v1.30.4 + github.com/aws/aws-sdk-go-v2/config v1.27.31 + github.com/aws/aws-sdk-go-v2/credentials v1.17.30 + github.com/aws/aws-sdk-go-v2/service/sts v1.30.5 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/component v0.108.1 go.opentelemetry.io/collector/confmap v1.14.1 diff --git a/extension/sigv4authextension/go.sum b/extension/sigv4authextension/go.sum index a1c7f9a9e041..427aedf0822a 100644 --- a/extension/sigv4authextension/go.sum +++ b/extension/sigv4authextension/go.sum @@ -1,9 +1,9 @@ github.com/aws/aws-sdk-go-v2 v1.30.4 h1:frhcagrVNrzmT95RJImMHgabt99vkXGslubDaDagTk8= github.com/aws/aws-sdk-go-v2 v1.30.4/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0= -github.com/aws/aws-sdk-go-v2/config v1.27.28 h1:OTxWGW/91C61QlneCtnD62NLb4W616/NM1jA8LhJqbg= -github.com/aws/aws-sdk-go-v2/config v1.27.28/go.mod 
h1:uzVRVtJSU5EFv6Fu82AoVFKozJi2ZCY6WRCXj06rbvs= -github.com/aws/aws-sdk-go-v2/credentials v1.17.28 h1:m8+AHY/ND8CMHJnPoH7PJIRakWGa4gbfbxuY9TGTUXM= -github.com/aws/aws-sdk-go-v2/credentials v1.17.28/go.mod h1:6TF7dSc78ehD1SL6KpRIPKMA1GyyWflIkjqg+qmf4+c= +github.com/aws/aws-sdk-go-v2/config v1.27.31 h1:kxBoRsjhT3pq0cKthgj6RU6bXTm/2SgdoUMyrVw0rAI= +github.com/aws/aws-sdk-go-v2/config v1.27.31/go.mod h1:z04nZdSWFPaDwK3DdJOG2r+scLQzMYuJeW0CujEm9FM= +github.com/aws/aws-sdk-go-v2/credentials v1.17.30 h1:aau/oYFtibVovr2rDt8FHlU17BTicFEMAi29V1U+L5Q= +github.com/aws/aws-sdk-go-v2/credentials v1.17.30/go.mod h1:BPJ/yXV92ZVq6G8uYvbU0gSl8q94UB63nMT5ctNO38g= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12 h1:yjwoSyDZF8Jth+mUk5lSPJCkMC0lMy6FaCD51jm6ayE= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12/go.mod h1:fuR57fAgMk7ot3WcNQfb6rSEn+SUffl7ri+aa8uKysI= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 h1:TNyt/+X43KJ9IJJMjKfa3bNTiZbUP7DeCxfbTROESwY= @@ -20,8 +20,8 @@ github.com/aws/aws-sdk-go-v2/service/sso v1.22.5 h1:zCsFCKvbj25i7p1u94imVoO447I/ github.com/aws/aws-sdk-go-v2/service/sso v1.22.5/go.mod h1:ZeDX1SnKsVlejeuz41GiajjZpRSWR7/42q/EyA/QEiM= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5 h1:SKvPgvdvmiTWoi0GAJ7AsJfOz3ngVkD/ERbs5pUnHNI= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5/go.mod h1:20sz31hv/WsPa3HhU3hfrIet2kxM4Pe0r20eBZ20Tac= -github.com/aws/aws-sdk-go-v2/service/sts v1.30.4 h1:iAckBT2OeEK/kBDyN/jDtpEExhjeeA/Im2q4X0rJZT8= -github.com/aws/aws-sdk-go-v2/service/sts v1.30.4/go.mod h1:vmSqFK+BVIwVpDAGZB3CoCXHzurt4qBE8lf+I/kRTh0= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.5 h1:OMsEmCyz2i89XwRwPouAJvhj81wINh+4UK+k/0Yo/q8= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.5/go.mod h1:vmSqFK+BVIwVpDAGZB3CoCXHzurt4qBE8lf+I/kRTh0= github.com/aws/smithy-go v1.20.4 h1:2HK1zBdPgRbjFOHlfeQZfpC4r72MOb9bZkiFwggKO+4= github.com/aws/smithy-go v1.20.4/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= diff --git a/receiver/awss3receiver/go.mod b/receiver/awss3receiver/go.mod index 1157c4aa85c2..de70d493fbaf 100644 --- a/receiver/awss3receiver/go.mod +++ b/receiver/awss3receiver/go.mod @@ -4,9 +4,9 @@ go 1.22.0 require ( github.com/aws/aws-sdk-go-v2 v1.30.4 - github.com/aws/aws-sdk-go-v2/config v1.27.28 - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.11 - github.com/aws/aws-sdk-go-v2/service/s3 v1.59.0 + github.com/aws/aws-sdk-go-v2/config v1.27.31 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.15 + github.com/aws/aws-sdk-go-v2/service/s3 v1.60.1 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/component v0.108.1 go.opentelemetry.io/collector/confmap v1.14.1 @@ -22,7 +22,7 @@ require ( require ( github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.28 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.30 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 // indirect github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16 // indirect @@ -34,7 +34,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.22.5 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.30.4 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.30.5 // indirect 
github.com/aws/smithy-go v1.20.4 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect diff --git a/receiver/awss3receiver/go.sum b/receiver/awss3receiver/go.sum index df5c917b80c9..a43bde60808e 100644 --- a/receiver/awss3receiver/go.sum +++ b/receiver/awss3receiver/go.sum @@ -2,14 +2,14 @@ github.com/aws/aws-sdk-go-v2 v1.30.4 h1:frhcagrVNrzmT95RJImMHgabt99vkXGslubDaDag github.com/aws/aws-sdk-go-v2 v1.30.4/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 h1:70PVAiL15/aBMh5LThwgXdSQorVr91L127ttckI9QQU= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4/go.mod h1:/MQxMqci8tlqDH+pjmoLu1i0tbWCUP1hhyMRuFxpQCw= -github.com/aws/aws-sdk-go-v2/config v1.27.28 h1:OTxWGW/91C61QlneCtnD62NLb4W616/NM1jA8LhJqbg= -github.com/aws/aws-sdk-go-v2/config v1.27.28/go.mod h1:uzVRVtJSU5EFv6Fu82AoVFKozJi2ZCY6WRCXj06rbvs= -github.com/aws/aws-sdk-go-v2/credentials v1.17.28 h1:m8+AHY/ND8CMHJnPoH7PJIRakWGa4gbfbxuY9TGTUXM= -github.com/aws/aws-sdk-go-v2/credentials v1.17.28/go.mod h1:6TF7dSc78ehD1SL6KpRIPKMA1GyyWflIkjqg+qmf4+c= +github.com/aws/aws-sdk-go-v2/config v1.27.31 h1:kxBoRsjhT3pq0cKthgj6RU6bXTm/2SgdoUMyrVw0rAI= +github.com/aws/aws-sdk-go-v2/config v1.27.31/go.mod h1:z04nZdSWFPaDwK3DdJOG2r+scLQzMYuJeW0CujEm9FM= +github.com/aws/aws-sdk-go-v2/credentials v1.17.30 h1:aau/oYFtibVovr2rDt8FHlU17BTicFEMAi29V1U+L5Q= +github.com/aws/aws-sdk-go-v2/credentials v1.17.30/go.mod h1:BPJ/yXV92ZVq6G8uYvbU0gSl8q94UB63nMT5ctNO38g= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12 h1:yjwoSyDZF8Jth+mUk5lSPJCkMC0lMy6FaCD51jm6ayE= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12/go.mod h1:fuR57fAgMk7ot3WcNQfb6rSEn+SUffl7ri+aa8uKysI= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.11 h1:FEDZD/Axt5tKSkPAs967KZ++MkvYdBqr0a+cetRbjLM= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.11/go.mod h1:dvlsbA32KfvCzqwTiX7maABgFek2RyUuYEJ3kyn/PmQ= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.15 h1:ijB7hr56MngOiELJe0C5aQRaBQ11LveNgWFyG02AUto= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.15/go.mod h1:0QEmQSSWMVfiAk93l1/ayR9DQ9+jwni7gHS2NARZXB0= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 h1:TNyt/+X43KJ9IJJMjKfa3bNTiZbUP7DeCxfbTROESwY= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16/go.mod h1:2DwJF39FlNAUiX5pAc0UNeiz16lK2t7IaFcm0LFHEgc= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16 h1:jYfy8UPmd+6kJW5YhY0L1/KftReOGxI/4NtVSTh9O/I= @@ -26,14 +26,14 @@ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18 h1:tJ5RnkHC github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18/go.mod h1:++NHzT+nAF7ZPrHPsA+ENvsXkOO8wEu+C6RXltAG4/c= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16 h1:jg16PhLPUiHIj8zYIW6bqzeQSuHVEiWnGA0Brz5Xv2I= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16/go.mod h1:Uyk1zE1VVdsHSU7096h/rwnXDzOzYQVl+FNPhPw7ShY= -github.com/aws/aws-sdk-go-v2/service/s3 v1.59.0 h1:Cso4Ev/XauMVsbwdhYEoxg8rxZWw43CFqqaPB5w3W2c= -github.com/aws/aws-sdk-go-v2/service/s3 v1.59.0/go.mod h1:BSPI0EfnYUuNHPS0uqIo5VrRwzie+Fp+YhQOUs16sKI= +github.com/aws/aws-sdk-go-v2/service/s3 v1.60.1 h1:mx2ucgtv+MWzJesJY9Ig/8AFHgoE5FwLXwUVgW/FGdI= +github.com/aws/aws-sdk-go-v2/service/s3 v1.60.1/go.mod h1:BSPI0EfnYUuNHPS0uqIo5VrRwzie+Fp+YhQOUs16sKI= github.com/aws/aws-sdk-go-v2/service/sso v1.22.5 h1:zCsFCKvbj25i7p1u94imVoO447I/sFv8qq+lGJhRN0c= github.com/aws/aws-sdk-go-v2/service/sso v1.22.5/go.mod 
h1:ZeDX1SnKsVlejeuz41GiajjZpRSWR7/42q/EyA/QEiM= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5 h1:SKvPgvdvmiTWoi0GAJ7AsJfOz3ngVkD/ERbs5pUnHNI= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.5/go.mod h1:20sz31hv/WsPa3HhU3hfrIet2kxM4Pe0r20eBZ20Tac= -github.com/aws/aws-sdk-go-v2/service/sts v1.30.4 h1:iAckBT2OeEK/kBDyN/jDtpEExhjeeA/Im2q4X0rJZT8= -github.com/aws/aws-sdk-go-v2/service/sts v1.30.4/go.mod h1:vmSqFK+BVIwVpDAGZB3CoCXHzurt4qBE8lf+I/kRTh0= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.5 h1:OMsEmCyz2i89XwRwPouAJvhj81wINh+4UK+k/0Yo/q8= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.5/go.mod h1:vmSqFK+BVIwVpDAGZB3CoCXHzurt4qBE8lf+I/kRTh0= github.com/aws/smithy-go v1.20.4 h1:2HK1zBdPgRbjFOHlfeQZfpC4r72MOb9bZkiFwggKO+4= github.com/aws/smithy-go v1.20.4/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= From 1875c58513ef5f55ccb84de1e0556a9842b1fc3b Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 28 Aug 2024 14:12:56 -0700 Subject: [PATCH 02/10] fix(deps): update all opentelemetry-go-contrib packages to v0.54.0 (#34863) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Change | Age | Adoption | Passing | Confidence | |---|---|---|---|---|---| | [go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc](https://togithub.com/open-telemetry/opentelemetry-go-contrib) | `v0.53.0` -> `v0.54.0` | [![age](https://developer.mend.io/api/mc/badges/age/go/go.opentelemetry.io%2fcontrib%2finstrumentation%2fgoogle.golang.org%2fgrpc%2fotelgrpc/v0.54.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/go/go.opentelemetry.io%2fcontrib%2finstrumentation%2fgoogle.golang.org%2fgrpc%2fotelgrpc/v0.54.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/go/go.opentelemetry.io%2fcontrib%2finstrumentation%2fgoogle.golang.org%2fgrpc%2fotelgrpc/v0.53.0/v0.54.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/go/go.opentelemetry.io%2fcontrib%2finstrumentation%2fgoogle.golang.org%2fgrpc%2fotelgrpc/v0.53.0/v0.54.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | | [go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp](https://togithub.com/open-telemetry/opentelemetry-go-contrib) | `v0.53.0` -> `v0.54.0` | [![age](https://developer.mend.io/api/mc/badges/age/go/go.opentelemetry.io%2fcontrib%2finstrumentation%2fnet%2fhttp%2fotelhttp/v0.54.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/go/go.opentelemetry.io%2fcontrib%2finstrumentation%2fnet%2fhttp%2fotelhttp/v0.54.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/go/go.opentelemetry.io%2fcontrib%2finstrumentation%2fnet%2fhttp%2fotelhttp/v0.53.0/v0.54.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/go/go.opentelemetry.io%2fcontrib%2finstrumentation%2fnet%2fhttp%2fotelhttp/v0.53.0/v0.54.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | --- > [!WARNING] > Some dependencies could not be looked up. 
Check the Dependency Dashboard for more information. --- ### Configuration 📅 **Schedule**: Branch creation - "on tuesday" (UTC), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about these updates again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/open-telemetry/opentelemetry-collector-contrib). --------- Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Co-authored-by: opentelemetrybot <107717825+opentelemetrybot@users.noreply.github.com> Co-authored-by: Yang Song --- cmd/otelcontribcol/go.mod | 4 ++-- cmd/otelcontribcol/go.sum | 8 ++++---- cmd/oteltestbedcol/go.mod | 4 ++-- cmd/oteltestbedcol/go.sum | 8 ++++---- examples/demo/client/go.mod | 2 +- examples/demo/client/go.sum | 4 ++-- examples/demo/server/go.mod | 2 +- examples/demo/server/go.sum | 4 ++-- .../elasticsearchexporter/integrationtest/go.mod | 4 ++-- .../elasticsearchexporter/integrationtest/go.sum | 8 ++++---- exporter/opencensusexporter/go.mod | 6 +++--- exporter/opencensusexporter/go.sum | 12 ++++++------ receiver/opencensusreceiver/go.mod | 6 +++--- receiver/opencensusreceiver/go.sum | 12 ++++++------ testbed/go.mod | 4 ++-- testbed/go.sum | 8 ++++---- 16 files changed, 48 insertions(+), 48 deletions(-) diff --git a/cmd/otelcontribcol/go.mod b/cmd/otelcontribcol/go.mod index 5363417feba6..df93dbeceda7 100644 --- a/cmd/otelcontribcol/go.mod +++ b/cmd/otelcontribcol/go.mod @@ -790,7 +790,7 @@ require ( go.opentelemetry.io/collector/semconv v0.108.1 // indirect go.opentelemetry.io/collector/service v0.108.1 // indirect go.opentelemetry.io/contrib/config v0.8.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect go.opentelemetry.io/contrib/propagators/b3 v1.28.0 // indirect go.opentelemetry.io/contrib/zpages v0.53.0 // indirect @@ -831,7 +831,7 @@ require ( google.golang.org/api v0.194.0 // indirect google.golang.org/genproto v0.0.0-20240814211410-ddb44dafa142 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect google.golang.org/grpc v1.65.0 // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/cmd/otelcontribcol/go.sum b/cmd/otelcontribcol/go.sum index aa35c621052b..5951ba9b2ba2 100644 --- a/cmd/otelcontribcol/go.sum +++ b/cmd/otelcontribcol/go.sum @@ -2464,8 +2464,8 @@ go.opentelemetry.io/collector/service v0.108.1 h1:Ov0qP5JiX0DHaCrZY0jHnpqc8MVHwS go.opentelemetry.io/collector/service v0.108.1/go.mod h1:mbZXlO5gT99nXNGbDOG6peqPd6og5Tm49P4FP0oi56U= go.opentelemetry.io/contrib/config v0.8.0 h1:OD7aDMhL+2EpzdSHfkDmcdD/uUA+PgKM5faFyF9XFT0= go.opentelemetry.io/contrib/config v0.8.0/go.mod h1:dGeVZWE//3wrxYHHP0iCBYJU1QmOmPcbV+FNB7pjDYI= 
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= go.opentelemetry.io/contrib/propagators/b3 v1.28.0 h1:XR6CFQrQ/ttAYmTBX2loUEFGdk1h17pxYI8828dk/1Y= @@ -3231,8 +3231,8 @@ google.golang.org/genproto v0.0.0-20240814211410-ddb44dafa142 h1:oLiyxGgE+rt22du google.golang.org/genproto v0.0.0-20240814211410-ddb44dafa142/go.mod h1:G11eXq53iI5Q+kyNOmCvnzBaxEA2Q/Ik5Tj7nqBE8j4= google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8= google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:e7S5W7MGGLaSu8j3YjdezkZ+m1/Nm0uRVRMEMGk26Xs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd h1:6TEm2ZxXoQmFWFlt1vNxvVOa1Q0dXFQD1m/rYjXmS0E= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= diff --git a/cmd/oteltestbedcol/go.mod b/cmd/oteltestbedcol/go.mod index 955ee6409214..ca5f0846d62f 100644 --- a/cmd/oteltestbedcol/go.mod +++ b/cmd/oteltestbedcol/go.mod @@ -256,7 +256,7 @@ require ( go.opentelemetry.io/collector/semconv v0.108.1 // indirect go.opentelemetry.io/collector/service v0.108.1 // indirect go.opentelemetry.io/contrib/config v0.8.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect go.opentelemetry.io/contrib/propagators/b3 v1.28.0 // indirect go.opentelemetry.io/contrib/zpages v0.53.0 // indirect @@ -293,7 +293,7 @@ require ( gonum.org/v1/gonum v0.15.1 // indirect google.golang.org/api v0.188.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect google.golang.org/grpc v1.65.0 // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/cmd/oteltestbedcol/go.sum b/cmd/oteltestbedcol/go.sum index baf8f3746873..91ba6cf17e28 100644 --- 
a/cmd/oteltestbedcol/go.sum +++ b/cmd/oteltestbedcol/go.sum @@ -783,8 +783,8 @@ go.opentelemetry.io/collector/service v0.108.1 h1:Ov0qP5JiX0DHaCrZY0jHnpqc8MVHwS go.opentelemetry.io/collector/service v0.108.1/go.mod h1:mbZXlO5gT99nXNGbDOG6peqPd6og5Tm49P4FP0oi56U= go.opentelemetry.io/contrib/config v0.8.0 h1:OD7aDMhL+2EpzdSHfkDmcdD/uUA+PgKM5faFyF9XFT0= go.opentelemetry.io/contrib/config v0.8.0/go.mod h1:dGeVZWE//3wrxYHHP0iCBYJU1QmOmPcbV+FNB7pjDYI= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= go.opentelemetry.io/contrib/propagators/b3 v1.28.0 h1:XR6CFQrQ/ttAYmTBX2loUEFGdk1h17pxYI8828dk/1Y= @@ -1149,8 +1149,8 @@ google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8= google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:e7S5W7MGGLaSu8j3YjdezkZ+m1/Nm0uRVRMEMGk26Xs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd h1:6TEm2ZxXoQmFWFlt1vNxvVOa1Q0dXFQD1m/rYjXmS0E= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= diff --git a/examples/demo/client/go.mod b/examples/demo/client/go.mod index 03a90fa5cdf4..1c1cb3776aab 100644 --- a/examples/demo/client/go.mod +++ b/examples/demo/client/go.mod @@ -3,7 +3,7 @@ module github.com/open-telemetry/opentelemetry-collector-contrib/examples/demo/c go 1.22.0 require ( - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 go.opentelemetry.io/otel v1.29.0 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.29.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 diff --git a/examples/demo/client/go.sum b/examples/demo/client/go.sum index f1862b3abf56..a831fedb6067 100644 --- a/examples/demo/client/go.sum +++ b/examples/demo/client/go.sum @@ -19,8 +19,8 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.29.0 h1:k6fQVDQexDE+3jG2SfCQjnHS7OamcP73YMoxEVq5B6k= diff --git a/examples/demo/server/go.mod b/examples/demo/server/go.mod index a09217d70fab..810bd725d989 100644 --- a/examples/demo/server/go.mod +++ b/examples/demo/server/go.mod @@ -3,7 +3,7 @@ module github.com/open-telemetry/opentelemetry-collector-contrib/examples/demo/s go 1.22.0 require ( - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 go.opentelemetry.io/otel v1.29.0 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.29.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 diff --git a/examples/demo/server/go.sum b/examples/demo/server/go.sum index f1862b3abf56..a831fedb6067 100644 --- a/examples/demo/server/go.sum +++ b/examples/demo/server/go.sum @@ -19,8 +19,8 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.29.0 h1:k6fQVDQexDE+3jG2SfCQjnHS7OamcP73YMoxEVq5B6k= diff --git a/exporter/elasticsearchexporter/integrationtest/go.mod b/exporter/elasticsearchexporter/integrationtest/go.mod index eaf9900bd7d7..6869317181e4 100644 --- a/exporter/elasticsearchexporter/integrationtest/go.mod +++ b/exporter/elasticsearchexporter/integrationtest/go.mod @@ -158,7 +158,7 @@ require ( go.opentelemetry.io/collector/semconv v0.108.1 // indirect go.opentelemetry.io/collector/service v0.108.1 // indirect go.opentelemetry.io/contrib/config v0.8.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect + 
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect go.opentelemetry.io/contrib/propagators/b3 v1.28.0 // indirect go.opentelemetry.io/contrib/zpages v0.53.0 // indirect @@ -186,7 +186,7 @@ require ( golang.org/x/text v0.17.0 // indirect gonum.org/v1/gonum v0.15.1 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect google.golang.org/grpc v1.65.0 // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/exporter/elasticsearchexporter/integrationtest/go.sum b/exporter/elasticsearchexporter/integrationtest/go.sum index 8b19664b1321..06cce831a7ee 100644 --- a/exporter/elasticsearchexporter/integrationtest/go.sum +++ b/exporter/elasticsearchexporter/integrationtest/go.sum @@ -361,8 +361,8 @@ go.opentelemetry.io/collector/service v0.108.1 h1:Ov0qP5JiX0DHaCrZY0jHnpqc8MVHwS go.opentelemetry.io/collector/service v0.108.1/go.mod h1:mbZXlO5gT99nXNGbDOG6peqPd6og5Tm49P4FP0oi56U= go.opentelemetry.io/contrib/config v0.8.0 h1:OD7aDMhL+2EpzdSHfkDmcdD/uUA+PgKM5faFyF9XFT0= go.opentelemetry.io/contrib/config v0.8.0/go.mod h1:dGeVZWE//3wrxYHHP0iCBYJU1QmOmPcbV+FNB7pjDYI= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= go.opentelemetry.io/contrib/propagators/b3 v1.28.0 h1:XR6CFQrQ/ttAYmTBX2loUEFGdk1h17pxYI8828dk/1Y= @@ -475,8 +475,8 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8= google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:e7S5W7MGGLaSu8j3YjdezkZ+m1/Nm0uRVRMEMGk26Xs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd h1:6TEm2ZxXoQmFWFlt1vNxvVOa1Q0dXFQD1m/rYjXmS0E= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc 
v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= diff --git a/exporter/opencensusexporter/go.mod b/exporter/opencensusexporter/go.mod index 08d1dfcd5833..7911c90e5f82 100644 --- a/exporter/opencensusexporter/go.mod +++ b/exporter/opencensusexporter/go.mod @@ -75,7 +75,7 @@ require ( go.opentelemetry.io/collector/featuregate v1.14.1 // indirect go.opentelemetry.io/collector/pdata/pprofile v0.108.1 // indirect go.opentelemetry.io/collector/semconv v0.108.1 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect go.opentelemetry.io/otel v1.29.0 // indirect go.opentelemetry.io/otel/exporters/prometheus v0.50.0 // indirect go.opentelemetry.io/otel/metric v1.29.0 // indirect @@ -84,11 +84,11 @@ require ( go.opentelemetry.io/otel/trace v1.29.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/net v0.26.0 // indirect + golang.org/x/net v0.28.0 // indirect golang.org/x/sys v0.24.0 // indirect golang.org/x/text v0.17.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/exporter/opencensusexporter/go.sum b/exporter/opencensusexporter/go.sum index 2ee06e59e222..7b514babc0e3 100644 --- a/exporter/opencensusexporter/go.sum +++ b/exporter/opencensusexporter/go.sum @@ -176,8 +176,8 @@ go.opentelemetry.io/collector/receiver v0.108.1 h1:YQgDv69v3fgd6uoiGZ+vUdUPdNzoo go.opentelemetry.io/collector/receiver v0.108.1/go.mod h1:eKe/VJgdvHr8JsBDma/PF3DlaheTRC2X6AmCUByJCNU= go.opentelemetry.io/collector/semconv v0.108.1 h1:Txk9tauUnamZaxS5vlf1O0uZ4VD6nioRBR0nX8L/fU4= go.opentelemetry.io/collector/semconv v0.108.1/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= go.opentelemetry.io/otel/exporters/prometheus v0.50.0 h1:2Ewsda6hejmbhGFyUvWZjUThC98Cf8Zy6g0zkIimOng= @@ -215,8 +215,8 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/net v0.28.0 
h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -252,8 +252,8 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8= google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:e7S5W7MGGLaSu8j3YjdezkZ+m1/Nm0uRVRMEMGk26Xs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd h1:6TEm2ZxXoQmFWFlt1vNxvVOa1Q0dXFQD1m/rYjXmS0E= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= diff --git a/receiver/opencensusreceiver/go.mod b/receiver/opencensusreceiver/go.mod index c75f77029d74..2d4b3f8fc1a5 100644 --- a/receiver/opencensusreceiver/go.mod +++ b/receiver/opencensusreceiver/go.mod @@ -22,7 +22,7 @@ require ( go.opentelemetry.io/collector/pdata v1.14.1 go.opentelemetry.io/collector/pdata/testdata v0.108.1 go.opentelemetry.io/collector/receiver v0.108.1 - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 go.opentelemetry.io/otel v1.29.0 go.opentelemetry.io/otel/sdk v1.29.0 go.opentelemetry.io/otel/trace v1.29.0 @@ -81,11 +81,11 @@ require ( go.opentelemetry.io/otel/sdk/metric v1.28.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/net v0.26.0 // indirect + golang.org/x/net v0.28.0 // indirect golang.org/x/sys v0.24.0 // indirect golang.org/x/text v0.17.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/receiver/opencensusreceiver/go.sum b/receiver/opencensusreceiver/go.sum index c0ba19f6b12e..ea900c93fe75 100644 --- a/receiver/opencensusreceiver/go.sum +++ b/receiver/opencensusreceiver/go.sum @@ -170,8 +170,8 @@ go.opentelemetry.io/collector/receiver v0.108.1 h1:YQgDv69v3fgd6uoiGZ+vUdUPdNzoo go.opentelemetry.io/collector/receiver v0.108.1/go.mod h1:eKe/VJgdvHr8JsBDma/PF3DlaheTRC2X6AmCUByJCNU= go.opentelemetry.io/collector/semconv v0.108.1 h1:Txk9tauUnamZaxS5vlf1O0uZ4VD6nioRBR0nX8L/fU4= go.opentelemetry.io/collector/semconv v0.108.1/go.mod 
h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= go.opentelemetry.io/otel/exporters/prometheus v0.50.0 h1:2Ewsda6hejmbhGFyUvWZjUThC98Cf8Zy6g0zkIimOng= @@ -209,8 +209,8 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -246,8 +246,8 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8= google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:e7S5W7MGGLaSu8j3YjdezkZ+m1/Nm0uRVRMEMGk26Xs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd h1:6TEm2ZxXoQmFWFlt1vNxvVOa1Q0dXFQD1m/rYjXmS0E= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= diff --git a/testbed/go.mod b/testbed/go.mod index a441a5c4a84e..c8cf5327f6a4 100644 --- a/testbed/go.mod +++ b/testbed/go.mod @@ -262,7 +262,7 @@ require ( go.opentelemetry.io/collector/pdata/testdata v0.108.1 // indirect go.opentelemetry.io/collector/service v0.108.1 // indirect go.opentelemetry.io/contrib/config v0.8.0 // indirect - 
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect go.opentelemetry.io/contrib/propagators/b3 v1.28.0 // indirect go.opentelemetry.io/contrib/zpages v0.53.0 // indirect @@ -297,7 +297,7 @@ require ( gonum.org/v1/gonum v0.15.1 // indirect google.golang.org/api v0.188.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/testbed/go.sum b/testbed/go.sum index 92de415d8737..717b4cc6af75 100644 --- a/testbed/go.sum +++ b/testbed/go.sum @@ -805,8 +805,8 @@ go.opentelemetry.io/collector/service v0.108.1 h1:Ov0qP5JiX0DHaCrZY0jHnpqc8MVHwS go.opentelemetry.io/collector/service v0.108.1/go.mod h1:mbZXlO5gT99nXNGbDOG6peqPd6og5Tm49P4FP0oi56U= go.opentelemetry.io/contrib/config v0.8.0 h1:OD7aDMhL+2EpzdSHfkDmcdD/uUA+PgKM5faFyF9XFT0= go.opentelemetry.io/contrib/config v0.8.0/go.mod h1:dGeVZWE//3wrxYHHP0iCBYJU1QmOmPcbV+FNB7pjDYI= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= go.opentelemetry.io/contrib/propagators/b3 v1.28.0 h1:XR6CFQrQ/ttAYmTBX2loUEFGdk1h17pxYI8828dk/1Y= @@ -1177,8 +1177,8 @@ google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8= google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:e7S5W7MGGLaSu8j3YjdezkZ+m1/Nm0uRVRMEMGk26Xs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd h1:6TEm2ZxXoQmFWFlt1vNxvVOa1Q0dXFQD1m/rYjXmS0E= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= 
From 8bb1fe45522b6093067f914d58e119582744d418 Mon Sep 17 00:00:00 2001 From: Curtis Robert Date: Wed, 28 Aug 2024 14:18:00 -0700 Subject: [PATCH 03/10] [chore][receiver/chrony] Fix readme config references (#34846) The README is referencing a config option as `address`, but it's actually `endpoint`. This incorrect reference was introduced in the [original PR introducing this component](https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/12101). Fixes https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/34839 --- receiver/chronyreceiver/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/receiver/chronyreceiver/README.md b/receiver/chronyreceiver/README.md index 9d8c2893cf4d..7d1b553af819 100644 --- a/receiver/chronyreceiver/README.md +++ b/receiver/chronyreceiver/README.md @@ -25,7 +25,7 @@ By default, the `chrony` receiver will default to the following configuration: ```yaml chrony/defaults: - address: unix:///var/run/chrony/chronyd.sock # The default port by chronyd to allow cmd access + endpoint: unix:///var/run/chrony/chronyd.sock # The default port by chronyd to allow cmd access timeout: 10s # Allowing at least 10s for chronyd to respond before giving up chrony: @@ -36,7 +36,7 @@ chrony: The following options can be customised: -- address (required) - the address on where to communicate to `chronyd` +- endpoint (required) - the address on where to communicate to `chronyd` - The allowed formats are the following - udp://hostname:port - unix:///path/to/chrony.sock (Please note the triple slash) @@ -55,7 +55,7 @@ An example of the configuration is: ```yaml receivers: chrony: - address: unix:///var/run/chrony/chronyd.sock + endpoint: unix:///var/run/chrony/chronyd.sock timeout: 10s collection_interval: 30s metrics: From 38c49210594b7d43a425a6e36cd392bfb11030b8 Mon Sep 17 00:00:00 2001 From: Michal Pristas Date: Thu, 29 Aug 2024 08:12:16 +0200 Subject: [PATCH 04/10] Added mapping documentation into es exporter readme (#34014) **Description:** Adding mapping documentation to the doc **Link to tracking Issue:** - **Testing:** - **Documentation:** exporter/elasticsearchexporter/README.md Thinking about automating this, but with compound mappings I'm not finding any common way of specifying these. Will leave it for a follow-up. --------- Co-authored-by: Antoine Toulme --- exporter/elasticsearchexporter/README.md | 88 ++++++++++++++++++++++++ 1 file changed, 88 insertions(+) diff --git a/exporter/elasticsearchexporter/README.md b/exporter/elasticsearchexporter/README.md index dd8f5f1e722e..4b3af781306a 100644 --- a/exporter/elasticsearchexporter/README.md +++ b/exporter/elasticsearchexporter/README.md @@ -249,3 +249,91 @@ Exponential Histograms are ignored. [data stream]: https://www.elastic.co/guide/en/elasticsearch/reference/current/data-streams.html [ecs]: https://www.elastic.co/guide/en/ecs/current/index.html [SemConv]: https://github.com/open-telemetry/semantic-conventions + + +## ECS Mapping + +`elasticsearchexporter` follows the ECS mapping defined here: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model-appendix.md#elastic-common-schema + +When `mode` is set to `ecs`, `elasticsearchexporter` converts resource-level attributes from their Semantic Conventions (SemConv) names to the equivalent Elastic Common Schema (ECS) names.
+ +If the target ECS field name is specified as an empty string (""), the converter neither renames the SemConv key to the equivalent ECS name nor passes the SemConv key through as-is as the ECS name. + +When "Preserve" is true, the attribute is kept in the payload under its SemConv name and also mapped to its ECS equivalent. + +| Semantic Convention Name | ECS Name | Preserve | +|--------------------------|----------|----------| +| cloud.platform | cloud.service.name | false | +| container.image.tags | container.image.tag | false | +| deployment.environment | service.environment | false | +| host.arch | host.architecture | false | +| host.name | host.hostname | true | +| k8s.deployment.name | kubernetes.deployment.name | false | +| k8s.namespace.name | kubernetes.namespace | false | +| k8s.node.name | kubernetes.node.name | false | +| k8s.pod.name | kubernetes.pod.name | false | +| k8s.pod.uid | kubernetes.pod.uid | false | +| os.description | host.os.full | false | +| os.name | host.os.name | false | +| os.type | host.os.platform | false | +| os.version | host.os.version | false | +| process.executable.path | process.executable | false | +| process.runtime.name | service.runtime.name | false | +| process.runtime.version | service.runtime.version | false | +| service.instance.id | service.node.name | false | +| telemetry.distro.name | "" | false | +| telemetry.distro.version | "" | false | +| telemetry.sdk.language | "" | false | +| telemetry.sdk.name | "" | false | +| telemetry.sdk.version | "" | false | + +### Compound Mapping + +Some ECS fields cannot be mapped 1:1 and instead require more advanced logic. + +#### `agent.name` + +The agent name is a compound value built from three components: +- `telemetry.sdk.name`, defaulting to `otlp` if not present, +- `telemetry.sdk.language`, defaulting to `unknown` if missing, +- `telemetry.distro.name`, which is allowed to be empty. + +All of the following combinations are valid: + +| `telemetry.sdk.name` | `telemetry.sdk.language` | `telemetry.distro.name` | `agent.name` | +|----------------------|--------------------------|-------------------------|------------------------| +| "" | "" | "" | `otlp/unknown` | +| "" | dotnet | "" | `otlp/dotnet` | +| opentelemetry | dotnet | "" | `opentelemetry/dotnet` | +| "" | java | parts-unlimited-java | `otlp/java/parts-unlimited-java` | +| "" | "" | parts-unlimited-java | `otlp/unknown/parts-unlimited-java` | + +#### `agent.version` + +Takes the value of `telemetry.distro.version` or `telemetry.sdk.version`; if both are present, `telemetry.distro.version` takes precedence. + +#### `host.os.type` + +Maps values of `os.type` as follows: + +| SemConv Value | ECS Value | +|---------------|-----------| +| windows | windows | +| linux | linux | +| darwin | macos | +| aix | unix | +| hpux | unix | +| solaris | unix | + +If `os.name` is present and matches one of the following values, the corresponding ECS value is used: + +| SemConv Value | ECS Value | +|---------------|-----------| +| Android | android | +| iOS | ios | + +Otherwise, `host.os.type` is mapped to an empty string (""). + +#### `@timestamp` + +If the record contains `timestamp`, that value is used. Otherwise, the `observed timestamp` is used.
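For illustration, the compound `agent.name` rule documented above can be sketched in Go as follows. This is an editorial sketch of the rule the tables describe, not the exporter's actual code; the function name and structure are assumptions made for this example.

```go
package main

import "fmt"

// agentName sketches the compound agent.name rule described above:
// telemetry.sdk.name falls back to "otlp", telemetry.sdk.language falls
// back to "unknown", and telemetry.distro.name is appended only when set.
func agentName(sdkName, sdkLanguage, distroName string) string {
	if sdkName == "" {
		sdkName = "otlp"
	}
	if sdkLanguage == "" {
		sdkLanguage = "unknown"
	}
	if distroName == "" {
		return fmt.Sprintf("%s/%s", sdkName, sdkLanguage)
	}
	return fmt.Sprintf("%s/%s/%s", sdkName, sdkLanguage, distroName)
}

func main() {
	fmt.Println(agentName("", "", ""))                         // otlp/unknown
	fmt.Println(agentName("opentelemetry", "dotnet", ""))      // opentelemetry/dotnet
	fmt.Println(agentName("", "java", "parts-unlimited-java")) // otlp/java/parts-unlimited-java
}
```

The expected outputs in the comments mirror the rows of the `agent.name` table above.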
\ No newline at end of file From 3d5bb5f7f96f937a8ff2e853772a8f8c8ddafcdc Mon Sep 17 00:00:00 2001 From: Arthur Silva Sens Date: Thu, 29 Aug 2024 04:59:30 -0300 Subject: [PATCH 05/10] [processor/interval] Support Gauges and Summaries (#34805) **Description:** Adds support for Gauges and Summaries **Link to tracking Issue:** #34803 **Testing:** Unit tests were extended to cover the new behavior **Documentation:** --------- Signed-off-by: Arthur Silva Sens --- .../intervalprocessor_gauge_summary.yaml | 27 ++++++++ processor/intervalprocessor/README.md | 8 ++- processor/intervalprocessor/config.go | 8 ++- processor/intervalprocessor/factory.go | 4 +- .../internal/metrics/metrics.go | 3 - processor/intervalprocessor/processor.go | 29 +++++++-- processor/intervalprocessor/processor_test.go | 32 +++++----- .../testdata/gauges_are_aggregated/input.yaml | 40 ++++++++++++ .../testdata/gauges_are_aggregated/next.yaml | 1 + .../gauges_are_aggregated/output.yaml | 27 ++++++++ .../gauges_are_passed_through/input.yaml | 2 +- .../gauges_are_passed_through/next.yaml | 2 +- .../gauges_are_passed_through/output.yaml | 2 +- .../summaries_are_aggregated/input.yaml | 63 +++++++++++++++++++ .../summaries_are_aggregated/next.yaml | 1 + .../summaries_are_aggregated/output.yaml | 34 ++++++++++ .../summaries_are_passed_through/input.yaml | 2 +- .../summaries_are_passed_through/next.yaml | 2 +- .../summaries_are_passed_through/output.yaml | 2 +- 19 files changed, 258 insertions(+), 31 deletions(-) create mode 100644 .chloggen/intervalprocessor_gauge_summary.yaml create mode 100644 processor/intervalprocessor/testdata/gauges_are_aggregated/input.yaml create mode 100644 processor/intervalprocessor/testdata/gauges_are_aggregated/next.yaml create mode 100644 processor/intervalprocessor/testdata/gauges_are_aggregated/output.yaml create mode 100644 processor/intervalprocessor/testdata/summaries_are_aggregated/input.yaml create mode 100644 processor/intervalprocessor/testdata/summaries_are_aggregated/next.yaml create mode 100644 processor/intervalprocessor/testdata/summaries_are_aggregated/output.yaml diff --git a/.chloggen/intervalprocessor_gauge_summary.yaml b/.chloggen/intervalprocessor_gauge_summary.yaml new file mode 100644 index 000000000000..96894ec68371 --- /dev/null +++ b/.chloggen/intervalprocessor_gauge_summary.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: processor/interval + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Support for gauge and summary metrics. + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [34803] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: Only the last value of a gauge or summary metric is reported in the interval processor, instead of all values. + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. 
+ # Optional: The change log or logs in which this entry should be included. # e.g. '[user]' or '[user, api]' # Include 'user' if the change is relevant to end users. # Include 'api' if there is a change to a library API. # Default: '[user]' change_logs: [user] diff --git a/processor/intervalprocessor/README.md b/processor/intervalprocessor/README.md index 0c4971e73566..857bcf6c7d58 100644 --- a/processor/intervalprocessor/README.md +++ b/processor/intervalprocessor/README.md @@ -19,19 +19,23 @@ The interval processor (`intervalprocessor`) aggregates metrics and periodically * Monotonically increasing, cumulative sums * Monotonically increasing, cumulative histograms * Monotonically increasing, cumulative exponential histograms +* Gauges +* Summaries The following metric types will *not* be aggregated, and will instead be passed, unchanged, to the next component in the pipeline: * All delta metrics * Non-monotonically increasing sums -* Gauges -* Summaries + +> NOTE: Aggregating data over an interval is an inherently "lossy" process. For monotonically increasing, cumulative sums, histograms, and exponential histograms, you "lose" precision, but you don't lose overall data. But for non-monotonically increasing sums, gauges, and summaries, aggregation represents actual data loss. I.e., you could "lose" that a value increased and then decreased back to the original value. In most cases, this data "loss" is acceptable. However, if you would rather these values be passed through, and *not* aggregated, you can set that in the configuration. ## Configuration The following settings can be optionally configured: * `interval`: The interval in which the processor should export the aggregated metrics. Default: 60s +* `gauge_pass_through`: Whether gauges should pass through as they are to the next component or be aggregated. Default: false +* `summary_pass_through`: Whether summaries should pass through as they are to the next component or be aggregated. Default: false ## Example of metric flows diff --git a/processor/intervalprocessor/config.go b/processor/intervalprocessor/config.go index 1967afc972bb..96ad36189f80 100644 --- a/processor/intervalprocessor/config.go +++ b/processor/intervalprocessor/config.go @@ -18,8 +18,14 @@ var _ component.Config = (*Config)(nil) // Config defines the configuration for the processor. type Config struct { - // Interval is the time + // Interval is the time interval at which the processor will aggregate metrics. Interval time.Duration `mapstructure:"interval"` + // GaugePassThrough is a flag that determines whether gauge metrics should be passed through + // as they are or aggregated. + GaugePassThrough bool `mapstructure:"gauge_pass_through"` + // SummaryPassThrough is a flag that determines whether summary metrics should be passed through + // as they are or aggregated. + SummaryPassThrough bool `mapstructure:"summary_pass_through"` } // Validate checks whether the input configuration has all of the required fields for the processor.
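To make the new options above concrete, a minimal collector configuration exercising them might look like the sketch below. This is an illustrative sketch only, not part of this patch; the `interval` processor key follows the component name used in the changelog entry, and the otlp receiver and debug exporter are assumptions chosen just to complete the pipeline.

```yaml
receivers:
  otlp:
    protocols:
      grpc:

processors:
  interval:
    # Export the aggregated state every 60s (the default).
    interval: 60s
    # Aggregate gauges: only the last value seen per interval is kept.
    gauge_pass_through: false
    # Forward summaries unchanged instead of aggregating them.
    summary_pass_through: true

exporters:
  debug:

service:
  pipelines:
    metrics:
      receivers: [otlp]
      processors: [interval]
      exporters: [debug]
```

With `summary_pass_through: true`, summaries bypass aggregation entirely, while gauges are still reduced to their most recent value each interval, matching the behavior described in the changelog entry above.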
diff --git a/processor/intervalprocessor/factory.go b/processor/intervalprocessor/factory.go index 87a1278cbc0b..981cc63f29a2 100644 --- a/processor/intervalprocessor/factory.go +++ b/processor/intervalprocessor/factory.go @@ -25,7 +25,9 @@ func NewFactory() processor.Factory { func createDefaultConfig() component.Config { return &Config{ - Interval: 60 * time.Second, + Interval: 60 * time.Second, + GaugePassThrough: false, + SummaryPassThrough: false, } } diff --git a/processor/intervalprocessor/internal/metrics/metrics.go b/processor/intervalprocessor/internal/metrics/metrics.go index c3febf1a173a..f06a91a8bc06 100644 --- a/processor/intervalprocessor/internal/metrics/metrics.go +++ b/processor/intervalprocessor/internal/metrics/metrics.go @@ -5,7 +5,6 @@ package metrics // import "github.com/open-telemetry/opentelemetry-collector-con import ( "go.opentelemetry.io/collector/pdata/pcommon" - "go.opentelemetry.io/collector/pdata/pmetric" ) type DataPointSlice[DP DataPoint[DP]] interface { @@ -15,8 +14,6 @@ type DataPointSlice[DP DataPoint[DP]] interface { } type DataPoint[Self any] interface { - pmetric.NumberDataPoint | pmetric.HistogramDataPoint | pmetric.ExponentialHistogramDataPoint - Timestamp() pcommon.Timestamp Attributes() pcommon.Map CopyTo(dest Self) diff --git a/processor/intervalprocessor/processor.go b/processor/intervalprocessor/processor.go index 6960472e5395..fa49a04211d8 100644 --- a/processor/intervalprocessor/processor.go +++ b/processor/intervalprocessor/processor.go @@ -36,8 +36,11 @@ type Processor struct { numberLookup map[identity.Stream]pmetric.NumberDataPoint histogramLookup map[identity.Stream]pmetric.HistogramDataPoint expHistogramLookup map[identity.Stream]pmetric.ExponentialHistogramDataPoint + summaryLookup map[identity.Stream]pmetric.SummaryDataPoint - exportInterval time.Duration + exportInterval time.Duration + gaugePassThrough bool + summaryPassThrough bool nextConsumer consumer.Metrics } @@ -59,8 +62,11 @@ func newProcessor(config *Config, log *zap.Logger, nextConsumer consumer.Metrics numberLookup: map[identity.Stream]pmetric.NumberDataPoint{}, histogramLookup: map[identity.Stream]pmetric.HistogramDataPoint{}, expHistogramLookup: map[identity.Stream]pmetric.ExponentialHistogramDataPoint{}, + summaryLookup: map[identity.Stream]pmetric.SummaryDataPoint{}, - exportInterval: config.Interval, + exportInterval: config.Interval, + gaugePassThrough: config.GaugePassThrough, + summaryPassThrough: config.SummaryPassThrough, nextConsumer: nextConsumer, } @@ -102,8 +108,22 @@ func (p *Processor) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) erro rm.ScopeMetrics().RemoveIf(func(sm pmetric.ScopeMetrics) bool { sm.Metrics().RemoveIf(func(m pmetric.Metric) bool { switch m.Type() { - case pmetric.MetricTypeGauge, pmetric.MetricTypeSummary: - return false + case pmetric.MetricTypeSummary: + if p.summaryPassThrough { + return false + } + + mClone, metricID := p.getOrCloneMetric(rm, sm, m) + aggregateDataPoints(m.Summary().DataPoints(), mClone.Summary().DataPoints(), metricID, p.summaryLookup) + return true + case pmetric.MetricTypeGauge: + if p.gaugePassThrough { + return false + } + + mClone, metricID := p.getOrCloneMetric(rm, sm, m) + aggregateDataPoints(m.Gauge().DataPoints(), mClone.Gauge().DataPoints(), metricID, p.numberLookup) + return true case pmetric.MetricTypeSum: // Check if we care about this value sum := m.Sum() @@ -202,6 +222,7 @@ func (p *Processor) exportMetrics() { clear(p.numberLookup) clear(p.histogramLookup) clear(p.expHistogramLookup) + 
clear(p.summaryLookup) return out }() diff --git a/processor/intervalprocessor/processor_test.go b/processor/intervalprocessor/processor_test.go index 39cb953d2310..cda18e561b5d 100644 --- a/processor/intervalprocessor/processor_test.go +++ b/processor/intervalprocessor/processor_test.go @@ -21,26 +21,29 @@ import ( func TestAggregation(t *testing.T) { t.Parallel() - testCases := []string{ - "basic_aggregation", - "non_monotonic_sums_are_passed_through", - "summaries_are_passed_through", - "histograms_are_aggregated", - "exp_histograms_are_aggregated", - "all_delta_metrics_are_passed_through", + testCases := []struct { + name string + passThrough bool + }{ + {name: "basic_aggregation"}, + {name: "histograms_are_aggregated"}, + {name: "exp_histograms_are_aggregated"}, + {name: "gauges_are_aggregated"}, + {name: "summaries_are_aggregated"}, + {name: "all_delta_metrics_are_passed_through"}, // Deltas are passed through even when aggregation is enabled + {name: "non_monotonic_sums_are_passed_through"}, // Non-monotonic sums are passed through even when aggregation is enabled + {name: "gauges_are_passed_through", passThrough: true}, + {name: "summaries_are_passed_through", passThrough: true}, } ctx, cancel := context.WithCancel(context.Background()) defer cancel() - config := &Config{Interval: time.Second} - + var config *Config for _, tc := range testCases { - testName := tc - - t.Run(testName, func(t *testing.T) { - t.Parallel() + config = &Config{Interval: time.Second, GaugePassThrough: tc.passThrough, SummaryPassThrough: tc.passThrough} + t.Run(tc.name, func(t *testing.T) { // next stores the results of the filter metric processor next := &consumertest.MetricsSink{} @@ -53,7 +56,7 @@ func TestAggregation(t *testing.T) { ) require.NoError(t, err) - dir := filepath.Join("testdata", testName) + dir := filepath.Join("testdata", tc.name) md, err := golden.ReadMetrics(filepath.Join(dir, "input.yaml")) require.NoError(t, err) @@ -75,6 +78,7 @@ func TestAggregation(t *testing.T) { require.Empty(t, processor.numberLookup) require.Empty(t, processor.histogramLookup) require.Empty(t, processor.expHistogramLookup) + require.Empty(t, processor.summaryLookup) // Exporting again should return nothing processor.exportMetrics() diff --git a/processor/intervalprocessor/testdata/gauges_are_aggregated/input.yaml b/processor/intervalprocessor/testdata/gauges_are_aggregated/input.yaml new file mode 100644 index 000000000000..019dd6dd8511 --- /dev/null +++ b/processor/intervalprocessor/testdata/gauges_are_aggregated/input.yaml @@ -0,0 +1,40 @@ +resourceMetrics: + - schemaUrl: https://test-res-schema.com/schema + resource: + attributes: + - key: asdf + value: + stringValue: foo + scopeMetrics: + - schemaUrl: https://test-scope-schema.com/schema + scope: + name: MyTestInstrument + version: "1.2.3" + attributes: + - key: foo + value: + stringValue: bar + metrics: + - name: test.gauge + gauge: + aggregationTemporality: 2 + dataPoints: + - timeUnixNano: 50 + asDouble: 345 + attributes: + - key: aaa + value: + stringValue: bbb + - timeUnixNano: 20 + asDouble: 258 + attributes: + - key: aaa + value: + stringValue: bbb + # For interval processor point of view, only the last datapoint should be passed through. 
+ - timeUnixNano: 80 + asDouble: 178 + attributes: + - key: aaa + value: + stringValue: bbb \ No newline at end of file diff --git a/processor/intervalprocessor/testdata/gauges_are_aggregated/next.yaml b/processor/intervalprocessor/testdata/gauges_are_aggregated/next.yaml new file mode 100644 index 000000000000..d2e76ef0f16b --- /dev/null +++ b/processor/intervalprocessor/testdata/gauges_are_aggregated/next.yaml @@ -0,0 +1 @@ +resourceMetrics: [] \ No newline at end of file diff --git a/processor/intervalprocessor/testdata/gauges_are_aggregated/output.yaml b/processor/intervalprocessor/testdata/gauges_are_aggregated/output.yaml new file mode 100644 index 000000000000..fe0b264bd1db --- /dev/null +++ b/processor/intervalprocessor/testdata/gauges_are_aggregated/output.yaml @@ -0,0 +1,27 @@ +resourceMetrics: + - schemaUrl: https://test-res-schema.com/schema + resource: + attributes: + - key: asdf + value: + stringValue: foo + scopeMetrics: + - schemaUrl: https://test-scope-schema.com/schema + scope: + name: MyTestInstrument + version: "1.2.3" + attributes: + - key: foo + value: + stringValue: bar + metrics: + - name: test.gauge + gauge: + aggregationTemporality: 2 + dataPoints: + - timeUnixNano: 80 + asDouble: 178 + attributes: + - key: aaa + value: + stringValue: bbb \ No newline at end of file diff --git a/processor/intervalprocessor/testdata/gauges_are_passed_through/input.yaml b/processor/intervalprocessor/testdata/gauges_are_passed_through/input.yaml index a3d65c2986e0..89b1879ee4d8 100644 --- a/processor/intervalprocessor/testdata/gauges_are_passed_through/input.yaml +++ b/processor/intervalprocessor/testdata/gauges_are_passed_through/input.yaml @@ -36,4 +36,4 @@ resourceMetrics: attributes: - key: aaa value: - stringValue: bbb + stringValue: bbb \ No newline at end of file diff --git a/processor/intervalprocessor/testdata/gauges_are_passed_through/next.yaml b/processor/intervalprocessor/testdata/gauges_are_passed_through/next.yaml index a3d65c2986e0..c1e8b3add92e 100644 --- a/processor/intervalprocessor/testdata/gauges_are_passed_through/next.yaml +++ b/processor/intervalprocessor/testdata/gauges_are_passed_through/next.yaml @@ -36,4 +36,4 @@ resourceMetrics: attributes: - key: aaa value: - stringValue: bbb + stringValue: bbb \ No newline at end of file diff --git a/processor/intervalprocessor/testdata/gauges_are_passed_through/output.yaml b/processor/intervalprocessor/testdata/gauges_are_passed_through/output.yaml index 3949e7c54ded..d2e76ef0f16b 100644 --- a/processor/intervalprocessor/testdata/gauges_are_passed_through/output.yaml +++ b/processor/intervalprocessor/testdata/gauges_are_passed_through/output.yaml @@ -1 +1 @@ -resourceMetrics: [] +resourceMetrics: [] \ No newline at end of file diff --git a/processor/intervalprocessor/testdata/summaries_are_aggregated/input.yaml b/processor/intervalprocessor/testdata/summaries_are_aggregated/input.yaml new file mode 100644 index 000000000000..c0190dd5c614 --- /dev/null +++ b/processor/intervalprocessor/testdata/summaries_are_aggregated/input.yaml @@ -0,0 +1,63 @@ +resourceMetrics: + - schemaUrl: https://test-res-schema.com/schema + resource: + attributes: + - key: asdf + value: + stringValue: foo + scopeMetrics: + - schemaUrl: https://test-scope-schema.com/schema + scope: + name: MyTestInstrument + version: "1.2.3" + attributes: + - key: foo + value: + stringValue: bar + metrics: + - name: summary.test + summary: + dataPoints: + - timeUnixNano: 50 + quantileValues: + - quantile: 0.25 + value: 50 + - quantile: 0.5 + value: 20 + - 
quantile: 0.75 + value: 75 + - quantile: 0.95 + value: 10 + attributes: + - key: aaa + value: + stringValue: bbb + - timeUnixNano: 20 + quantileValues: + - quantile: 0.25 + value: 40 + - quantile: 0.5 + value: 10 + - quantile: 0.75 + value: 60 + - quantile: 0.95 + value: 5 + attributes: + - key: aaa + value: + stringValue: bbb + # Only last summary should pass through + - timeUnixNano: 80 + quantileValues: + - quantile: 0.25 + value: 80 + - quantile: 0.5 + value: 35 + - quantile: 0.75 + value: 90 + - quantile: 0.95 + value: 15 + attributes: + - key: aaa + value: + stringValue: bbb diff --git a/processor/intervalprocessor/testdata/summaries_are_aggregated/next.yaml b/processor/intervalprocessor/testdata/summaries_are_aggregated/next.yaml new file mode 100644 index 000000000000..d2e76ef0f16b --- /dev/null +++ b/processor/intervalprocessor/testdata/summaries_are_aggregated/next.yaml @@ -0,0 +1 @@ +resourceMetrics: [] \ No newline at end of file diff --git a/processor/intervalprocessor/testdata/summaries_are_aggregated/output.yaml b/processor/intervalprocessor/testdata/summaries_are_aggregated/output.yaml new file mode 100644 index 000000000000..75b8475e9ba7 --- /dev/null +++ b/processor/intervalprocessor/testdata/summaries_are_aggregated/output.yaml @@ -0,0 +1,34 @@ +resourceMetrics: + - schemaUrl: https://test-res-schema.com/schema + resource: + attributes: + - key: asdf + value: + stringValue: foo + scopeMetrics: + - schemaUrl: https://test-scope-schema.com/schema + scope: + name: MyTestInstrument + version: "1.2.3" + attributes: + - key: foo + value: + stringValue: bar + metrics: + - name: summary.test + summary: + dataPoints: + - timeUnixNano: 80 + quantileValues: + - quantile: 0.25 + value: 80 + - quantile: 0.5 + value: 35 + - quantile: 0.75 + value: 90 + - quantile: 0.95 + value: 15 + attributes: + - key: aaa + value: + stringValue: bbb diff --git a/processor/intervalprocessor/testdata/summaries_are_passed_through/input.yaml b/processor/intervalprocessor/testdata/summaries_are_passed_through/input.yaml index 15862ceb73e8..7d9cdfd5b6fd 100644 --- a/processor/intervalprocessor/testdata/summaries_are_passed_through/input.yaml +++ b/processor/intervalprocessor/testdata/summaries_are_passed_through/input.yaml @@ -59,4 +59,4 @@ resourceMetrics: attributes: - key: aaa value: - stringValue: bbb + stringValue: bbb \ No newline at end of file diff --git a/processor/intervalprocessor/testdata/summaries_are_passed_through/next.yaml b/processor/intervalprocessor/testdata/summaries_are_passed_through/next.yaml index 15862ceb73e8..7d9cdfd5b6fd 100644 --- a/processor/intervalprocessor/testdata/summaries_are_passed_through/next.yaml +++ b/processor/intervalprocessor/testdata/summaries_are_passed_through/next.yaml @@ -59,4 +59,4 @@ resourceMetrics: attributes: - key: aaa value: - stringValue: bbb + stringValue: bbb \ No newline at end of file diff --git a/processor/intervalprocessor/testdata/summaries_are_passed_through/output.yaml b/processor/intervalprocessor/testdata/summaries_are_passed_through/output.yaml index 3949e7c54ded..d2e76ef0f16b 100644 --- a/processor/intervalprocessor/testdata/summaries_are_passed_through/output.yaml +++ b/processor/intervalprocessor/testdata/summaries_are_passed_through/output.yaml @@ -1 +1 @@ -resourceMetrics: [] +resourceMetrics: [] \ No newline at end of file From 53ad0a40e7ae541081c2ddd8fdb0db6307a98fd1 Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Thu, 29 Aug 2024 16:02:44 +0200 Subject: [PATCH 06/10] [chore]: enable bool-compare rule from testifylint (#34912) 
#### Description Testifylint is a linter that provides best practices with the use of testify. This PR enables [bool-compare](https://github.com/Antonboom/testifylint?tab=readme-ov-file#bool-compare) rule from [testifylint](https://github.com/Antonboom/testifylint) It's linter provided by golangci-lint. Here all available rules are activated except those who require to be fixed. This PR only fixes bool-compare so the quantity of changes stays reasonnable for reviewers. Signed-off-by: Matthieu MOREL --- .golangci.yml | 18 ++++++ connector/routingconnector/logs_test.go | 2 +- connector/routingconnector/metrics_test.go | 2 +- connector/routingconnector/traces_test.go | 2 +- .../internal/store/store_test.go | 12 ++-- .../spanmetricsconnector/connector_test.go | 2 +- .../alertmanager_exporter_test.go | 4 +- exporter/awss3exporter/s3_writer_test.go | 8 +-- .../internal/translator/segment_test.go | 8 +-- exporter/datasetexporter/config_test.go | 4 +- exporter/fileexporter/factory_test.go | 8 +-- .../logs_exporter_test.go | 2 +- .../internal/logs/sender_test.go | 4 +- .../internal/traces/sender_test.go | 4 +- exporter/lokiexporter/exporter_test.go | 2 +- .../prometheusexporter/accumulator_test.go | 2 +- exporter/syslogexporter/exporter_test.go | 2 +- extension/ackextension/inmemory_test.go | 60 +++++++++---------- extension/storage/filestorage/factory_test.go | 2 +- internal/aws/containerinsight/utils_test.go | 36 +++++------ .../aws/metrics/metric_calculator_test.go | 8 +-- internal/otelarrow/test/e2e_test.go | 2 +- .../entity_events_test.go | 2 +- pkg/translator/loki/logs_to_loki_test.go | 2 +- .../groupbyattrsprocessor/factory_test.go | 6 +- .../groupbytraceprocessor/processor_test.go | 2 +- processor/k8sattributesprocessor/e2e_test.go | 6 +- .../metrics_transform_processor_group_test.go | 2 +- .../metrics_transform_processor_test.go | 2 +- processor/redactionprocessor/factory_test.go | 2 +- processor/routingprocessor/logs_test.go | 2 +- processor/routingprocessor/metrics_test.go | 2 +- processor/routingprocessor/traces_test.go | 2 +- .../cadvisor/extractors/fs_extractor_test.go | 10 ++-- .../internal/ecsInfo/utils_test.go | 4 +- .../accumulator_test.go | 8 +-- .../awsecscontainermetrics/resource_test.go | 4 +- receiver/bigipreceiver/client_test.go | 6 +- receiver/datadogreceiver/receiver_test.go | 4 +- .../k8s_event_to_logdata_test.go | 6 +- receiver/k8seventsreceiver/receiver_test.go | 6 +- .../unstructured_to_logdata_test.go | 2 +- .../broker_scraper_test.go | 2 +- .../kafkareceiver/header_extraction_test.go | 2 +- receiver/prometheusreceiver/config_test.go | 4 +- .../targetallocator/manager_test.go | 4 +- .../snmpreceiver/otel_metric_helper_test.go | 12 ++-- .../unmarshaller_receive_test.go | 2 +- 48 files changed, 159 insertions(+), 141 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index fdd0fc238b15..144d9065ad2c 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -132,6 +132,23 @@ linters-settings: predeclared: ignore: copy + testifylint: + disable: + - compares + - empty + - error-is-as + - error-nil + - expected-actual + - float-compare + - go-require + - len + - negative-positive + - nil-compare + - require-error + - suite-dont-use-pkg + - useless-assert + enable-all: true + linters: enable: - decorder @@ -152,6 +169,7 @@ linters: - revive - staticcheck - tenv + - testifylint - unconvert - unparam - unused diff --git a/connector/routingconnector/logs_test.go b/connector/routingconnector/logs_test.go index 6da7ef07c21e..a362028c2870 100644 --- 
a/connector/routingconnector/logs_test.go +++ b/connector/routingconnector/logs_test.go @@ -463,5 +463,5 @@ func TestLogsConnectorCapabilities(t *testing.T) { ) require.NoError(t, err) - assert.Equal(t, false, conn.Capabilities().MutatesData) + assert.False(t, conn.Capabilities().MutatesData) } diff --git a/connector/routingconnector/metrics_test.go b/connector/routingconnector/metrics_test.go index 366294289b2b..b3617060b4a1 100644 --- a/connector/routingconnector/metrics_test.go +++ b/connector/routingconnector/metrics_test.go @@ -493,5 +493,5 @@ func TestMetricsConnectorCapabilities(t *testing.T) { ) require.NoError(t, err) - assert.Equal(t, false, conn.Capabilities().MutatesData) + assert.False(t, conn.Capabilities().MutatesData) } diff --git a/connector/routingconnector/traces_test.go b/connector/routingconnector/traces_test.go index d7cd62b60f5d..7d7a1a62ab72 100644 --- a/connector/routingconnector/traces_test.go +++ b/connector/routingconnector/traces_test.go @@ -417,5 +417,5 @@ func TestTraceConnectorCapabilities(t *testing.T) { ) require.NoError(t, err) - assert.Equal(t, false, conn.Capabilities().MutatesData) + assert.False(t, conn.Capabilities().MutatesData) } diff --git a/connector/servicegraphconnector/internal/store/store_test.go b/connector/servicegraphconnector/internal/store/store_test.go index 189bdd11e7ed..c4cb02a398c7 100644 --- a/connector/servicegraphconnector/internal/store/store_test.go +++ b/connector/servicegraphconnector/internal/store/store_test.go @@ -30,7 +30,7 @@ func TestStoreUpsertEdge(t *testing.T) { e.ClientService = clientService }) require.NoError(t, err) - require.Equal(t, true, isNew) + require.True(t, isNew) assert.Equal(t, 1, s.Len()) // Nothing should be evicted as TTL is set to 1h @@ -44,7 +44,7 @@ func TestStoreUpsertEdge(t *testing.T) { e.ServerService = "server" }) require.NoError(t, err) - require.Equal(t, false, isNew) + require.False(t, isNew) // Edge is complete and should have been removed assert.Equal(t, 0, s.Len()) @@ -57,7 +57,7 @@ func TestStoreUpsertEdge(t *testing.T) { e.expiration = time.UnixMicro(0) }) require.NoError(t, err) - require.Equal(t, true, isNew) + require.True(t, isNew) assert.Equal(t, 1, s.Len()) assert.Equal(t, 1, onCompletedCount) assert.Equal(t, 0, onExpireCount) @@ -80,7 +80,7 @@ func TestStoreUpsertEdge_errTooManyItems(t *testing.T) { e.ClientService = clientService }) require.NoError(t, err) - require.Equal(t, true, isNew) + require.True(t, isNew) assert.Equal(t, 1, s.Len()) _, err = s.UpsertEdge(key2, func(e *Edge) { @@ -93,7 +93,7 @@ func TestStoreUpsertEdge_errTooManyItems(t *testing.T) { e.ClientService = clientService }) require.NoError(t, err) - require.Equal(t, false, isNew) + require.False(t, isNew) assert.Equal(t, 1, s.Len()) assert.Equal(t, 0, onCallbackCounter) @@ -120,7 +120,7 @@ func TestStoreExpire(t *testing.T) { for key := range keys { isNew, err := s.UpsertEdge(key, noopCallback) require.NoError(t, err) - require.Equal(t, true, isNew) + require.True(t, isNew) } s.Expire() diff --git a/connector/spanmetricsconnector/connector_test.go b/connector/spanmetricsconnector/connector_test.go index a58de25fbbc3..893a32c2ae31 100644 --- a/connector/spanmetricsconnector/connector_test.go +++ b/connector/spanmetricsconnector/connector_test.go @@ -692,7 +692,7 @@ func TestConnectorCapabilities(t *testing.T) { // Verify assert.NotNil(t, c) - assert.Equal(t, false, caps.MutatesData) + assert.False(t, caps.MutatesData) } type errConsumer struct { diff --git 
a/exporter/alertmanagerexporter/alertmanager_exporter_test.go b/exporter/alertmanagerexporter/alertmanager_exporter_test.go index 11117486f42c..b5f8f888ca10 100644 --- a/exporter/alertmanagerexporter/alertmanager_exporter_test.go +++ b/exporter/alertmanagerexporter/alertmanager_exporter_test.go @@ -138,11 +138,11 @@ func TestAlertManagerExporterEventNameAttributes(t *testing.T) { // test - count of attributes assert.Equal(t, 3, got[0].spanEvent.Attributes().Len()) attr, b := got[0].spanEvent.Attributes().Get("attr1") - assert.Equal(t, true, b) + assert.True(t, b) assert.Equal(t, "unittest-event", got[0].spanEvent.Name()) assert.Equal(t, "unittest-baz", attr.AsString()) attr, b = got[0].spanEvent.Attributes().Get("attr3") - assert.Equal(t, true, b) + assert.True(t, b) assert.Equal(t, 5.14, attr.Double()) } diff --git a/exporter/awss3exporter/s3_writer_test.go b/exporter/awss3exporter/s3_writer_test.go index 6df1998b07d5..350e3284f7d9 100644 --- a/exporter/awss3exporter/s3_writer_test.go +++ b/exporter/awss3exporter/s3_writer_test.go @@ -38,7 +38,7 @@ func TestS3Key(t *testing.T) { re := regexp.MustCompile(`keyprefix/year=2022/month=06/day=05/hour=00/minute=00/fileprefixlogs_([0-9]+).json`) s3Key := getS3Key(tm, "keyprefix", "minute", "fileprefix", "logs", "json", "") matched := re.MatchString(s3Key) - assert.Equal(t, true, matched) + assert.True(t, matched) } func TestS3KeyEmptyFileFormat(t *testing.T) { @@ -52,7 +52,7 @@ func TestS3KeyEmptyFileFormat(t *testing.T) { re := regexp.MustCompile(`keyprefix/year=2022/month=06/day=05/hour=00/minute=00/fileprefixlogs_([0-9]+)`) s3Key := getS3Key(tm, "keyprefix", "minute", "fileprefix", "logs", "", "") matched := re.MatchString(s3Key) - assert.Equal(t, true, matched) + assert.True(t, matched) } func TestS3KeyOfCompressedFile(t *testing.T) { @@ -66,7 +66,7 @@ func TestS3KeyOfCompressedFile(t *testing.T) { re := regexp.MustCompile(`keyprefix/year=2022/month=06/day=05/hour=00/minute=00/fileprefixlogs_([0-9]+).json.gz`) s3Key := getS3Key(tm, "keyprefix", "minute", "fileprefix", "logs", "json", "gzip") matched := re.MatchString(s3Key) - assert.Equal(t, true, matched) + assert.True(t, matched) } func TestS3KeyOfCompressedFileEmptyFileFormat(t *testing.T) { @@ -80,7 +80,7 @@ func TestS3KeyOfCompressedFileEmptyFileFormat(t *testing.T) { re := regexp.MustCompile(`keyprefix/year=2022/month=06/day=05/hour=00/minute=00/fileprefixlogs_([0-9]+).gz`) s3Key := getS3Key(tm, "keyprefix", "minute", "fileprefix", "logs", "", "gzip") matched := re.MatchString(s3Key) - assert.Equal(t, true, matched) + assert.True(t, matched) } func TestGetSessionConfigWithEndpoint(t *testing.T) { diff --git a/exporter/awsxrayexporter/internal/translator/segment_test.go b/exporter/awsxrayexporter/internal/translator/segment_test.go index 8b33edec28eb..1ef895c9c8b4 100644 --- a/exporter/awsxrayexporter/internal/translator/segment_test.go +++ b/exporter/awsxrayexporter/internal/translator/segment_test.go @@ -1252,7 +1252,7 @@ func validateLocalRootDependencySubsegment(t *testing.T, segment *awsxray.Segmen assert.Equal(t, "MySDK", *segment.AWS.XRay.SDK) assert.Equal(t, "1.20.0", *segment.AWS.XRay.SDKVersion) - assert.Equal(t, true, *segment.AWS.XRay.AutoInstrumentation) + assert.True(t, *segment.AWS.XRay.AutoInstrumentation) assert.Equal(t, "UpdateItem", *segment.AWS.Operation) assert.Equal(t, "AWSAccountAttribute", *segment.AWS.AccountID) @@ -1278,7 +1278,7 @@ func validateLocalRootServiceSegment(t *testing.T, segment *awsxray.Segment, spa assert.Equal(t, "service.name=myTest", 
segment.Metadata["default"]["otel.resource.attributes"]) assert.Equal(t, "MySDK", *segment.AWS.XRay.SDK) assert.Equal(t, "1.20.0", *segment.AWS.XRay.SDKVersion) - assert.Equal(t, true, *segment.AWS.XRay.AutoInstrumentation) + assert.True(t, *segment.AWS.XRay.AutoInstrumentation) assert.Nil(t, segment.AWS.Operation) assert.Nil(t, segment.AWS.AccountID) assert.Nil(t, segment.AWS.RemoteRegion) @@ -1406,7 +1406,7 @@ func TestNonLocalRootConsumerProcess(t *testing.T) { assert.Equal(t, "service.name=myTest", segments[0].Metadata["default"]["otel.resource.attributes"]) assert.Equal(t, "MySDK", *segments[0].AWS.XRay.SDK) assert.Equal(t, "1.20.0", *segments[0].AWS.XRay.SDKVersion) - assert.Equal(t, true, *segments[0].AWS.XRay.AutoInstrumentation) + assert.True(t, *segments[0].AWS.XRay.AutoInstrumentation) assert.Equal(t, "UpdateItem", *segments[0].AWS.Operation) assert.Nil(t, segments[0].Namespace) } @@ -1563,7 +1563,7 @@ func validateLocalRootWithoutDependency(t *testing.T, segment *awsxray.Segment, assert.Equal(t, "service.name=myTest", segment.Metadata["default"]["otel.resource.attributes"]) assert.Equal(t, "MySDK", *segment.AWS.XRay.SDK) assert.Equal(t, "1.20.0", *segment.AWS.XRay.SDKVersion) - assert.Equal(t, true, *segment.AWS.XRay.AutoInstrumentation) + assert.True(t, *segment.AWS.XRay.AutoInstrumentation) assert.Equal(t, "UpdateItem", *segment.AWS.Operation) assert.Equal(t, "AWSAccountAttribute", *segment.AWS.AccountID) diff --git a/exporter/datasetexporter/config_test.go b/exporter/datasetexporter/config_test.go index f036eef975af..545e596b7c75 100644 --- a/exporter/datasetexporter/config_test.go +++ b/exporter/datasetexporter/config_test.go @@ -157,7 +157,7 @@ func TestConfigUseProvidedExportResourceInfoValue(t *testing.T) { }) err := config.Unmarshal(configMap) assert.NoError(t, err) - assert.Equal(t, true, config.LogsSettings.ExportResourceInfo) + assert.True(t, config.LogsSettings.ExportResourceInfo) } func TestConfigUseProvidedExportScopeInfoValue(t *testing.T) { @@ -172,5 +172,5 @@ func TestConfigUseProvidedExportScopeInfoValue(t *testing.T) { }) err := config.Unmarshal(configMap) assert.NoError(t, err) - assert.Equal(t, false, config.LogsSettings.ExportScopeInfo) + assert.False(t, config.LogsSettings.ExportScopeInfo) } diff --git a/exporter/fileexporter/factory_test.go b/exporter/fileexporter/factory_test.go index b56d48de4a6c..1f8647537750 100644 --- a/exporter/fileexporter/factory_test.go +++ b/exporter/fileexporter/factory_test.go @@ -124,7 +124,7 @@ func TestNewFileWriter(t *testing.T) { validate: func(t *testing.T, writer *fileWriter) { assert.Equal(t, 5*time.Second, writer.flushInterval) _, ok := writer.file.(*bufferedWriteCloser) - assert.Equal(t, true, ok) + assert.True(t, ok) }, }, { @@ -139,7 +139,7 @@ func TestNewFileWriter(t *testing.T) { }, validate: func(t *testing.T, writer *fileWriter) { logger, ok := writer.file.(*lumberjack.Logger) - assert.Equal(t, true, ok) + assert.True(t, ok) assert.Equal(t, defaultMaxBackups, logger.MaxBackups) }, }, @@ -158,11 +158,11 @@ func TestNewFileWriter(t *testing.T) { }, validate: func(t *testing.T, writer *fileWriter) { logger, ok := writer.file.(*lumberjack.Logger) - assert.Equal(t, true, ok) + assert.True(t, ok) assert.Equal(t, 3, logger.MaxBackups) assert.Equal(t, 30, logger.MaxSize) assert.Equal(t, 100, logger.MaxAge) - assert.Equal(t, true, logger.LocalTime) + assert.True(t, logger.LocalTime) }, }, } diff --git a/exporter/honeycombmarkerexporter/logs_exporter_test.go b/exporter/honeycombmarkerexporter/logs_exporter_test.go index 
7dfa7d3175a6..22f48e8e8c68 100644 --- a/exporter/honeycombmarkerexporter/logs_exporter_test.go +++ b/exporter/honeycombmarkerexporter/logs_exporter_test.go @@ -138,7 +138,7 @@ func TestExportMarkers(t *testing.T) { userAgent := req.Header.Get(userAgentHeaderKey) assert.NotEmpty(t, userAgent) - assert.Equal(t, strings.Contains(userAgent, "OpenTelemetry Collector"), true) + assert.True(t, strings.Contains(userAgent, "OpenTelemetry Collector")) rw.WriteHeader(http.StatusAccepted) })) diff --git a/exporter/logicmonitorexporter/internal/logs/sender_test.go b/exporter/logicmonitorexporter/internal/logs/sender_test.go index 03624995987a..d53b05fc90e1 100644 --- a/exporter/logicmonitorexporter/internal/logs/sender_test.go +++ b/exporter/logicmonitorexporter/internal/logs/sender_test.go @@ -64,7 +64,7 @@ func TestSendLogs(t *testing.T) { err = sender.SendLogs(ctx, []model.LogInput{logInput}) cancel() assert.Error(t, err) - assert.Equal(t, true, consumererror.IsPermanent(err)) + assert.True(t, consumererror.IsPermanent(err)) }) t.Run("should not return permanent failure error", func(t *testing.T) { @@ -87,7 +87,7 @@ func TestSendLogs(t *testing.T) { err = sender.SendLogs(ctx, []model.LogInput{logInput}) cancel() assert.Error(t, err) - assert.Equal(t, false, consumererror.IsPermanent(err)) + assert.False(t, consumererror.IsPermanent(err)) }) } diff --git a/exporter/logicmonitorexporter/internal/traces/sender_test.go b/exporter/logicmonitorexporter/internal/traces/sender_test.go index c9b07aafdf5f..ed1feacefdf4 100644 --- a/exporter/logicmonitorexporter/internal/traces/sender_test.go +++ b/exporter/logicmonitorexporter/internal/traces/sender_test.go @@ -64,7 +64,7 @@ func TestSendTraces(t *testing.T) { err = sender.SendTraces(ctx, testdata.GenerateTraces(1)) cancel() assert.Error(t, err) - assert.Equal(t, true, consumererror.IsPermanent(err)) + assert.True(t, consumererror.IsPermanent(err)) }) t.Run("should not return permanent failure error", func(t *testing.T) { @@ -86,6 +86,6 @@ func TestSendTraces(t *testing.T) { err = sender.SendTraces(ctx, testdata.GenerateTraces(1)) cancel() assert.Error(t, err) - assert.Equal(t, false, consumererror.IsPermanent(err)) + assert.False(t, consumererror.IsPermanent(err)) }) } diff --git a/exporter/lokiexporter/exporter_test.go b/exporter/lokiexporter/exporter_test.go index c0108aa49ce0..a7c1a5cb7fe0 100644 --- a/exporter/lokiexporter/exporter_test.go +++ b/exporter/lokiexporter/exporter_test.go @@ -276,7 +276,7 @@ func TestLogsToLokiRequestWithGroupingByTenant(t *testing.T) { assert.Equal(t, len(actualPushRequestPerTenant), len(tC.expected)) for tenant, request := range actualPushRequestPerTenant { pr, ok := tC.expected[tenant] - assert.Equal(t, ok, true) + assert.True(t, ok) expectedLabel := pr.label expectedLine := pr.line diff --git a/exporter/prometheusexporter/accumulator_test.go b/exporter/prometheusexporter/accumulator_test.go index 43b78ced5f01..49b39c4412bb 100644 --- a/exporter/prometheusexporter/accumulator_test.go +++ b/exporter/prometheusexporter/accumulator_test.go @@ -373,7 +373,7 @@ func TestAccumulateDeltaToCumulative(t *testing.T) { require.Equal(t, mValue, vValue) require.Equal(t, dataPointValue1+dataPointValue2, vValue) require.Equal(t, pmetric.AggregationTemporalityCumulative, vTemporality) - require.Equal(t, true, vIsMonotonic) + require.True(t, vIsMonotonic) require.Equal(t, ts3.Unix(), vTS.Unix()) }) diff --git a/exporter/syslogexporter/exporter_test.go b/exporter/syslogexporter/exporter_test.go index f2720ed4ac8a..212ea99d687c 100644 --- 
a/exporter/syslogexporter/exporter_test.go +++ b/exporter/syslogexporter/exporter_test.go @@ -168,7 +168,7 @@ func TestSyslogExportFail(t *testing.T) { consumerErr := test.exp.pushLogsData(context.Background(), logs) var consumerErrorLogs consumererror.Logs ok := errors.As(consumerErr, &consumerErrorLogs) - assert.Equal(t, ok, true) + assert.True(t, ok) consumerLogs := consumererror.Logs.Data(consumerErrorLogs) rls := consumerLogs.ResourceLogs() require.Equal(t, 1, rls.Len()) diff --git a/extension/ackextension/inmemory_test.go b/extension/ackextension/inmemory_test.go index 8e893866a5f7..5d9f4333db74 100644 --- a/extension/ackextension/inmemory_test.go +++ b/extension/ackextension/inmemory_test.go @@ -105,9 +105,9 @@ func TestExtensionAck_ProcessEvents_EventsUnAcked(t *testing.T) { for i := 0; i < 100; i++ { result := ext.QueryAcks(fmt.Sprintf("part-%d", i), []uint64{0, 1, 2}) require.Equal(t, len(result), 3) - require.Equal(t, result[0], false) - require.Equal(t, result[1], false) - require.Equal(t, result[2], false) + require.False(t, result[0]) + require.False(t, result[1]) + require.False(t, result[2]) } } @@ -141,15 +141,15 @@ func TestExtensionAck_ProcessEvents_EventsAcked(t *testing.T) { if i%2 == 0 { result := ext.QueryAcks(fmt.Sprintf("part-%d", i), []uint64{1, 2, 3}) require.Equal(t, len(result), 3) - require.Equal(t, result[1], false) - require.Equal(t, result[2], true) - require.Equal(t, result[3], false) + require.False(t, result[1]) + require.True(t, result[2]) + require.False(t, result[3]) } else { result := ext.QueryAcks(fmt.Sprintf("part-%d", i), []uint64{1, 2, 3}) require.Equal(t, len(result), 3) - require.Equal(t, result[1], true) - require.Equal(t, result[2], false) - require.Equal(t, result[3], true) + require.True(t, result[1]) + require.False(t, result[2]) + require.True(t, result[3]) } } } @@ -184,15 +184,15 @@ func TestExtensionAck_QueryAcks_Unidempotent(t *testing.T) { if i%2 == 0 { result := ext.QueryAcks(fmt.Sprintf("part-%d", i), []uint64{1, 2, 3}) require.Equal(t, len(result), 3) - require.Equal(t, result[1], false) - require.Equal(t, result[2], true) - require.Equal(t, result[3], false) + require.False(t, result[1]) + require.True(t, result[2]) + require.False(t, result[3]) } else { result := ext.QueryAcks(fmt.Sprintf("part-%d", i), []uint64{1, 2, 3}) require.Equal(t, len(result), 3) - require.Equal(t, result[1], true) - require.Equal(t, result[2], false) - require.Equal(t, result[3], true) + require.True(t, result[1]) + require.False(t, result[2]) + require.True(t, result[3]) } } @@ -200,9 +200,9 @@ func TestExtensionAck_QueryAcks_Unidempotent(t *testing.T) { for i := 0; i < 100; i++ { result := ext.QueryAcks(fmt.Sprintf("part-%d", i), []uint64{1, 2, 3}) require.Equal(t, len(result), 3) - require.Equal(t, result[1], false) - require.Equal(t, result[2], false) - require.Equal(t, result[3], false) + require.False(t, result[1]) + require.False(t, result[2]) + require.False(t, result[3]) } } @@ -234,9 +234,9 @@ func TestExtensionAckAsync(t *testing.T) { for i := 0; i < partitionCount; i++ { result := ext.QueryAcks(fmt.Sprintf("part-%d", i), []uint64{1, 2, 3}) require.Equal(t, len(result), 3) - require.Equal(t, result[1], false) - require.Equal(t, result[2], false) - require.Equal(t, result[3], false) + require.False(t, result[1]) + require.False(t, result[2]) + require.False(t, result[3]) } wg.Add(partitionCount) @@ -260,15 +260,15 @@ func TestExtensionAckAsync(t *testing.T) { if i%2 == 0 { result := ext.QueryAcks(fmt.Sprintf("part-%d", i), []uint64{1, 2, 3}) 
require.Equal(t, len(result), 3) - require.Equal(t, result[1], false) - require.Equal(t, result[2], true) - require.Equal(t, result[3], false) + require.False(t, result[1]) + require.True(t, result[2]) + require.False(t, result[3]) } else { result := ext.QueryAcks(fmt.Sprintf("part-%d", i), []uint64{1, 2, 3}) require.Equal(t, len(result), 3) - require.Equal(t, result[1], true) - require.Equal(t, result[2], false) - require.Equal(t, result[3], true) + require.True(t, result[1]) + require.False(t, result[2]) + require.True(t, result[3]) } } wg.Add(100) @@ -286,8 +286,8 @@ func TestExtensionAckAsync(t *testing.T) { for i := 0; i < partitionCount; i++ { result := <-resultChan require.Equal(t, len(result), 3) - require.Equal(t, result[1], false) - require.Equal(t, result[2], false) - require.Equal(t, result[3], false) + require.False(t, result[1]) + require.False(t, result[2]) + require.False(t, result[3]) } } diff --git a/extension/storage/filestorage/factory_test.go b/extension/storage/filestorage/factory_test.go index 448b7fe51047..875a18fec092 100644 --- a/extension/storage/filestorage/factory_test.go +++ b/extension/storage/filestorage/factory_test.go @@ -29,7 +29,7 @@ func TestFactory(t *testing.T) { require.Equal(t, expected, cfg.Directory) } require.Equal(t, time.Second, cfg.Timeout) - require.Equal(t, false, cfg.FSync) + require.False(t, cfg.FSync) tests := []struct { name string diff --git a/internal/aws/containerinsight/utils_test.go b/internal/aws/containerinsight/utils_test.go index 457cb7350b5c..87458895ac70 100644 --- a/internal/aws/containerinsight/utils_test.go +++ b/internal/aws/containerinsight/utils_test.go @@ -69,32 +69,32 @@ func TestMetricName(t *testing.T) { } func TestIsNode(t *testing.T) { - assert.Equal(t, true, IsNode(TypeNode)) - assert.Equal(t, true, IsNode(TypeNodeNet)) - assert.Equal(t, true, IsNode(TypeNodeFS)) - assert.Equal(t, true, IsNode(TypeNodeDiskIO)) - assert.Equal(t, false, IsNode(TypePod)) + assert.True(t, IsNode(TypeNode)) + assert.True(t, IsNode(TypeNodeNet)) + assert.True(t, IsNode(TypeNodeFS)) + assert.True(t, IsNode(TypeNodeDiskIO)) + assert.False(t, IsNode(TypePod)) } func TestIsInstance(t *testing.T) { - assert.Equal(t, true, IsInstance(TypeInstance)) - assert.Equal(t, true, IsInstance(TypeInstanceNet)) - assert.Equal(t, true, IsInstance(TypeInstanceFS)) - assert.Equal(t, true, IsInstance(TypeInstanceDiskIO)) - assert.Equal(t, false, IsInstance(TypePod)) + assert.True(t, IsInstance(TypeInstance)) + assert.True(t, IsInstance(TypeInstanceNet)) + assert.True(t, IsInstance(TypeInstanceFS)) + assert.True(t, IsInstance(TypeInstanceDiskIO)) + assert.False(t, IsInstance(TypePod)) } func TestIsContainer(t *testing.T) { - assert.Equal(t, true, IsContainer(TypeContainer)) - assert.Equal(t, true, IsContainer(TypeContainerDiskIO)) - assert.Equal(t, true, IsContainer(TypeContainerFS)) - assert.Equal(t, false, IsContainer(TypePod)) + assert.True(t, IsContainer(TypeContainer)) + assert.True(t, IsContainer(TypeContainerDiskIO)) + assert.True(t, IsContainer(TypeContainerFS)) + assert.False(t, IsContainer(TypePod)) } func TestIsPod(t *testing.T) { - assert.Equal(t, true, IsPod(TypePod)) - assert.Equal(t, true, IsPod(TypePodNet)) - assert.Equal(t, false, IsPod(TypeInstance)) + assert.True(t, IsPod(TypePod)) + assert.True(t, IsPod(TypePodNet)) + assert.False(t, IsPod(TypeInstance)) } func convertToInt64(value any) int64 { @@ -145,7 +145,7 @@ func checkMetricsAreExpected(t *testing.T, md pmetric.Metrics, fields map[string for key, val := range tags { 
log.Printf("key=%v value=%v", key, val) attr, ok := attributes.Get(key) - assert.Equal(t, true, ok) + assert.True(t, ok) if key == Timestamp { timeUnixNano, _ = strconv.ParseUint(val, 10, 64) val = strconv.FormatUint(timeUnixNano/uint64(time.Millisecond), 10) diff --git a/internal/aws/metrics/metric_calculator_test.go b/internal/aws/metrics/metric_calculator_test.go index 27a32d0f62ae..04e41829489c 100644 --- a/internal/aws/metrics/metric_calculator_test.go +++ b/internal/aws/metrics/metric_calculator_test.go @@ -106,13 +106,13 @@ func TestMapWithExpiryAdd(t *testing.T) { store.Set(Key{MetricMetadata: "key1"}, MetricValue{RawValue: value1}) val, ok := store.Get(Key{MetricMetadata: "key1"}) store.Unlock() - assert.Equal(t, true, ok) + assert.True(t, ok) assert.Equal(t, value1, val.RawValue) store.Lock() defer store.Unlock() val, ok = store.Get(Key{MetricMetadata: "key2"}) - assert.Equal(t, false, ok) + assert.False(t, ok) assert.True(t, val == nil) require.NoError(t, store.Shutdown()) } @@ -134,7 +134,7 @@ func TestMapWithExpiryCleanup(t *testing.T) { val, ok := store.Get(Key{MetricMetadata: "key1"}) - assert.Equal(t, true, ok) + assert.True(t, ok) assert.Equal(t, value1, val.RawValue.(float64)) assert.Equal(t, 1, store.Size()) store.Unlock() @@ -143,7 +143,7 @@ func TestMapWithExpiryCleanup(t *testing.T) { store.CleanUp(time.Now()) store.Lock() val, ok = store.Get(Key{MetricMetadata: "key1"}) - assert.Equal(t, false, ok) + assert.False(t, ok) assert.True(t, val == nil) assert.Equal(t, 0, store.Size()) store.Unlock() diff --git a/internal/otelarrow/test/e2e_test.go b/internal/otelarrow/test/e2e_test.go index 080b44f58e86..85783b00361a 100644 --- a/internal/otelarrow/test/e2e_test.go +++ b/internal/otelarrow/test/e2e_test.go @@ -377,7 +377,7 @@ func consumerFailure(t *testing.T, err error) { require.Error(t, err) // there should be no permanent errors anywhere in this test. 
- require.True(t, !consumererror.IsPermanent(err), + require.False(t, consumererror.IsPermanent(err), "should not be permanent: %v", err) stat, ok := status.FromError(err) diff --git a/pkg/experimentalmetricmetadata/entity_events_test.go b/pkg/experimentalmetricmetadata/entity_events_test.go index e6e5e1bf66e5..13c13cec2f35 100644 --- a/pkg/experimentalmetricmetadata/entity_events_test.go +++ b/pkg/experimentalmetricmetadata/entity_events_test.go @@ -91,7 +91,7 @@ func Test_EntityEventsSlice_ConvertAndMoveToLogs(t *testing.T) { // Check the Scope v, ok := scopeLogs.Scope().Attributes().Get(semconvOtelEntityEventAsScope) assert.True(t, ok) - assert.Equal(t, true, v.Bool()) + assert.True(t, v.Bool()) records := scopeLogs.LogRecords() assert.Equal(t, 2, records.Len()) diff --git a/pkg/translator/loki/logs_to_loki_test.go b/pkg/translator/loki/logs_to_loki_test.go index af5c09ff9f4e..d14486089658 100644 --- a/pkg/translator/loki/logs_to_loki_test.go +++ b/pkg/translator/loki/logs_to_loki_test.go @@ -218,7 +218,7 @@ func TestLogsToLokiRequestWithGroupingByTenant(t *testing.T) { for tenant, request := range requests { want, ok := tt.expected[tenant] - assert.Equal(t, ok, true) + assert.True(t, ok) streams := request.Streams for s := 0; s < len(streams); s++ { diff --git a/processor/groupbyattrsprocessor/factory_test.go b/processor/groupbyattrsprocessor/factory_test.go index 1b4cd30b1319..885049330a87 100644 --- a/processor/groupbyattrsprocessor/factory_test.go +++ b/processor/groupbyattrsprocessor/factory_test.go @@ -26,17 +26,17 @@ func TestCreateTestProcessor(t *testing.T) { tp, err := createTracesProcessor(context.Background(), processortest.NewNopSettings(), cfg, consumertest.NewNop()) assert.NoError(t, err) assert.NotNil(t, tp) - assert.Equal(t, true, tp.Capabilities().MutatesData) + assert.True(t, tp.Capabilities().MutatesData) lp, err := createLogsProcessor(context.Background(), processortest.NewNopSettings(), cfg, consumertest.NewNop()) assert.NoError(t, err) assert.NotNil(t, lp) - assert.Equal(t, true, lp.Capabilities().MutatesData) + assert.True(t, lp.Capabilities().MutatesData) mp, err := createMetricsProcessor(context.Background(), processortest.NewNopSettings(), cfg, consumertest.NewNop()) assert.NoError(t, err) assert.NotNil(t, mp) - assert.Equal(t, true, mp.Capabilities().MutatesData) + assert.True(t, mp.Capabilities().MutatesData) } func TestNoKeys(t *testing.T) { diff --git a/processor/groupbytraceprocessor/processor_test.go b/processor/groupbytraceprocessor/processor_test.go index ddc7675a055f..4e65362e5599 100644 --- a/processor/groupbytraceprocessor/processor_test.go +++ b/processor/groupbytraceprocessor/processor_test.go @@ -152,7 +152,7 @@ func TestProcessorCapabilities(t *testing.T) { // verify assert.NotNil(t, p) - assert.Equal(t, true, caps.MutatesData) + assert.True(t, caps.MutatesData) } func TestProcessBatchDoesntFail(t *testing.T) { diff --git a/processor/k8sattributesprocessor/e2e_test.go b/processor/k8sattributesprocessor/e2e_test.go index ef0b553ca320..780dd6fe9793 100644 --- a/processor/k8sattributesprocessor/e2e_test.go +++ b/processor/k8sattributesprocessor/e2e_test.go @@ -896,7 +896,7 @@ func scanTracesForAttributes(t *testing.T, ts *consumertest.TracesSink, expected for i := 0; i < traces.ResourceSpans().Len(); i++ { resource := traces.ResourceSpans().At(i).Resource() service, exist := resource.Attributes().Get("service.name") - assert.Equal(t, true, exist, "span do not has 'service.name' attribute in resource") + assert.True(t, exist, "span do not has 
'service.name' attribute in resource") if service.AsString() != expectedService { continue } @@ -917,7 +917,7 @@ func scanMetricsForAttributes(t *testing.T, ms *consumertest.MetricsSink, expect for i := 0; i < metrics.ResourceMetrics().Len(); i++ { resource := metrics.ResourceMetrics().At(i).Resource() service, exist := resource.Attributes().Get("service.name") - assert.Equal(t, true, exist, "metric do not has 'service.name' attribute in resource") + assert.True(t, exist, "metric do not has 'service.name' attribute in resource") if service.AsString() != expectedService { continue } @@ -938,7 +938,7 @@ func scanLogsForAttributes(t *testing.T, ls *consumertest.LogsSink, expectedServ for i := 0; i < logs.ResourceLogs().Len(); i++ { resource := logs.ResourceLogs().At(i).Resource() service, exist := resource.Attributes().Get("service.name") - assert.Equal(t, true, exist, "log do not has 'service.name' attribute in resource") + assert.True(t, exist, "log do not has 'service.name' attribute in resource") if service.AsString() != expectedService { continue } diff --git a/processor/metricstransformprocessor/metrics_transform_processor_group_test.go b/processor/metricstransformprocessor/metrics_transform_processor_group_test.go index 1f38ab8b3a49..85bf8b9e7694 100644 --- a/processor/metricstransformprocessor/metrics_transform_processor_group_test.go +++ b/processor/metricstransformprocessor/metrics_transform_processor_group_test.go @@ -75,7 +75,7 @@ func TestMetricsGrouping(t *testing.T) { require.NoError(t, err) caps := mtp.Capabilities() - assert.Equal(t, true, caps.MutatesData) + assert.True(t, caps.MutatesData) input, err := golden.ReadMetrics(filepath.Join("testdata", "operation_group", test.name+"_in.yaml")) require.NoError(t, err) diff --git a/processor/metricstransformprocessor/metrics_transform_processor_test.go b/processor/metricstransformprocessor/metrics_transform_processor_test.go index e746db070630..22e5b851a304 100644 --- a/processor/metricstransformprocessor/metrics_transform_processor_test.go +++ b/processor/metricstransformprocessor/metrics_transform_processor_test.go @@ -37,7 +37,7 @@ func TestMetricsTransformProcessor(t *testing.T) { require.NoError(t, err) caps := mtp.Capabilities() - assert.Equal(t, true, caps.MutatesData) + assert.True(t, caps.MutatesData) // process inMetrics := pmetric.NewMetrics() diff --git a/processor/redactionprocessor/factory_test.go b/processor/redactionprocessor/factory_test.go index e0a667fc0b55..7afe5741c794 100644 --- a/processor/redactionprocessor/factory_test.go +++ b/processor/redactionprocessor/factory_test.go @@ -24,5 +24,5 @@ func TestCreateTestProcessor(t *testing.T) { tp, err := createTracesProcessor(context.Background(), processortest.NewNopSettings(), cfg, consumertest.NewNop()) assert.NoError(t, err) assert.NotNil(t, tp) - assert.Equal(t, true, tp.Capabilities().MutatesData) + assert.True(t, tp.Capabilities().MutatesData) } diff --git a/processor/routingprocessor/logs_test.go b/processor/routingprocessor/logs_test.go index e0f94ef90ef7..2639517ceecb 100644 --- a/processor/routingprocessor/logs_test.go +++ b/processor/routingprocessor/logs_test.go @@ -32,7 +32,7 @@ func TestLogProcessorCapabilities(t *testing.T) { require.NotNil(t, p) // verify - assert.Equal(t, false, p.Capabilities().MutatesData) + assert.False(t, p.Capabilities().MutatesData) } func TestLogs_RoutingWorks_Context(t *testing.T) { diff --git a/processor/routingprocessor/metrics_test.go b/processor/routingprocessor/metrics_test.go index d5404eaa0aaf..2d6937b890d2 100644 
--- a/processor/routingprocessor/metrics_test.go +++ b/processor/routingprocessor/metrics_test.go @@ -33,7 +33,7 @@ func TestMetricProcessorCapabilities(t *testing.T) { require.NotNil(t, p) // verify - assert.Equal(t, false, p.Capabilities().MutatesData) + assert.False(t, p.Capabilities().MutatesData) } func TestMetrics_AreCorrectlySplitPerResourceAttributeRouting(t *testing.T) { diff --git a/processor/routingprocessor/traces_test.go b/processor/routingprocessor/traces_test.go index 0be65e294c18..d7ae15ceb4ec 100644 --- a/processor/routingprocessor/traces_test.go +++ b/processor/routingprocessor/traces_test.go @@ -515,7 +515,7 @@ func TestTraceProcessorCapabilities(t *testing.T) { require.NotNil(t, p) // verify - assert.Equal(t, false, p.Capabilities().MutatesData) + assert.False(t, p.Capabilities().MutatesData) } type mockTracesExporter struct { diff --git a/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/fs_extractor_test.go b/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/fs_extractor_test.go index 41dd6a57ef62..5e942cf80339 100644 --- a/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/fs_extractor_test.go +++ b/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/fs_extractor_test.go @@ -107,11 +107,11 @@ func TestFSStats(t *testing.T) { func TestAllowList(t *testing.T) { extractor := NewFileSystemMetricExtractor(nil) - assert.Equal(t, true, extractor.allowListRegexP.MatchString("/dev/shm")) - assert.Equal(t, true, extractor.allowListRegexP.MatchString("tmpfs")) - assert.Equal(t, true, extractor.allowListRegexP.MatchString("overlay")) - assert.Equal(t, false, extractor.allowListRegexP.MatchString("overlaytest")) - assert.Equal(t, false, extractor.allowListRegexP.MatchString("/dev")) + assert.True(t, extractor.allowListRegexP.MatchString("/dev/shm")) + assert.True(t, extractor.allowListRegexP.MatchString("tmpfs")) + assert.True(t, extractor.allowListRegexP.MatchString("overlay")) + assert.False(t, extractor.allowListRegexP.MatchString("overlaytest")) + assert.False(t, extractor.allowListRegexP.MatchString("/dev")) } func TestFSStatsWithAllowList(t *testing.T) { diff --git a/receiver/awscontainerinsightreceiver/internal/ecsInfo/utils_test.go b/receiver/awscontainerinsightreceiver/internal/ecsInfo/utils_test.go index 7060b60ad52b..5da8c810b219 100644 --- a/receiver/awscontainerinsightreceiver/internal/ecsInfo/utils_test.go +++ b/receiver/awscontainerinsightreceiver/internal/ecsInfo/utils_test.go @@ -42,11 +42,11 @@ func TestIsClosed(t *testing.T) { channel := make(chan bool) - assert.Equal(t, false, isClosed(channel)) + assert.False(t, isClosed(channel)) close(channel) - assert.Equal(t, true, isClosed(channel)) + assert.True(t, isClosed(channel)) } diff --git a/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/accumulator_test.go b/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/accumulator_test.go index 6f03a80f882e..f12c8bd15f2c 100644 --- a/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/accumulator_test.go +++ b/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/accumulator_test.go @@ -217,12 +217,12 @@ func TestGetMetricsDataCpuReservedZero(t *testing.T) { require.Less(t, 0, len(acc.mds)) } func TestIsEmptyStats(t *testing.T) { - require.EqualValues(t, false, isEmptyStats(&containerStats)) - require.EqualValues(t, true, isEmptyStats(cstats["002"])) + require.False(t, isEmptyStats(&containerStats)) + require.True(t, 
isEmptyStats(cstats["002"])) cstats = map[string]*ContainerStats{"001": nil} - require.EqualValues(t, true, isEmptyStats(cstats["001"])) + require.True(t, isEmptyStats(cstats["001"])) cstats = map[string]*ContainerStats{"001": {}} - require.EqualValues(t, true, isEmptyStats(cstats["001"])) + require.True(t, isEmptyStats(cstats["001"])) } func TestCalculateDuration(t *testing.T) { diff --git a/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/resource_test.go b/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/resource_test.go index 004805937495..2dcf2129c6b7 100644 --- a/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/resource_test.go +++ b/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/resource_test.go @@ -64,7 +64,7 @@ func TestContainerResourceForStoppedContainer(t *testing.T) { require.NotNil(t, r) attrMap := r.Attributes() getExitCodeAd, found := attrMap.Get(attributeContainerExitCode) - require.EqualValues(t, true, found) + require.True(t, found) require.EqualValues(t, 2, getExitCodeAd.Int()) require.EqualValues(t, 11, attrMap.Len()) expected := map[string]string{ @@ -165,7 +165,7 @@ func TestTaskResourceWithClusterARN(t *testing.T) { func verifyAttributeMap(t *testing.T, expected map[string]string, found pcommon.Map) { for key, val := range expected { attributeVal, found := found.Get(key) - require.EqualValues(t, true, found) + require.True(t, found) require.EqualValues(t, val, attributeVal.Str()) } diff --git a/receiver/bigipreceiver/client_test.go b/receiver/bigipreceiver/client_test.go index 417a06aca706..4bf162a75135 100644 --- a/receiver/bigipreceiver/client_test.go +++ b/receiver/bigipreceiver/client_test.go @@ -118,7 +118,7 @@ func TestGetNewToken(t *testing.T) { err := tc.GetNewToken(context.Background()) require.EqualError(t, err, "non 200 code returned 401") hasToken := tc.HasToken() - require.Equal(t, hasToken, false) + require.False(t, hasToken) }, }, { @@ -136,7 +136,7 @@ func TestGetNewToken(t *testing.T) { err := tc.GetNewToken(context.Background()) require.Contains(t, err.Error(), "failed to decode response payload") hasToken := tc.HasToken() - require.Equal(t, hasToken, false) + require.False(t, hasToken) }, }, { @@ -156,7 +156,7 @@ func TestGetNewToken(t *testing.T) { err := tc.GetNewToken(context.Background()) require.NoError(t, err) hasToken := tc.HasToken() - require.Equal(t, hasToken, true) + require.True(t, hasToken) }, }, } diff --git a/receiver/datadogreceiver/receiver_test.go b/receiver/datadogreceiver/receiver_test.go index 2f5bab833494..526b8967fccc 100644 --- a/receiver/datadogreceiver/receiver_test.go +++ b/receiver/datadogreceiver/receiver_test.go @@ -308,7 +308,7 @@ func TestDatadogMetricsV1_EndToEnd(t *testing.T) { assert.Equal(t, pmetric.MetricTypeSum, metric.Type()) assert.Equal(t, "system.load.1", metric.Name()) assert.Equal(t, pmetric.AggregationTemporalityDelta, metric.Sum().AggregationTemporality()) - assert.Equal(t, false, metric.Sum().IsMonotonic()) + assert.False(t, metric.Sum().IsMonotonic()) assert.Equal(t, pcommon.Timestamp(1636629071*1_000_000_000), metric.Sum().DataPoints().At(0).Timestamp()) assert.Equal(t, 0.7, metric.Sum().DataPoints().At(0).DoubleValue()) expectedEnvironment, _ := metric.Sum().DataPoints().At(0).Attributes().Get("environment") @@ -386,7 +386,7 @@ func TestDatadogMetricsV2_EndToEnd(t *testing.T) { assert.Equal(t, pmetric.MetricTypeSum, metric.Type()) assert.Equal(t, "system.load.1", metric.Name()) assert.Equal(t, 
pmetric.AggregationTemporalityDelta, metric.Sum().AggregationTemporality()) - assert.Equal(t, false, metric.Sum().IsMonotonic()) + assert.False(t, metric.Sum().IsMonotonic()) assert.Equal(t, pcommon.Timestamp(1636629071*1_000_000_000), metric.Sum().DataPoints().At(0).Timestamp()) assert.Equal(t, 1.5, metric.Sum().DataPoints().At(0).DoubleValue()) assert.Equal(t, pcommon.Timestamp(0), metric.Sum().DataPoints().At(0).StartTimestamp()) diff --git a/receiver/k8seventsreceiver/k8s_event_to_logdata_test.go b/receiver/k8seventsreceiver/k8s_event_to_logdata_test.go index 85a00e44019f..7b0a84a7d163 100644 --- a/receiver/k8seventsreceiver/k8s_event_to_logdata_test.go +++ b/receiver/k8seventsreceiver/k8s_event_to_logdata_test.go @@ -35,11 +35,11 @@ func TestK8sEventToLogDataWithApiAndResourceVersion(t *testing.T) { ld := k8sEventToLogData(zap.NewNop(), k8sEvent) attrs := ld.ResourceLogs().At(0).Resource().Attributes() attr, ok := attrs.Get("k8s.object.api_version") - assert.Equal(t, true, ok) + assert.True(t, ok) assert.Equal(t, "v1", attr.AsString()) attr, ok = attrs.Get("k8s.object.resource_version") - assert.Equal(t, true, ok) + assert.True(t, ok) assert.Equal(t, "", attr.AsString()) // add ResourceVersion @@ -47,7 +47,7 @@ func TestK8sEventToLogDataWithApiAndResourceVersion(t *testing.T) { ld = k8sEventToLogData(zap.NewNop(), k8sEvent) attrs = ld.ResourceLogs().At(0).Resource().Attributes() attr, ok = attrs.Get("k8s.object.resource_version") - assert.Equal(t, true, ok) + assert.True(t, ok) assert.Equal(t, "7387066320", attr.AsString()) } diff --git a/receiver/k8seventsreceiver/receiver_test.go b/receiver/k8seventsreceiver/receiver_test.go index f096d00c68e1..88d00b204e89 100644 --- a/receiver/k8seventsreceiver/receiver_test.go +++ b/receiver/k8seventsreceiver/receiver_test.go @@ -118,15 +118,15 @@ func TestAllowEvent(t *testing.T) { k8sEvent := getEvent() shouldAllowEvent := recv.allowEvent(k8sEvent) - assert.Equal(t, shouldAllowEvent, true) + assert.True(t, shouldAllowEvent) k8sEvent.FirstTimestamp = v1.Time{Time: time.Now().Add(-time.Hour)} shouldAllowEvent = recv.allowEvent(k8sEvent) - assert.Equal(t, shouldAllowEvent, false) + assert.False(t, shouldAllowEvent) k8sEvent.FirstTimestamp = v1.Time{} shouldAllowEvent = recv.allowEvent(k8sEvent) - assert.Equal(t, shouldAllowEvent, false) + assert.False(t, shouldAllowEvent) } func getEvent() *corev1.Event { diff --git a/receiver/k8sobjectsreceiver/unstructured_to_logdata_test.go b/receiver/k8sobjectsreceiver/unstructured_to_logdata_test.go index 5735e6d084cb..56623eecadd8 100644 --- a/receiver/k8sobjectsreceiver/unstructured_to_logdata_test.go +++ b/receiver/k8sobjectsreceiver/unstructured_to_logdata_test.go @@ -86,7 +86,7 @@ func TestUnstructuredListToLogData(t *testing.T) { resourceAttributes := rl.Resource().Attributes() logRecords := rl.ScopeLogs().At(0).LogRecords() _, ok := resourceAttributes.Get(semconv.AttributeK8SNamespaceName) - assert.Equal(t, ok, false) + assert.False(t, ok) assert.Equal(t, rl.ScopeLogs().Len(), 1) assert.Equal(t, logRecords.Len(), 3) diff --git a/receiver/kafkametricsreceiver/broker_scraper_test.go b/receiver/kafkametricsreceiver/broker_scraper_test.go index b506bfdee400..590454bb8b80 100644 --- a/receiver/kafkametricsreceiver/broker_scraper_test.go +++ b/receiver/kafkametricsreceiver/broker_scraper_test.go @@ -110,7 +110,7 @@ func TestBrokerScraper_empty_resource_attribute(t *testing.T) { require.Equal(t, 1, md.ResourceMetrics().Len()) require.Equal(t, 1, md.ResourceMetrics().At(0).ScopeMetrics().Len()) _, ok := 
md.ResourceMetrics().At(0).Resource().Attributes().Get("kafka.cluster.alias") - require.Equal(t, false, ok) + require.False(t, ok) } func TestBrokerScraper_scrape(t *testing.T) { diff --git a/receiver/kafkareceiver/header_extraction_test.go b/receiver/kafkareceiver/header_extraction_test.go index 01e84538ea15..c2dacfff103f 100644 --- a/receiver/kafkareceiver/header_extraction_test.go +++ b/receiver/kafkareceiver/header_extraction_test.go @@ -215,6 +215,6 @@ func TestHeaderExtractionMetrics(t *testing.T) { func validateHeader(t *testing.T, rs pcommon.Resource, headerKey string, headerValue string) { val, ok := rs.Attributes().Get(headerKey) - assert.Equal(t, ok, true) + assert.True(t, ok) assert.Equal(t, val.Str(), headerValue) } diff --git a/receiver/prometheusreceiver/config_test.go b/receiver/prometheusreceiver/config_test.go index 36fa893baf1e..70d519c55549 100644 --- a/receiver/prometheusreceiver/config_test.go +++ b/receiver/prometheusreceiver/config_test.go @@ -43,8 +43,8 @@ func TestLoadConfig(t *testing.T) { r1 := cfg.(*Config) assert.Equal(t, r1.PrometheusConfig.ScrapeConfigs[0].JobName, "demo") assert.Equal(t, time.Duration(r1.PrometheusConfig.ScrapeConfigs[0].ScrapeInterval), 5*time.Second) - assert.Equal(t, r1.UseStartTimeMetric, true) - assert.Equal(t, r1.TrimMetricSuffixes, true) + assert.True(t, r1.UseStartTimeMetric) + assert.True(t, r1.TrimMetricSuffixes) assert.Equal(t, r1.StartTimeMetricRegex, "^(.+_)*process_start_time_seconds$") assert.True(t, r1.ReportExtraScrapeMetrics) diff --git a/receiver/prometheusreceiver/targetallocator/manager_test.go b/receiver/prometheusreceiver/targetallocator/manager_test.go index 3845f75ab67d..7423bf109fc1 100644 --- a/receiver/prometheusreceiver/targetallocator/manager_test.go +++ b/receiver/prometheusreceiver/targetallocator/manager_test.go @@ -775,8 +775,8 @@ func TestConfigureSDHTTPClientConfigFromTA(t *testing.T) { assert.NoError(t, err) - assert.Equal(t, false, httpSD.HTTPClientConfig.FollowRedirects) - assert.Equal(t, true, httpSD.HTTPClientConfig.TLSConfig.InsecureSkipVerify) + assert.False(t, httpSD.HTTPClientConfig.FollowRedirects) + assert.True(t, httpSD.HTTPClientConfig.TLSConfig.InsecureSkipVerify) assert.Equal(t, "test.server", httpSD.HTTPClientConfig.TLSConfig.ServerName) assert.Equal(t, "/path/to/ca", httpSD.HTTPClientConfig.TLSConfig.CAFile) assert.Equal(t, "/path/to/cert", httpSD.HTTPClientConfig.TLSConfig.CertFile) diff --git a/receiver/snmpreceiver/otel_metric_helper_test.go b/receiver/snmpreceiver/otel_metric_helper_test.go index 2e88cecebb59..3a4332ba2bb7 100644 --- a/receiver/snmpreceiver/otel_metric_helper_test.go +++ b/receiver/snmpreceiver/otel_metric_helper_test.go @@ -113,7 +113,7 @@ func TestCreateResource(t *testing.T) { actual := helper.createResource("r1", map[string]string{"key1": "val1"}) require.NotNil(t, actual) val, exists := actual.Resource().Attributes().Get("key1") - require.Equal(t, true, exists) + require.True(t, exists) require.Equal(t, "val1", val.AsString()) require.Equal(t, actual, helper.resourcesByKey["r1"]) }, @@ -248,7 +248,7 @@ func TestCreateMetric(t *testing.T) { require.NotNil(t, actual) require.Equal(t, "description", actual.Description()) require.Equal(t, pmetric.AggregationTemporalityDelta, actual.Sum().AggregationTemporality()) - require.Equal(t, false, actual.Sum().IsMonotonic()) + require.False(t, actual.Sum().IsMonotonic()) require.Equal(t, "m1", actual.Name()) require.Equal(t, "1", actual.Unit()) require.Equal(t, actual, helper.metricsByResource["r1"]["m1"]) @@ -345,7 +345,7 @@ 
func TestAddMetricDataPoint(t *testing.T) { require.NoError(t, err) require.Equal(t, data.value, actual.IntValue()) val, exists := actual.Attributes().Get("key1") - require.Equal(t, true, exists) + require.True(t, exists) require.Equal(t, "val1", val.AsString()) metricDataPoint := metric.Gauge().DataPoints().At(0) require.Equal(t, &metricDataPoint, actual) @@ -385,7 +385,7 @@ func TestAddMetricDataPoint(t *testing.T) { require.NoError(t, err) require.Equal(t, data.value, actual.DoubleValue()) val, exists := actual.Attributes().Get("key1") - require.Equal(t, true, exists) + require.True(t, exists) require.Equal(t, "val1", val.AsString()) metricDataPoint := metric.Sum().DataPoints().At(0) require.Equal(t, &metricDataPoint, actual) @@ -421,7 +421,7 @@ func TestAddMetricDataPoint(t *testing.T) { require.NoError(t, err) require.Equal(t, int64(10), actual.IntValue()) val, exists := actual.Attributes().Get("key1") - require.Equal(t, true, exists) + require.True(t, exists) require.Equal(t, "val1", val.AsString()) metricDataPoint := metric.Gauge().DataPoints().At(0) require.Equal(t, &metricDataPoint, actual) @@ -457,7 +457,7 @@ func TestAddMetricDataPoint(t *testing.T) { require.NoError(t, err) require.Equal(t, float64(10.0), actual.DoubleValue()) val, exists := actual.Attributes().Get("key1") - require.Equal(t, true, exists) + require.True(t, exists) require.Equal(t, "val1", val.AsString()) metricDataPoint := metric.Gauge().DataPoints().At(0) require.Equal(t, &metricDataPoint, actual) diff --git a/receiver/solacereceiver/unmarshaller_receive_test.go b/receiver/solacereceiver/unmarshaller_receive_test.go index cfda03246474..992caffb8651 100644 --- a/receiver/solacereceiver/unmarshaller_receive_test.go +++ b/receiver/solacereceiver/unmarshaller_receive_test.go @@ -762,7 +762,7 @@ func TestReceiveUnmarshallerInsertUserProperty(t *testing.T) { &receive_v1.SpanData_UserPropertyValue_BoolValue{BoolValue: true}, pcommon.ValueTypeBool, func(val pcommon.Value) { - assert.Equal(t, true, val.Bool()) + assert.True(t, val.Bool()) }, }, { From 240ff76cb71917177fc63d79b794c3f80b255b17 Mon Sep 17 00:00:00 2001 From: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com> Date: Thu, 29 Aug 2024 09:55:20 -0600 Subject: [PATCH 07/10] [pkg/ottl] Remove tracing from OTTL (#34910) **Description:** Reverts https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/33508 since it was causing performance issues **Link to tracking Issue:** Reopens https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/33433 Related to https://github.com/open-telemetry/opentelemetry-collector/issues/10858 Closes https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/34890 --- .chloggen/ottl-remove-tracing.yaml | 27 +++++ pkg/ottl/README.md | 18 --- pkg/ottl/go.mod | 6 +- pkg/ottl/go.sum | 8 +- pkg/ottl/parser.go | 64 +--------- pkg/ottl/parser_test.go | 184 +---------------------------- 6 files changed, 42 insertions(+), 265 deletions(-) create mode 100644 .chloggen/ottl-remove-tracing.yaml diff --git a/.chloggen/ottl-remove-tracing.yaml b/.chloggen/ottl-remove-tracing.yaml new file mode 100644 index 000000000000..b8ad5432f5e9 --- /dev/null +++ b/.chloggen/ottl-remove-tracing.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: breaking + +# The name of the component, or a single word describing the area of concern, (e.g. 
filelogreceiver) +component: ottl + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Remove tracing from OTTL due to performance concerns + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [34910] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/pkg/ottl/README.md b/pkg/ottl/README.md index b4962f84a2d7..57a53ac9e053 100644 --- a/pkg/ottl/README.md +++ b/pkg/ottl/README.md @@ -151,21 +151,3 @@ service: 2024-05-29T16:38:09.600-0600 debug ottl@v0.101.0/parser.go:268 TransformContext after statement execution {"kind": "processor", "name": "transform", "pipeline": "logs", "statement": "set(instrumentation_scope.attributes[\"test\"], [\"pass\"])", "condition matched": true, "TransformContext": {"resource": {"attributes": {"test": "pass"}, "dropped_attribute_count": 0}, "scope": {"attributes": {"test": ["pass"]}, "dropped_attribute_count": 0, "name": "", "version": ""}, "log_record": {"attributes": {"log.file.name": "test.log"}, "body": "test", "dropped_attribute_count": 0, "flags": 0, "observed_time_unix_nano": 1717022289500721000, "severity_number": 0, "severity_text": "", "span_id": "", "time_unix_nano": 0, "trace_id": ""}, "cache": {}}} 2024-05-29T16:38:09.601-0600 debug ottl@v0.101.0/parser.go:268 TransformContext after statement execution {"kind": "processor", "name": "transform", "pipeline": "logs", "statement": "set(attributes[\"test\"], true)", "condition matched": true, "TransformContext": {"resource": {"attributes": {"test": "pass"}, "dropped_attribute_count": 0}, "scope": {"attributes": {"test": ["pass"]}, "dropped_attribute_count": 0, "name": "", "version": ""}, "log_record": {"attributes": {"log.file.name": "test.log", "test": true}, "body": "test", "dropped_attribute_count": 0, "flags": 0, "observed_time_unix_nano": 1717022289500721000, "severity_number": 0, "severity_text": "", "span_id": "", "time_unix_nano": 0, "trace_id": ""}, "cache": {}}} ``` - -If configured to do so, the collector also emits traces for the execution of OTTL statement sequences. -These traces contain spans for the execution of each statement, including the statement itself and whether it has -been applied or not. To make use of this, enable the self monitoring of the collector by setting the -`--feature-gates=telemetry.useOtelWithSDKConfigurationForInternalTelemetry` flag, and using the following configuration -to export the traces to e.g. 
an OTLP API endpoint: - -```yaml -service: - telemetry: - traces: - processors: - - batch: - exporter: - otlp: - protocol: http/protobuf - endpoint: ${env:OTLP_ENDPOINT}/v1/traces -``` diff --git a/pkg/ottl/go.mod b/pkg/ottl/go.mod index e9ba4dddab18..d9e172032a1b 100644 --- a/pkg/ottl/go.mod +++ b/pkg/ottl/go.mod @@ -16,8 +16,6 @@ require ( go.opentelemetry.io/collector/component v0.108.1 go.opentelemetry.io/collector/pdata v1.14.1 go.opentelemetry.io/collector/semconv v0.108.1 - go.opentelemetry.io/otel v1.29.0 - go.opentelemetry.io/otel/sdk v1.29.0 go.opentelemetry.io/otel/trace v1.29.0 go.uber.org/goleak v1.3.0 go.uber.org/zap v1.27.0 @@ -45,11 +43,13 @@ require ( github.com/prometheus/common v0.55.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect go.opentelemetry.io/collector/config/configtelemetry v0.108.1 // indirect + go.opentelemetry.io/otel v1.29.0 // indirect go.opentelemetry.io/otel/exporters/prometheus v0.50.0 // indirect go.opentelemetry.io/otel/metric v1.29.0 // indirect + go.opentelemetry.io/otel/sdk v1.28.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.28.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/sys v0.24.0 // indirect + golang.org/x/sys v0.23.0 // indirect golang.org/x/text v0.17.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect google.golang.org/grpc v1.65.0 // indirect diff --git a/pkg/ottl/go.sum b/pkg/ottl/go.sum index 67fa66aff493..9eefe701513b 100644 --- a/pkg/ottl/go.sum +++ b/pkg/ottl/go.sum @@ -88,8 +88,8 @@ go.opentelemetry.io/otel/exporters/prometheus v0.50.0 h1:2Ewsda6hejmbhGFyUvWZjUT go.opentelemetry.io/otel/exporters/prometheus v0.50.0/go.mod h1:pMm5PkUo5YwbLiuEf7t2xg4wbP0/eSJrMxIMxKosynY= go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= -go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= -go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= +go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= +go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= go.opentelemetry.io/otel/sdk/metric v1.28.0 h1:OkuaKgKrgAbYrrY0t92c+cC+2F6hsFNnCQArXCKlg08= go.opentelemetry.io/otel/sdk/metric v1.28.0/go.mod h1:cWPjykihLAPvXKi4iZc1dpER3Jdq2Z0YLse3moQUCpg= go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= @@ -119,8 +119,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM= +golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= diff --git 
a/pkg/ottl/parser.go b/pkg/ottl/parser.go index f8d5ab7dabc8..4a0b3cabe7be 100644 --- a/pkg/ottl/parser.go +++ b/pkg/ottl/parser.go @@ -10,18 +10,9 @@ import ( "github.com/alecthomas/participle/v2" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/trace" - "go.opentelemetry.io/otel/trace/noop" "go.uber.org/zap" ) -const ( - logAttributeTraceID = "trace_id" - logAttributeSpanID = "span_id" -) - // Statement holds a top level Statement for processing telemetry data. A Statement is a combination of a function // invocation and the boolean expression to match telemetry for invoking the function. type Statement[K any] struct { @@ -240,7 +231,6 @@ type StatementSequence[K any] struct { statements []*Statement[K] errorMode ErrorMode telemetrySettings component.TelemetrySettings - tracer trace.Tracer } type StatementSequenceOption[K any] func(*StatementSequence[K]) @@ -260,10 +250,6 @@ func NewStatementSequence[K any](statements []*Statement[K], telemetrySettings c statements: statements, errorMode: PropagateError, telemetrySettings: telemetrySettings, - tracer: &noop.Tracer{}, - } - if telemetrySettings.TracerProvider != nil { - s.tracer = telemetrySettings.TracerProvider.Tracer("ottl") } for _, op := range options { op(&s) @@ -276,62 +262,20 @@ func NewStatementSequence[K any](statements []*Statement[K], telemetrySettings c // When the ErrorMode of the StatementSequence is `ignore`, errors are logged and execution continues to the next statement. // When the ErrorMode of the StatementSequence is `silent`, errors are not logged and execution continues to the next statement. func (s *StatementSequence[K]) Execute(ctx context.Context, tCtx K) error { - ctx, sequenceSpan := s.tracer.Start(ctx, "ottl/StatementSequenceExecution") - defer sequenceSpan.End() - s.telemetrySettings.Logger.Debug( - "initial TransformContext", - zap.Any("TransformContext", tCtx), - zap.String(logAttributeTraceID, sequenceSpan.SpanContext().TraceID().String()), - zap.String(logAttributeSpanID, sequenceSpan.SpanContext().SpanID().String()), - ) + s.telemetrySettings.Logger.Debug("initial TransformContext", zap.Any("TransformContext", tCtx)) for _, statement := range s.statements { - statementCtx, statementSpan := s.tracer.Start(ctx, "ottl/StatementExecution") - statementSpan.SetAttributes( - attribute.KeyValue{ - Key: "statement", - Value: attribute.StringValue(statement.origText), - }, - ) - _, condition, err := statement.Execute(statementCtx, tCtx) - statementSpan.SetAttributes( - attribute.KeyValue{ - Key: "condition.matched", - Value: attribute.BoolValue(condition), - }, - ) - s.telemetrySettings.Logger.Debug( - "TransformContext after statement execution", - zap.String("statement", statement.origText), - zap.Bool("condition matched", condition), - zap.Any("TransformContext", tCtx), - zap.String(logAttributeTraceID, statementSpan.SpanContext().TraceID().String()), - zap.String(logAttributeSpanID, statementSpan.SpanContext().SpanID().String()), - ) + _, condition, err := statement.Execute(ctx, tCtx) + s.telemetrySettings.Logger.Debug("TransformContext after statement execution", zap.String("statement", statement.origText), zap.Bool("condition matched", condition), zap.Any("TransformContext", tCtx)) if err != nil { - statementSpan.RecordError(err) - errMsg := fmt.Sprintf("failed to execute statement '%s': %v", statement.origText, err) - statementSpan.SetStatus(codes.Error, errMsg) if s.errorMode == PropagateError { - 
sequenceSpan.SetStatus(codes.Error, errMsg) - statementSpan.End() err = fmt.Errorf("failed to execute statement: %v, %w", statement.origText, err) return err } if s.errorMode == IgnoreError { - s.telemetrySettings.Logger.Warn( - "failed to execute statement", - zap.Error(err), - zap.String("statement", statement.origText), - zap.String(logAttributeTraceID, statementSpan.SpanContext().TraceID().String()), - zap.String(logAttributeSpanID, statementSpan.SpanContext().SpanID().String()), - ) + s.telemetrySettings.Logger.Warn("failed to execute statement", zap.Error(err), zap.String("statement", statement.origText)) } - } else { - statementSpan.SetStatus(codes.Ok, "statement executed successfully") } - statementSpan.End() } - sequenceSpan.SetStatus(codes.Ok, "statement sequence executed successfully") return nil } diff --git a/pkg/ottl/parser_test.go b/pkg/ottl/parser_test.go index 409d5ab34b5d..dc475b2b7d6a 100644 --- a/pkg/ottl/parser_test.go +++ b/pkg/ottl/parser_test.go @@ -13,12 +13,7 @@ import ( "time" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component/componenttest" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/sdk/trace" - "go.opentelemetry.io/otel/sdk/trace/tracetest" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottltest" ) @@ -2217,11 +2212,10 @@ func Test_Condition_Eval(t *testing.T) { func Test_Statements_Execute_Error(t *testing.T) { tests := []struct { - name string - condition boolExpressionEvaluator[any] - function ExprFunc[any] - errorMode ErrorMode - expectedSpans []expectedSpan + name string + condition boolExpressionEvaluator[any] + function ExprFunc[any] + errorMode ErrorMode }{ { name: "IgnoreError error from condition", @@ -2232,31 +2226,6 @@ func Test_Statements_Execute_Error(t *testing.T) { return 1, nil }, errorMode: IgnoreError, - expectedSpans: []expectedSpan{ - { - name: "ottl/StatementExecution", - attributes: []attribute.KeyValue{ - { - Key: "statement", - Value: attribute.StringValue("test"), - }, - { - Key: "condition.matched", - Value: attribute.BoolValue(false), - }, - }, - status: trace.Status{ - Code: codes.Error, - Description: "failed to execute statement 'test': test", - }, - }, - { - name: "ottl/StatementSequenceExecution", - status: trace.Status{ - Code: codes.Ok, - }, - }, - }, }, { name: "PropagateError error from condition", @@ -2267,32 +2236,6 @@ func Test_Statements_Execute_Error(t *testing.T) { return 1, nil }, errorMode: PropagateError, - expectedSpans: []expectedSpan{ - { - name: "ottl/StatementExecution", - attributes: []attribute.KeyValue{ - { - Key: "statement", - Value: attribute.StringValue("test"), - }, - { - Key: "condition.matched", - Value: attribute.BoolValue(false), - }, - }, - status: trace.Status{ - Code: codes.Error, - Description: "failed to execute statement 'test': test", - }, - }, - { - name: "ottl/StatementSequenceExecution", - status: trace.Status{ - Code: codes.Error, - Description: "failed to execute statement 'test': test", - }, - }, - }, }, { name: "IgnoreError error from function", @@ -2303,31 +2246,6 @@ func Test_Statements_Execute_Error(t *testing.T) { return 1, fmt.Errorf("test") }, errorMode: IgnoreError, - expectedSpans: []expectedSpan{ - { - name: "ottl/StatementExecution", - attributes: []attribute.KeyValue{ - { - Key: "statement", - Value: attribute.StringValue("test"), - }, - { - Key: "condition.matched", - Value: attribute.BoolValue(true), - }, - }, - status: 
trace.Status{ - Code: codes.Error, - Description: "failed to execute statement 'test': test", - }, - }, - { - name: "ottl/StatementSequenceExecution", - status: trace.Status{ - Code: codes.Ok, - }, - }, - }, }, { name: "PropagateError error from function", @@ -2338,32 +2256,6 @@ func Test_Statements_Execute_Error(t *testing.T) { return 1, fmt.Errorf("test") }, errorMode: PropagateError, - expectedSpans: []expectedSpan{ - { - name: "ottl/StatementExecution", - attributes: []attribute.KeyValue{ - { - Key: "statement", - Value: attribute.StringValue("test"), - }, - { - Key: "condition.matched", - Value: attribute.BoolValue(true), - }, - }, - status: trace.Status{ - Code: codes.Error, - Description: "failed to execute statement 'test': test", - }, - }, - { - name: "ottl/StatementSequenceExecution", - status: trace.Status{ - Code: codes.Error, - Description: "failed to execute statement 'test': test", - }, - }, - }, }, { name: "SilentError error from condition", @@ -2374,31 +2266,6 @@ func Test_Statements_Execute_Error(t *testing.T) { return 1, nil }, errorMode: SilentError, - expectedSpans: []expectedSpan{ - { - name: "ottl/StatementExecution", - attributes: []attribute.KeyValue{ - { - Key: "statement", - Value: attribute.StringValue("test"), - }, - { - Key: "condition.matched", - Value: attribute.BoolValue(false), - }, - }, - status: trace.Status{ - Code: codes.Error, - Description: "failed to execute statement 'test': test", - }, - }, - { - name: "ottl/StatementSequenceExecution", - status: trace.Status{ - Code: codes.Ok, - }, - }, - }, }, { name: "SilentError error from function", @@ -2409,31 +2276,6 @@ func Test_Statements_Execute_Error(t *testing.T) { return 1, fmt.Errorf("test") }, errorMode: SilentError, - expectedSpans: []expectedSpan{ - { - name: "ottl/StatementExecution", - attributes: []attribute.KeyValue{ - { - Key: "statement", - Value: attribute.StringValue("test"), - }, - { - Key: "condition.matched", - Value: attribute.BoolValue(true), - }, - }, - status: trace.Status{ - Code: codes.Error, - Description: "failed to execute statement 'test': test", - }, - }, - { - name: "ottl/StatementSequenceExecution", - status: trace.Status{ - Code: codes.Ok, - }, - }, - }, }, } for _, tt := range tests { @@ -2443,15 +2285,11 @@ func Test_Statements_Execute_Error(t *testing.T) { { condition: BoolExpr[any]{tt.condition}, function: Expr[any]{exprFunc: tt.function}, - origText: "test", }, }, errorMode: tt.errorMode, telemetrySettings: componenttest.NewNopTelemetrySettings(), } - spanRecorder := tracetest.NewSpanRecorder() - statements.telemetrySettings.TracerProvider = trace.NewTracerProvider(trace.WithSpanProcessor(spanRecorder)) - statements.tracer = statements.telemetrySettings.TracerProvider.Tracer("ottl") err := statements.Execute(context.Background(), nil) if tt.errorMode == PropagateError { @@ -2459,14 +2297,6 @@ func Test_Statements_Execute_Error(t *testing.T) { } else { assert.NoError(t, err) } - - require.Len(t, spanRecorder.Ended(), len(tt.expectedSpans)) - - for i, es := range tt.expectedSpans { - require.Equal(t, es.name, spanRecorder.Ended()[i].Name()) - require.Equal(t, es.attributes, spanRecorder.Ended()[i].Attributes()) - require.Equal(t, es.status, spanRecorder.Ended()[i].Status()) - } }) } } @@ -2670,9 +2500,3 @@ func Test_ConditionSequence_Eval_Error(t *testing.T) { }) } } - -type expectedSpan struct { - name string - attributes []attribute.KeyValue - status trace.Status -} From 29cd095bcfed82390b5bd50732a4924382d7c4a8 Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Fri, 
30 Aug 2024 09:39:11 +0200 Subject: [PATCH 08/10] [chore]: enable len rule from testifylint (#34921) #### Description Testifylint is a linter that provides best practices with the use of testify. This PR enables [len](https://github.com/Antonboom/testifylint?tab=readme-ov-file#len) rule from [testifylint](https://github.com/Antonboom/testifylint) --- .golangci.yml | 1 - .../internal/traces/worker_test.go | 6 +- connector/countconnector/connector_test.go | 6 +- .../datadogconnector/connector_native_test.go | 6 +- connector/datadogconnector/connector_test.go | 6 +- .../roundrobinconnector/connector_test.go | 36 +++--- .../servicegraphconnector/connector_test.go | 12 +- .../alertmanager_exporter_test.go | 4 +- .../logsdata_to_logservice_test.go | 2 +- .../tracedata_to_logservice_test.go | 2 +- exporter/awsemfexporter/config_test.go | 2 +- exporter/awsemfexporter/emf_exporter_test.go | 4 +- .../awsemfexporter/grouped_metric_test.go | 12 +- .../awsemfexporter/metric_declaration_test.go | 14 +- .../awsemfexporter/metric_translator_test.go | 22 ++-- .../internal/translator/aws_test.go | 24 ++-- .../internal/translator/cause_test.go | 2 +- .../internal/translator/segment_test.go | 74 +++++------ .../internal/translator/span_links_test.go | 16 +-- .../adx_exporter_test.go | 2 +- .../metricexporter_test.go | 4 +- .../trace_to_envelope_test.go | 2 +- exporter/datadogexporter/examples_test.go | 2 +- .../metrics/series_deprecated_test.go | 2 +- .../internal/metrics/series_test.go | 4 +- .../datadogexporter/metrics_exporter_test.go | 4 +- .../config_test.go | 2 +- exporter/influxdbexporter/writer_test.go | 4 +- .../internal/arrow/exporter_test.go | 10 +- exporter/otelarrowexporter/otelarrow_test.go | 8 +- exporter/prometheusexporter/collector_test.go | 4 +- .../exporter_test.go | 8 +- .../helper_test.go | 6 +- exporter/signalfxexporter/factory_test.go | 26 ++-- .../internal/apm/tracetracker/tracker_test.go | 4 +- .../internal/hostmetadata/metadata_test.go | 4 +- .../internal/translation/converter_test.go | 6 +- .../internal/translation/translator_test.go | 16 +-- exporter/splunkhecexporter/client_test.go | 6 +- .../splunkhecexporter/integration_test.go | 6 +- .../logsdata_to_logservice_test.go | 2 +- extension/ackextension/inmemory_test.go | 22 ++-- .../observer/ecsobserver/fetcher_test.go | 12 +- .../internal/ecsmock/service_test.go | 6 +- .../observer/hostobserver/extension_test.go | 2 +- extension/opampextension/registry_test.go | 2 +- .../storage/filestorage/extension_test.go | 10 +- extension/storage/storagetest/host_test.go | 8 +- internal/aws/cwlogs/pusher_test.go | 26 ++-- internal/aws/ecsutil/client_test.go | 2 +- .../aws/ecsutil/metadata_provider_test.go | 2 +- internal/aws/k8s/k8sclient/clientset_test.go | 4 +- internal/aws/k8s/k8sclient/obj_store_test.go | 8 +- internal/aws/xray/telemetry/sender_test.go | 4 +- internal/common/testutil/testutil.go | 4 +- internal/common/testutil/testutil_test.go | 4 +- .../coreinternal/consumerretry/logs_test.go | 4 +- .../goldendataset/pict_metrics_gen_test.go | 2 +- .../goldendataset/traces_generator_test.go | 2 +- internal/docker/docker_test.go | 4 +- .../filterset/regexp/regexpfilterset_test.go | 2 +- internal/kubelet/client_test.go | 4 +- internal/otelarrow/testutil/testutil.go | 4 +- internal/otelarrow/testutil/testutil_test.go | 4 +- pkg/ottl/functions_test.go | 6 +- pkg/stanza/adapter/frompdataconverter_test.go | 2 +- pkg/stanza/fileconsumer/attrs/attrs_test.go | 2 +- pkg/stanza/fileconsumer/file_test.go | 2 +- 
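(As a minimal illustrative sketch, not part of the upstream patch: the `len` rule enabled here rewrites length assertions of the form `assert.Equal(t, n, len(x))` into `assert.Len(t, x, n)`. The package and variable names below are hypothetical; the hunks that follow apply exactly this mechanical rewrite across the listed test files.)

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestAttributeCount(t *testing.T) {
	attributes := []string{"service.name", "host.name", "os.type"}

	// Before: flagged by testifylint's len rule.
	assert.Equal(t, 3, len(attributes))

	// After: preferred form; on failure it also prints the slice contents.
	assert.Len(t, attributes, 3)
}
```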
.../internal/fingerprint/fingerprint_test.go | 2 +- .../internal/reader/fingerprint_test.go | 2 +- pkg/stanza/operator/helper/emitter_test.go | 4 +- .../operator/input/windows/buffer_test.go | 4 +- .../transformer/recombine/transformer_test.go | 4 +- pkg/stanza/pipeline/config_test.go | 12 +- .../jaeger/traces_to_jaegerproto_test.go | 2 +- .../opencensus/traces_to_oc_test.go | 2 +- pkg/translator/signalfx/to_metrics_test.go | 2 +- .../skywalkingproto_to_traces_test.go | 2 +- .../zipkin/zipkinv2/from_translator_test.go | 2 +- pkg/winperfcounters/watcher_test.go | 2 +- .../processor_test.go | 2 +- .../deltatorateprocessor/processor_test.go | 2 +- processor/filterprocessor/metrics_test.go | 4 +- processor/filterprocessor/traces_test.go | 2 +- .../geoipprocessor/geoip_processor_test.go | 6 +- .../groupbytraceprocessor/processor_test.go | 2 +- .../internal/kube/client_test.go | 120 +++++++++--------- .../processor_test.go | 2 +- .../metrics_transform_processor_group_test.go | 2 +- .../metrics_transform_processor_test.go | 2 +- .../logsprocessor_test.go | 14 +- .../tracesprocessor_test.go | 24 ++-- .../internal/resourcedetection_test.go | 4 +- .../tailsamplingprocessor/processor_test.go | 6 +- processor/transformprocessor/config_test.go | 2 +- receiver/awscloudwatchreceiver/logs_test.go | 2 +- .../cadvisor/container_info_processor_test.go | 2 +- .../extractors/diskio_extractor_test.go | 2 +- .../cadvisor/extractors/extractor_test.go | 2 +- .../cadvisor/extractors/fs_extractor_test.go | 4 +- .../cadvisor/extractors/net_extractor_test.go | 2 +- .../internal/ecsInfo/ecs_task_info_test.go | 4 +- .../internal/host/ebsvolume_test.go | 4 +- .../internal/stores/podstore_test.go | 4 +- receiver/azureblobreceiver/config_test.go | 2 +- receiver/azureeventhubreceiver/config_test.go | 2 +- .../internal/translator/series_test.go | 4 +- .../translator/traces_translator_test.go | 12 +- receiver/gitproviderreceiver/config_test.go | 2 +- .../scraper/githubscraper/helpers_test.go | 2 +- .../internal/filter/itemcardinality_test.go | 8 +- .../internal/filter/testhelpers_test.go | 2 +- .../filterfactory/filterbuilder_test.go | 14 +- .../internal/metadata/metricsbuilder_test.go | 2 +- .../metadata/metricsdatapoint_test.go | 6 +- .../internal/metadata/metricsmetadata_test.go | 2 +- .../internal/metadataparser/metadata_test.go | 8 +- .../metadataparser/metadataparser_test.go | 6 +- .../statsreader/databasereader_test.go | 4 +- .../statsreaders_mockedspanner_test.go | 2 +- .../statsreader/timestampsgenerator_test.go | 6 +- .../receiver_test.go | 8 +- receiver/hostmetricsreceiver/config_test.go | 2 +- receiver/jaegerreceiver/jaeger_agent_test.go | 2 +- .../jaegerreceiver/trace_receiver_test.go | 6 +- .../internal/cronjob/cronjobs_test.go | 2 +- .../internal/metadata/metadata_test.go | 2 +- .../internal/statefulset/statefulsets_test.go | 2 +- .../internal/kubelet/accumulator_test.go | 2 +- .../internal/kubelet/metrics_test.go | 2 +- .../internal/octrace/observability_test.go | 2 +- .../opencensusreceiver/opencensus_test.go | 4 +- receiver/otelarrowreceiver/otelarrow_test.go | 6 +- receiver/podmanreceiver/podman_test.go | 10 +- .../podmanreceiver/record_metrics_test.go | 2 +- receiver/prometheusreceiver/config_test.go | 4 +- .../metrics_receiver_helper_test.go | 10 +- receiver/redisreceiver/redis_svc_test.go | 2 +- receiver/sapmreceiver/trace_receiver_test.go | 4 +- receiver/signalfxreceiver/receiver_test.go | 2 +- receiver/splunkhecreceiver/receiver_test.go | 28 ++-- receiver/sqlqueryreceiver/integration_test.go | 2 +- 
.../sqlserverreceiver/scraper_windows_test.go | 2 +- .../internal/configssh/configssh_test.go | 4 +- .../internal/protocol/statsd_parser_test.go | 4 +- .../internal/transport/server_test.go | 2 +- receiver/vcenterreceiver/client_test.go | 2 +- receiver/wavefrontreceiver/receiver_test.go | 2 +- .../receiver_windows_test.go | 2 +- testbed/testbed/validator.go | 2 +- testbed/tests/syslog_integration_test.go | 4 +- 151 files changed, 504 insertions(+), 505 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 144d9065ad2c..8c17e0bb8216 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -141,7 +141,6 @@ linters-settings: - expected-actual - float-compare - go-require - - len - negative-positive - nil-compare - require-error diff --git a/cmd/telemetrygen/internal/traces/worker_test.go b/cmd/telemetrygen/internal/traces/worker_test.go index 0fbbf858de61..cc7b5a430325 100644 --- a/cmd/telemetrygen/internal/traces/worker_test.go +++ b/cmd/telemetrygen/internal/traces/worker_test.go @@ -262,7 +262,7 @@ func TestSpansWithNoAttrs(t *testing.T) { assert.Len(t, syncer.spans, 4) // each trace has two spans for _, span := range syncer.spans { attributes := span.Attributes() - assert.Equal(t, 2, len(attributes), "it shouldn't have more than 2 fixed attributes") + assert.Len(t, attributes, 2, "it shouldn't have more than 2 fixed attributes") } } @@ -284,7 +284,7 @@ func TestSpansWithOneAttrs(t *testing.T) { assert.Len(t, syncer.spans, 4) // each trace has two spans for _, span := range syncer.spans { attributes := span.Attributes() - assert.Equal(t, 3, len(attributes), "it should have more than 3 attributes") + assert.Len(t, attributes, 3, "it should have more than 3 attributes") } } @@ -306,7 +306,7 @@ func TestSpansWithMultipleAttrs(t *testing.T) { assert.Len(t, syncer.spans, 4) // each trace has two spans for _, span := range syncer.spans { attributes := span.Attributes() - assert.Equal(t, 4, len(attributes), "it should have more than 4 attributes") + assert.Len(t, attributes, 4, "it should have more than 4 attributes") } } diff --git a/connector/countconnector/connector_test.go b/connector/countconnector/connector_test.go index 0938e4fba557..44ad48f8e9a5 100644 --- a/connector/countconnector/connector_test.go +++ b/connector/countconnector/connector_test.go @@ -265,7 +265,7 @@ func TestTracesToMetrics(t *testing.T) { assert.NoError(t, conn.ConsumeTraces(context.Background(), testSpans)) allMetrics := sink.AllMetrics() - assert.Equal(t, 1, len(allMetrics)) + assert.Len(t, allMetrics, 1) // golden.WriteMetrics(t, filepath.Join("testdata", "traces", tc.name+".yaml"), allMetrics[0]) expected, err := golden.ReadMetrics(filepath.Join("testdata", "traces", tc.name+".yaml")) @@ -507,7 +507,7 @@ func TestMetricsToMetrics(t *testing.T) { assert.NoError(t, conn.ConsumeMetrics(context.Background(), testMetrics)) allMetrics := sink.AllMetrics() - assert.Equal(t, 1, len(allMetrics)) + assert.Len(t, allMetrics, 1) // golden.WriteMetrics(t, filepath.Join("testdata", "metrics", tc.name+".yaml"), allMetrics[0]) expected, err := golden.ReadMetrics(filepath.Join("testdata", "metrics", tc.name+".yaml")) @@ -679,7 +679,7 @@ func TestLogsToMetrics(t *testing.T) { assert.NoError(t, conn.ConsumeLogs(context.Background(), testLogs)) allMetrics := sink.AllMetrics() - assert.Equal(t, 1, len(allMetrics)) + assert.Len(t, allMetrics, 1) // golden.WriteMetrics(t, filepath.Join("testdata", "logs", tc.name+".yaml"), allMetrics[0]) expected, err := golden.ReadMetrics(filepath.Join("testdata", "logs", tc.name+".yaml")) diff --git 
a/connector/datadogconnector/connector_native_test.go b/connector/datadogconnector/connector_native_test.go index b2d2c9c6f57e..3a7ba0a1e1da 100644 --- a/connector/datadogconnector/connector_native_test.go +++ b/connector/datadogconnector/connector_native_test.go @@ -104,7 +104,7 @@ func TestContainerTagsNative(t *testing.T) { // check if the container tags are added to the metrics metrics := metricsSink.AllMetrics() - assert.Equal(t, 1, len(metrics)) + assert.Len(t, metrics, 1) ch := make(chan []byte, 100) tr := newTranslatorWithStatsChannel(t, zap.NewNop(), ch) @@ -117,7 +117,7 @@ func TestContainerTagsNative(t *testing.T) { require.NoError(t, err) tags := sp.Stats[0].Tags - assert.Equal(t, 3, len(tags)) + assert.Len(t, tags, 3) assert.ElementsMatch(t, []string{"region:my-region", "zone:my-zone", "az:my-az"}, tags) } @@ -187,7 +187,7 @@ func TestMeasuredAndClientKindNative(t *testing.T) { } metrics := metricsSink.AllMetrics() - require.Equal(t, 1, len(metrics)) + require.Len(t, metrics, 1) ch := make(chan []byte, 100) tr := newTranslatorWithStatsChannel(t, zap.NewNop(), ch) diff --git a/connector/datadogconnector/connector_test.go b/connector/datadogconnector/connector_test.go index 00f31273a9e7..9a3bfe143122 100644 --- a/connector/datadogconnector/connector_test.go +++ b/connector/datadogconnector/connector_test.go @@ -158,7 +158,7 @@ func TestContainerTags(t *testing.T) { err = connector.ConsumeTraces(context.Background(), trace2) assert.NoError(t, err) // check if the container tags are added to the cache - assert.Equal(t, 1, len(connector.containerTagCache.Items())) + assert.Len(t, connector.containerTagCache.Items(), 1) count := 0 connector.containerTagCache.Items()["my-container-id"].Object.(*sync.Map).Range(func(_, _ any) bool { count++ @@ -175,7 +175,7 @@ func TestContainerTags(t *testing.T) { // check if the container tags are added to the metrics metrics := metricsSink.AllMetrics() - assert.Equal(t, 1, len(metrics)) + assert.Len(t, metrics, 1) ch := make(chan []byte, 100) tr := newTranslatorWithStatsChannel(t, zap.NewNop(), ch) @@ -188,7 +188,7 @@ func TestContainerTags(t *testing.T) { require.NoError(t, err) tags := sp.Stats[0].Tags - assert.Equal(t, 3, len(tags)) + assert.Len(t, tags, 3) assert.ElementsMatch(t, []string{"region:my-region", "zone:my-zone", "az:my-az"}, tags) } diff --git a/connector/roundrobinconnector/connector_test.go b/connector/roundrobinconnector/connector_test.go index 504d2ee1510a..8385e72ee4f9 100644 --- a/connector/roundrobinconnector/connector_test.go +++ b/connector/roundrobinconnector/connector_test.go @@ -50,17 +50,17 @@ func TestLogsRoundRobin(t *testing.T) { assert.NoError(t, logs.ConsumeLogs(ctx, plog.NewLogs())) assert.NoError(t, logs.ConsumeLogs(ctx, plog.NewLogs())) - assert.Equal(t, 1, len(sink1.AllLogs())) - assert.Equal(t, 1, len(sink2.AllLogs())) - assert.Equal(t, 1, len(sink3.AllLogs())) + assert.Len(t, sink1.AllLogs(), 1) + assert.Len(t, sink2.AllLogs(), 1) + assert.Len(t, sink3.AllLogs(), 1) assert.NoError(t, logs.ConsumeLogs(ctx, plog.NewLogs())) assert.NoError(t, logs.ConsumeLogs(ctx, plog.NewLogs())) assert.NoError(t, logs.ConsumeLogs(ctx, plog.NewLogs())) - assert.Equal(t, 2, len(sink1.AllLogs())) - assert.Equal(t, 2, len(sink2.AllLogs())) - assert.Equal(t, 2, len(sink3.AllLogs())) + assert.Len(t, sink1.AllLogs(), 2) + assert.Len(t, sink2.AllLogs(), 2) + assert.Len(t, sink3.AllLogs(), 2) assert.NoError(t, logs.Shutdown(ctx)) } @@ -87,17 +87,17 @@ func TestMetricsRoundRobin(t *testing.T) { assert.NoError(t, 
metrics.ConsumeMetrics(ctx, pmetric.NewMetrics())) assert.NoError(t, metrics.ConsumeMetrics(ctx, pmetric.NewMetrics())) - assert.Equal(t, 1, len(sink1.AllMetrics())) - assert.Equal(t, 1, len(sink2.AllMetrics())) - assert.Equal(t, 1, len(sink3.AllMetrics())) + assert.Len(t, sink1.AllMetrics(), 1) + assert.Len(t, sink2.AllMetrics(), 1) + assert.Len(t, sink3.AllMetrics(), 1) assert.NoError(t, metrics.ConsumeMetrics(ctx, pmetric.NewMetrics())) assert.NoError(t, metrics.ConsumeMetrics(ctx, pmetric.NewMetrics())) assert.NoError(t, metrics.ConsumeMetrics(ctx, pmetric.NewMetrics())) - assert.Equal(t, 2, len(sink1.AllMetrics())) - assert.Equal(t, 2, len(sink2.AllMetrics())) - assert.Equal(t, 2, len(sink3.AllMetrics())) + assert.Len(t, sink1.AllMetrics(), 2) + assert.Len(t, sink2.AllMetrics(), 2) + assert.Len(t, sink3.AllMetrics(), 2) assert.NoError(t, metrics.Shutdown(ctx)) } @@ -124,17 +124,17 @@ func TestTracesRoundRobin(t *testing.T) { assert.NoError(t, traces.ConsumeTraces(ctx, ptrace.NewTraces())) assert.NoError(t, traces.ConsumeTraces(ctx, ptrace.NewTraces())) - assert.Equal(t, 1, len(sink1.AllTraces())) - assert.Equal(t, 1, len(sink2.AllTraces())) - assert.Equal(t, 1, len(sink3.AllTraces())) + assert.Len(t, sink1.AllTraces(), 1) + assert.Len(t, sink2.AllTraces(), 1) + assert.Len(t, sink3.AllTraces(), 1) assert.NoError(t, traces.ConsumeTraces(ctx, ptrace.NewTraces())) assert.NoError(t, traces.ConsumeTraces(ctx, ptrace.NewTraces())) assert.NoError(t, traces.ConsumeTraces(ctx, ptrace.NewTraces())) - assert.Equal(t, 2, len(sink1.AllTraces())) - assert.Equal(t, 2, len(sink2.AllTraces())) - assert.Equal(t, 2, len(sink3.AllTraces())) + assert.Len(t, sink1.AllTraces(), 2) + assert.Len(t, sink2.AllTraces(), 2) + assert.Len(t, sink3.AllTraces(), 2) assert.NoError(t, traces.Shutdown(ctx)) } diff --git a/connector/servicegraphconnector/connector_test.go b/connector/servicegraphconnector/connector_test.go index 8b15c93b057e..5a8b3a66b6f8 100644 --- a/connector/servicegraphconnector/connector_test.go +++ b/connector/servicegraphconnector/connector_test.go @@ -480,7 +480,7 @@ func TestStaleSeriesCleanup(t *testing.T) { p.keyToMetric[key] = metric } p.cleanCache() - assert.Equal(t, 0, len(p.keyToMetric)) + assert.Len(t, p.keyToMetric, 0) // ConsumeTraces with a trace with different attribute value td = buildSampleTrace(t, "second") @@ -526,8 +526,8 @@ func TestMapsAreConsistentDuringCleanup(t *testing.T) { go p.cleanCache() // Since everything is locked, nothing has happened, so both should still have length 1 - assert.Equal(t, 1, len(p.reqTotal)) - assert.Equal(t, 1, len(p.keyToMetric)) + assert.Len(t, p.reqTotal, 1) + assert.Len(t, p.keyToMetric, 1) // Now we pretend that we have stopped collecting metrics, by unlocking seriesMutex p.seriesMutex.Unlock() @@ -540,8 +540,8 @@ func TestMapsAreConsistentDuringCleanup(t *testing.T) { // for dimensions from that series. It's important that it happens this way around, // instead of deleting it from `keyToMetric`, otherwise the metrics collector will try // and fail to find dimensions for a series that is about to be removed. 
- assert.Equal(t, 0, len(p.reqTotal)) - assert.Equal(t, 1, len(p.keyToMetric)) + assert.Len(t, p.reqTotal, 0) + assert.Len(t, p.keyToMetric, 1) p.metricMutex.RUnlock() p.seriesMutex.Unlock() @@ -575,7 +575,7 @@ func TestValidateOwnTelemetry(t *testing.T) { p.keyToMetric[key] = metric } p.cleanCache() - assert.Equal(t, 0, len(p.keyToMetric)) + assert.Len(t, p.keyToMetric, 0) // ConsumeTraces with a trace with different attribute value td = buildSampleTrace(t, "second") diff --git a/exporter/alertmanagerexporter/alertmanager_exporter_test.go b/exporter/alertmanagerexporter/alertmanager_exporter_test.go index b5f8f888ca10..0dec06561975 100644 --- a/exporter/alertmanagerexporter/alertmanager_exporter_test.go +++ b/exporter/alertmanagerexporter/alertmanager_exporter_test.go @@ -101,7 +101,7 @@ func TestAlertManagerExporterExtractEvents(t *testing.T) { // test - events got := am.extractEvents(traces) - assert.Equal(t, tt.events, len(got)) + assert.Len(t, got, tt.events) }) } } @@ -133,7 +133,7 @@ func TestAlertManagerExporterEventNameAttributes(t *testing.T) { got := am.extractEvents(traces) // test - result length - assert.Equal(t, 1, len(got)) + assert.Len(t, got, 1) // test - count of attributes assert.Equal(t, 3, got[0].spanEvent.Attributes().Len()) diff --git a/exporter/alibabacloudlogserviceexporter/logsdata_to_logservice_test.go b/exporter/alibabacloudlogserviceexporter/logsdata_to_logservice_test.go index 6e8172bc30b6..0593f677210c 100644 --- a/exporter/alibabacloudlogserviceexporter/logsdata_to_logservice_test.go +++ b/exporter/alibabacloudlogserviceexporter/logsdata_to_logservice_test.go @@ -75,7 +75,7 @@ func TestLogsDataToLogService(t *testing.T) { totalLogCount := 10 validLogCount := totalLogCount - 1 gotLogs := logDataToLogService(createLogData(10)) - assert.Equal(t, len(gotLogs), 9) + assert.Len(t, gotLogs, 9) gotLogPairs := make([][]logKeyValuePair, 0, len(gotLogs)) diff --git a/exporter/alibabacloudlogserviceexporter/tracedata_to_logservice_test.go b/exporter/alibabacloudlogserviceexporter/tracedata_to_logservice_test.go index 88d005199739..8cf82da88c4a 100644 --- a/exporter/alibabacloudlogserviceexporter/tracedata_to_logservice_test.go +++ b/exporter/alibabacloudlogserviceexporter/tracedata_to_logservice_test.go @@ -30,7 +30,7 @@ func (kv logKeyValuePairs) Less(i, j int) bool { return kv[i].Key < kv[j].Key } func TestTraceDataToLogService(t *testing.T) { gotLogs := traceDataToLogServiceData(constructSpanData()) - assert.Equal(t, len(gotLogs), 2) + assert.Len(t, gotLogs, 2) gotLogPairs := make([][]logKeyValuePair, 0, len(gotLogs)) diff --git a/exporter/awsemfexporter/config_test.go b/exporter/awsemfexporter/config_test.go index bd7e7710a542..b160ccc8e2d6 100644 --- a/exporter/awsemfexporter/config_test.go +++ b/exporter/awsemfexporter/config_test.go @@ -140,7 +140,7 @@ func TestConfigValidate(t *testing.T) { } assert.NoError(t, component.ValidateConfig(cfg)) - assert.Equal(t, 2, len(cfg.MetricDescriptors)) + assert.Len(t, cfg.MetricDescriptors, 2) assert.Equal(t, []MetricDescriptor{ {Unit: "Count", MetricName: "apiserver_total", Overwrite: true}, {Unit: "Megabytes", MetricName: "memory_usage"}, diff --git a/exporter/awsemfexporter/emf_exporter_test.go b/exporter/awsemfexporter/emf_exporter_test.go index aafe96f9737c..05a63e8a57c8 100644 --- a/exporter/awsemfexporter/emf_exporter_test.go +++ b/exporter/awsemfexporter/emf_exporter_test.go @@ -370,9 +370,9 @@ func TestNewExporterWithMetricDeclarations(t *testing.T) { assert.NoError(t, err) // Invalid metric declaration should be 
filtered out - assert.Equal(t, 3, len(exp.config.MetricDeclarations)) + assert.Len(t, exp.config.MetricDeclarations, 3) // Invalid dimensions (> 10 dims) should be filtered out - assert.Equal(t, 1, len(exp.config.MetricDeclarations[2].Dimensions)) + assert.Len(t, exp.config.MetricDeclarations[2].Dimensions, 1) // Test output warning logs expectedLogs := []observer.LoggedEntry{ diff --git a/exporter/awsemfexporter/grouped_metric_test.go b/exporter/awsemfexporter/grouped_metric_test.go index 47bee6e50fea..f407880b1f01 100644 --- a/exporter/awsemfexporter/grouped_metric_test.go +++ b/exporter/awsemfexporter/grouped_metric_test.go @@ -116,11 +116,11 @@ func TestAddToGroupedMetric(t *testing.T) { assert.NoError(t, err) } - assert.Equal(t, 1, len(groupedMetrics)) + assert.Len(t, groupedMetrics, 1) for _, v := range groupedMetrics { assert.Equal(t, len(tc.expectedMetricInfo), len(v.metrics)) assert.Equal(t, tc.expectedMetricInfo, v.metrics) - assert.Equal(t, 2, len(v.labels)) + assert.Len(t, v.labels, 2) assert.Equal(t, generateTestMetricMetadata(namespace, timestamp, logGroup, logStreamName, instrumentationLibName, tc.expectedMetricType), v.metadata) assert.Equal(t, tc.expectedLabels, v.labels) } @@ -158,7 +158,7 @@ func TestAddToGroupedMetric(t *testing.T) { assert.NoError(t, err) } - assert.Equal(t, 4, len(groupedMetrics)) + assert.Len(t, groupedMetrics, 4) for _, group := range groupedMetrics { for metricName, metricInfo := range group.metrics { switch metricName { @@ -230,7 +230,7 @@ func TestAddToGroupedMetric(t *testing.T) { assert.NoError(t, err) } - assert.Equal(t, 4, len(groupedMetrics)) + assert.Len(t, groupedMetrics, 4) for _, group := range groupedMetrics { for metricName, metricInfo := range group.metrics { switch metricName { @@ -348,7 +348,7 @@ func TestAddToGroupedMetric(t *testing.T) { ) assert.NoError(t, err) } - assert.Equal(t, 1, len(groupedMetrics)) + assert.Len(t, groupedMetrics, 1) labels := map[string]string{ oTellibDimensionKey: instrumentationLibName, @@ -389,7 +389,7 @@ func TestAddToGroupedMetric(t *testing.T) { emfCalcs, ) assert.NoError(t, err) - assert.Equal(t, 0, len(groupedMetrics)) + assert.Len(t, groupedMetrics, 0) // Test output warning logs expectedLogs := []observer.LoggedEntry{ diff --git a/exporter/awsemfexporter/metric_declaration_test.go b/exporter/awsemfexporter/metric_declaration_test.go index 00706b006a33..b50996465344 100644 --- a/exporter/awsemfexporter/metric_declaration_test.go +++ b/exporter/awsemfexporter/metric_declaration_test.go @@ -226,7 +226,7 @@ func TestMetricDeclarationInit(t *testing.T) { } err := m.init(logger) assert.NoError(t, err) - assert.Equal(t, 3, len(m.metricRegexList)) + assert.Len(t, m.metricRegexList, 3) }) t.Run("with dimensions", func(t *testing.T) { @@ -239,8 +239,8 @@ func TestMetricDeclarationInit(t *testing.T) { } err := m.init(logger) assert.NoError(t, err) - assert.Equal(t, 3, len(m.metricRegexList)) - assert.Equal(t, 2, len(m.Dimensions)) + assert.Len(t, m.metricRegexList, 3) + assert.Len(t, m.Dimensions, 2) }) // Test removal of dimension sets with more than 10 elements @@ -256,8 +256,8 @@ func TestMetricDeclarationInit(t *testing.T) { obsLogger := zap.New(obs) err := m.init(obsLogger) assert.NoError(t, err) - assert.Equal(t, 3, len(m.metricRegexList)) - assert.Equal(t, 1, len(m.Dimensions)) + assert.Len(t, m.metricRegexList, 3) + assert.Len(t, m.Dimensions, 1) // Check logged warning message expectedLogs := []observer.LoggedEntry{{ Entry: zapcore.Entry{Level: zap.WarnLevel, Message: "Dropped dimension set: > 10 
dimensions specified."}, @@ -281,7 +281,7 @@ func TestMetricDeclarationInit(t *testing.T) { obsLogger := zap.New(obs) err := m.init(obsLogger) assert.NoError(t, err) - assert.Equal(t, 1, len(m.Dimensions)) + assert.Len(t, m.Dimensions, 1) assert.Equal(t, []string{"a", "b", "c"}, m.Dimensions[0]) // Check logged warning message expectedLogs := []observer.LoggedEntry{ @@ -324,7 +324,7 @@ func TestMetricDeclarationInit(t *testing.T) { } err := m.init(logger) assert.NoError(t, err) - assert.Equal(t, 2, len(m.LabelMatchers)) + assert.Len(t, m.LabelMatchers, 2) assert.Equal(t, ";", m.LabelMatchers[0].Separator) assert.Equal(t, ".+", m.LabelMatchers[0].Regex) assert.NotNil(t, m.LabelMatchers[0].compiledRegex) diff --git a/exporter/awsemfexporter/metric_translator_test.go b/exporter/awsemfexporter/metric_translator_test.go index 1ac411048e6d..ec1a3580dbb0 100644 --- a/exporter/awsemfexporter/metric_translator_test.go +++ b/exporter/awsemfexporter/metric_translator_test.go @@ -352,21 +352,21 @@ func TestTranslateOtToGroupedMetric(t *testing.T) { err := translator.translateOTelToGroupedMetric(tc.metric, groupedMetrics, config) assert.NoError(t, err) assert.NotNil(t, groupedMetrics) - assert.Equal(t, 3, len(groupedMetrics)) + assert.Len(t, groupedMetrics, 3) for _, v := range groupedMetrics { assert.Equal(t, tc.expectedNamespace, v.metadata.namespace) switch { case v.metadata.metricDataType == pmetric.MetricTypeSum: - assert.Equal(t, 2, len(v.metrics)) + assert.Len(t, v.metrics, 2) assert.Equal(t, tc.counterLabels, v.labels) assert.Equal(t, counterSumMetrics, v.metrics) case v.metadata.metricDataType == pmetric.MetricTypeGauge: - assert.Equal(t, 2, len(v.metrics)) + assert.Len(t, v.metrics, 2) assert.Equal(t, tc.counterLabels, v.labels) assert.Equal(t, counterGaugeMetrics, v.metrics) case v.metadata.metricDataType == pmetric.MetricTypeHistogram: - assert.Equal(t, 1, len(v.metrics)) + assert.Len(t, v.metrics, 1) assert.Equal(t, tc.timerLabels, v.labels) assert.Equal(t, timerMetrics, v.metrics) default: @@ -383,7 +383,7 @@ func TestTranslateOtToGroupedMetric(t *testing.T) { groupedMetrics := make(map[any]*groupedMetric) err := translator.translateOTelToGroupedMetric(rm, groupedMetrics, config) assert.NoError(t, err) - assert.Equal(t, 0, len(groupedMetrics)) + assert.Len(t, groupedMetrics, 0) }) } @@ -1493,7 +1493,7 @@ func TestGroupedMetricToCWMeasurementsWithFilters(t *testing.T) { // Have to perform this hacky equality check because the metric names might not // be in the right order due to map iteration assert.Equal(t, expectedLog.Entry, log.Entry) - assert.Equal(t, 2, len(log.Context)) + assert.Len(t, log.Context, 2) assert.Equal(t, expectedLog.Context[0], log.Context[0]) isMatch := false possibleOrders := []zapcore.Field{ @@ -1557,7 +1557,7 @@ func TestGroupedMetricToCWMeasurementsWithFilters(t *testing.T) { seen := make([]bool, 3) for _, log := range logs.AllUntimed() { assert.Equal(t, expectedEntry, log.Entry) - assert.Equal(t, 1, len(log.Context)) + assert.Len(t, log.Context, 1) hasMatch := false for i, expectedCtx := range expectedContexts { if !seen[i] && log.Context[0].Equals(expectedCtx) { @@ -1957,9 +1957,9 @@ func TestGroupedMetricToCWMeasurementsWithFilters(t *testing.T) { cWMeasurements := groupedMetricToCWMeasurementsWithFilters(groupedMetric, config) if len(tc.expectedDims) == 0 { - assert.Equal(t, 0, len(cWMeasurements)) + assert.Len(t, cWMeasurements, 0) } else { - assert.Equal(t, 1, len(cWMeasurements)) + assert.Len(t, cWMeasurements, 1) dims := cWMeasurements[0].Dimensions 
assertDimsEqual(t, tc.expectedDims, dims) } @@ -2335,7 +2335,7 @@ func TestTranslateOtToGroupedMetricForLogGroupAndStream(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, groupedMetrics) - assert.Equal(t, 1, len(groupedMetrics)) + assert.Len(t, groupedMetrics, 1) for _, actual := range groupedMetrics { assert.Equal(t, test.outLogGroupName, actual.metadata.logGroup) @@ -2366,7 +2366,7 @@ func TestTranslateOtToGroupedMetricForInitialDeltaValue(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, groupedMetrics) - assert.Equal(t, 1, len(groupedMetrics)) + assert.Len(t, groupedMetrics, 1) for _, actual := range groupedMetrics { assert.True(t, actual.metadata.retainInitialValueForDelta) diff --git a/exporter/awsxrayexporter/internal/translator/aws_test.go b/exporter/awsxrayexporter/internal/translator/aws_test.go index 9b0f4f596ac8..51d4609ee95e 100644 --- a/exporter/awsxrayexporter/internal/translator/aws_test.go +++ b/exporter/awsxrayexporter/internal/translator/aws_test.go @@ -409,7 +409,7 @@ func TestLogGroups(t *testing.T) { assert.NotNil(t, filtered) assert.NotNil(t, awsData) - assert.Equal(t, 2, len(awsData.CWLogs)) + assert.Len(t, awsData.CWLogs, 2) assert.Contains(t, awsData.CWLogs, cwl1) assert.Contains(t, awsData.CWLogs, cwl2) } @@ -437,7 +437,7 @@ func TestLogGroupsFromArns(t *testing.T) { assert.NotNil(t, filtered) assert.NotNil(t, awsData) - assert.Equal(t, 2, len(awsData.CWLogs)) + assert.Len(t, awsData.CWLogs, 2) assert.Contains(t, awsData.CWLogs, cwl1) assert.Contains(t, awsData.CWLogs, cwl2) } @@ -456,7 +456,7 @@ func TestLogGroupsFromStringResourceAttribute(t *testing.T) { assert.NotNil(t, filtered) assert.NotNil(t, awsData) - assert.Equal(t, 1, len(awsData.CWLogs)) + assert.Len(t, awsData.CWLogs, 1) assert.Contains(t, awsData.CWLogs, cwl1) } @@ -476,7 +476,7 @@ func TestLogGroupsWithAmpersandFromStringResourceAttribute(t *testing.T) { filtered, awsData := makeAws(attributes, resource, nil) assert.NotNil(t, filtered) assert.NotNil(t, awsData) - assert.Equal(t, 2, len(awsData.CWLogs)) + assert.Len(t, awsData.CWLogs, 2) assert.Contains(t, awsData.CWLogs, cwl1) assert.Contains(t, awsData.CWLogs, cwl2) @@ -485,7 +485,7 @@ func TestLogGroupsWithAmpersandFromStringResourceAttribute(t *testing.T) { filtered, awsData = makeAws(attributes, resource, nil) assert.NotNil(t, filtered) assert.NotNil(t, awsData) - assert.Equal(t, 2, len(awsData.CWLogs)) + assert.Len(t, awsData.CWLogs, 2) assert.Contains(t, awsData.CWLogs, cwl1) assert.Contains(t, awsData.CWLogs, cwl2) @@ -494,7 +494,7 @@ func TestLogGroupsWithAmpersandFromStringResourceAttribute(t *testing.T) { filtered, awsData = makeAws(attributes, resource, nil) assert.NotNil(t, filtered) assert.NotNil(t, awsData) - assert.Equal(t, 2, len(awsData.CWLogs)) + assert.Len(t, awsData.CWLogs, 2) assert.Contains(t, awsData.CWLogs, cwl1) assert.Contains(t, awsData.CWLogs, cwl2) @@ -503,7 +503,7 @@ func TestLogGroupsWithAmpersandFromStringResourceAttribute(t *testing.T) { filtered, awsData = makeAws(attributes, resource, nil) assert.NotNil(t, filtered) assert.NotNil(t, awsData) - assert.Equal(t, 2, len(awsData.CWLogs)) + assert.Len(t, awsData.CWLogs, 2) assert.Contains(t, awsData.CWLogs, cwl1) assert.Contains(t, awsData.CWLogs, cwl2) @@ -512,7 +512,7 @@ func TestLogGroupsWithAmpersandFromStringResourceAttribute(t *testing.T) { filtered, awsData = makeAws(attributes, resource, nil) assert.NotNil(t, filtered) assert.NotNil(t, awsData) - assert.Equal(t, 0, len(awsData.CWLogs)) + assert.Len(t, awsData.CWLogs, 0) } func 
TestLogGroupsInvalidType(t *testing.T) { @@ -524,7 +524,7 @@ func TestLogGroupsInvalidType(t *testing.T) { assert.NotNil(t, filtered) assert.NotNil(t, awsData) - assert.Equal(t, 0, len(awsData.CWLogs)) + assert.Len(t, awsData.CWLogs, 0) } // Simulate Log groups arns being set using OTEL_RESOURCE_ATTRIBUTES @@ -544,7 +544,7 @@ func TestLogGroupsArnsFromStringResourceAttributes(t *testing.T) { assert.NotNil(t, filtered) assert.NotNil(t, awsData) - assert.Equal(t, 1, len(awsData.CWLogs)) + assert.Len(t, awsData.CWLogs, 1) assert.Contains(t, awsData.CWLogs, cwl1) } @@ -569,7 +569,7 @@ func TestLogGroupsArnsWithAmpersandFromStringResourceAttributes(t *testing.T) { assert.NotNil(t, filtered) assert.NotNil(t, awsData) - assert.Equal(t, 2, len(awsData.CWLogs)) + assert.Len(t, awsData.CWLogs, 2) assert.Contains(t, awsData.CWLogs, cwl1) assert.Contains(t, awsData.CWLogs, cwl2) } @@ -589,7 +589,7 @@ func TestLogGroupsFromConfig(t *testing.T) { assert.NotNil(t, filtered) assert.NotNil(t, awsData) - assert.Equal(t, 2, len(awsData.CWLogs)) + assert.Len(t, awsData.CWLogs, 2) assert.Contains(t, awsData.CWLogs, cwl1) assert.Contains(t, awsData.CWLogs, cwl2) } diff --git a/exporter/awsxrayexporter/internal/translator/cause_test.go b/exporter/awsxrayexporter/internal/translator/cause_test.go index eabe8147369a..de5e3c71b927 100644 --- a/exporter/awsxrayexporter/internal/translator/cause_test.go +++ b/exporter/awsxrayexporter/internal/translator/cause_test.go @@ -81,7 +81,7 @@ func TestMakeCauseAwsSdkSpan(t *testing.T) { assert.False(t, isThrottle) assert.NotNil(t, cause) - assert.Equal(t, 1, len(cause.CauseObject.Exceptions)) + assert.Len(t, cause.CauseObject.Exceptions, 1) exception := cause.CauseObject.Exceptions[0] assert.Equal(t, AwsIndividualHTTPErrorEventType, *exception.Type) assert.True(t, *exception.Remote) diff --git a/exporter/awsxrayexporter/internal/translator/segment_test.go b/exporter/awsxrayexporter/internal/translator/segment_test.go index 1ef895c9c8b4..404eb42ef954 100644 --- a/exporter/awsxrayexporter/internal/translator/segment_test.go +++ b/exporter/awsxrayexporter/internal/translator/segment_test.go @@ -240,7 +240,7 @@ func TestClientSpanWithDbComponent(t *testing.T) { assert.NotNil(t, segment.Service) assert.NotNil(t, segment.AWS) assert.NotNil(t, segment.Metadata) - assert.Equal(t, 0, len(segment.Annotations)) + assert.Len(t, segment.Annotations, 0) assert.Equal(t, enterpriseAppID, segment.Metadata["default"]["enterprise.app.id"]) assert.Nil(t, segment.Cause) assert.Nil(t, segment.HTTP) @@ -467,7 +467,7 @@ func TestSpanWithAttributesDefaultNotIndexed(t *testing.T) { segment, _ := MakeSegment(span, resource, nil, false, nil, false) assert.NotNil(t, segment) - assert.Equal(t, 0, len(segment.Annotations)) + assert.Len(t, segment.Annotations, 0) assert.Equal(t, "val1", segment.Metadata["default"]["attr1@1"]) assert.Equal(t, "val2", segment.Metadata["default"]["attr2@2"]) assert.Equal(t, "string", segment.Metadata["default"]["otel.resource.string.key"]) @@ -494,7 +494,7 @@ func TestSpanWithResourceNotStoredIfSubsegment(t *testing.T) { segment, _ := MakeSegment(span, resource, nil, false, nil, false) assert.NotNil(t, segment) - assert.Equal(t, 0, len(segment.Annotations)) + assert.Len(t, segment.Annotations, 0) assert.Equal(t, "val1", segment.Metadata["default"]["attr1@1"]) assert.Equal(t, "val2", segment.Metadata["default"]["attr2@2"]) assert.Nil(t, segment.Metadata["default"]["otel.resource.string.key"]) @@ -517,7 +517,7 @@ func TestSpanWithAttributesPartlyIndexed(t *testing.T) { segment, 
_ := MakeSegment(span, resource, []string{"attr1@1", "not_exist"}, false, nil, false) assert.NotNil(t, segment) - assert.Equal(t, 1, len(segment.Annotations)) + assert.Len(t, segment.Annotations, 1) assert.Equal(t, "val1", segment.Annotations["attr1_1"]) assert.Equal(t, "val2", segment.Metadata["default"]["attr2@2"]) } @@ -535,7 +535,7 @@ func TestSpanWithAnnotationsAttribute(t *testing.T) { segment, _ := MakeSegment(span, resource, nil, false, nil, false) assert.NotNil(t, segment) - assert.Equal(t, 1, len(segment.Annotations)) + assert.Len(t, segment.Annotations, 1) assert.Equal(t, "val2", segment.Annotations["attr2_2"]) assert.Equal(t, "val1", segment.Metadata["default"]["attr1@1"]) } @@ -570,8 +570,8 @@ func TestSpanWithAttributesSegmentMetadata(t *testing.T) { segment, _ := MakeSegment(span, resource, nil, false, nil, false) assert.NotNil(t, segment) - assert.Equal(t, 0, len(segment.Annotations)) - assert.Equal(t, 2, len(segment.Metadata)) + assert.Len(t, segment.Annotations, 0) + assert.Len(t, segment.Metadata, 2) assert.Equal(t, "val1", segment.Metadata["default"]["attr1@1"]) assert.Equal(t, "custom_value", segment.Metadata["default"]["custom_key"]) assert.Equal(t, "retain-value", segment.Metadata["default"][awsxray.AWSXraySegmentMetadataAttributePrefix+"non-xray-sdk"]) @@ -602,7 +602,7 @@ func TestResourceAttributesCanBeIndexed(t *testing.T) { }, false, nil, false) assert.NotNil(t, segment) - assert.Equal(t, 4, len(segment.Annotations)) + assert.Len(t, segment.Annotations, 4) assert.Equal(t, "string", segment.Annotations["otel_resource_string_key"]) assert.Equal(t, int64(10), segment.Annotations["otel_resource_int_key"]) assert.Equal(t, 5.0, segment.Annotations["otel_resource_double_key"]) @@ -635,7 +635,7 @@ func TestResourceAttributesCanBeIndexedWithAllowDot(t *testing.T) { }, false, nil, false) assert.NotNil(t, segment) - assert.Equal(t, 4, len(segment.Annotations)) + assert.Len(t, segment.Annotations, 4) assert.Equal(t, "string", segment.Annotations["otel.resource.string.key"]) assert.Equal(t, int64(10), segment.Annotations["otel.resource.int.key"]) assert.Equal(t, 5.0, segment.Annotations["otel.resource.double.key"]) @@ -685,7 +685,7 @@ func TestSpanWithSpecialAttributesAsListed(t *testing.T) { segment, _ := MakeSegment(span, resource, []string{awsxray.AWSOperationAttribute, conventions.AttributeRPCMethod}, false, nil, false) assert.NotNil(t, segment) - assert.Equal(t, 2, len(segment.Annotations)) + assert.Len(t, segment.Annotations, 2) assert.Equal(t, "aws_operation_val", segment.Annotations["aws_operation"]) assert.Equal(t, "rpc_method_val", segment.Annotations["rpc_method"]) } @@ -705,7 +705,7 @@ func TestSpanWithSpecialAttributesAsListedWithAllowDot(t *testing.T) { segment, _ := MakeSegment(span, resource, []string{awsxray.AWSOperationAttribute, conventions.AttributeRPCMethod}, false, nil, false) assert.NotNil(t, segment) - assert.Equal(t, 2, len(segment.Annotations)) + assert.Len(t, segment.Annotations, 2) assert.Equal(t, "aws_operation_val", segment.Annotations[awsxray.AWSOperationAttribute]) assert.Equal(t, "rpc_method_val", segment.Annotations[conventions.AttributeRPCMethod]) } @@ -1235,12 +1235,12 @@ func validateLocalRootDependencySubsegment(t *testing.T, segment *awsxray.Segmen assert.Equal(t, expectedTraceID, *segment.TraceID) assert.NotNil(t, segment.HTTP) assert.Equal(t, "POST", *segment.HTTP.Request.Method) - assert.Equal(t, 2, len(segment.Annotations)) + assert.Len(t, segment.Annotations, 2) assert.Nil(t, segment.Annotations[awsRemoteService]) assert.Nil(t, 
segment.Annotations[remoteTarget]) assert.Equal(t, "myAnnotationValue", segment.Annotations["myAnnotationKey"]) - assert.Equal(t, 8, len(segment.Metadata["default"])) + assert.Len(t, segment.Metadata["default"], 8) assert.Equal(t, "receive", segment.Metadata["default"][conventions.AttributeMessagingOperation]) assert.Equal(t, "LOCAL_ROOT", segment.Metadata["default"][awsSpanKind]) assert.Equal(t, "myRemoteOperation", segment.Metadata["default"][awsRemoteOperation]) @@ -1272,9 +1272,9 @@ func validateLocalRootServiceSegment(t *testing.T, segment *awsxray.Segment, spa assert.Equal(t, "myLocalService", *segment.Name) assert.Equal(t, expectedTraceID, *segment.TraceID) assert.Nil(t, segment.HTTP) - assert.Equal(t, 1, len(segment.Annotations)) + assert.Len(t, segment.Annotations, 1) assert.Equal(t, "myAnnotationValue", segment.Annotations["myAnnotationKey"]) - assert.Equal(t, 1, len(segment.Metadata["default"])) + assert.Len(t, segment.Metadata["default"], 1) assert.Equal(t, "service.name=myTest", segment.Metadata["default"]["otel.resource.attributes"]) assert.Equal(t, "MySDK", *segment.AWS.XRay.SDK) assert.Equal(t, "1.20.0", *segment.AWS.XRay.SDKVersion) @@ -1351,14 +1351,14 @@ func TestLocalRootConsumer(t *testing.T) { segments, err := MakeSegmentsFromSpan(span, resource, []string{awsRemoteService, "myAnnotationKey"}, false, nil, false) assert.NotNil(t, segments) - assert.Equal(t, 2, len(segments)) + assert.Len(t, segments, 2) assert.Nil(t, err) validateLocalRootDependencySubsegment(t, segments[0], span, *segments[1].ID) assert.Nil(t, segments[0].Links) validateLocalRootServiceSegment(t, segments[1], span) - assert.Equal(t, 1, len(segments[1].Links)) + assert.Len(t, segments[1].Links, 1) // Checks these values are the same for both assert.Equal(t, segments[0].StartTime, segments[1].StartTime) @@ -1382,7 +1382,7 @@ func TestNonLocalRootConsumerProcess(t *testing.T) { segments, err := MakeSegmentsFromSpan(span, resource, []string{awsRemoteService, "myAnnotationKey"}, false, nil, false) assert.NotNil(t, segments) - assert.Equal(t, 1, len(segments)) + assert.Len(t, segments, 1) assert.Nil(t, err) tempTraceID := span.TraceID() @@ -1393,13 +1393,13 @@ func TestNonLocalRootConsumerProcess(t *testing.T) { assert.Equal(t, "destination operation", *segments[0].Name) assert.NotEqual(t, parentSpanID.String(), *segments[0].ID) assert.Equal(t, span.SpanID().String(), *segments[0].ID) - assert.Equal(t, 1, len(segments[0].Links)) + assert.Len(t, segments[0].Links, 1) assert.Equal(t, expectedTraceID, *segments[0].TraceID) assert.NotNil(t, segments[0].HTTP) assert.Equal(t, "POST", *segments[0].HTTP.Request.Method) - assert.Equal(t, 1, len(segments[0].Annotations)) + assert.Len(t, segments[0].Annotations, 1) assert.Equal(t, "myAnnotationValue", segments[0].Annotations["myAnnotationKey"]) - assert.Equal(t, 7, len(segments[0].Metadata["default"])) + assert.Len(t, segments[0].Metadata["default"], 7) assert.Equal(t, "Consumer", segments[0].Metadata["default"][awsSpanKind]) assert.Equal(t, "myLocalService", segments[0].Metadata["default"][awsLocalService]) assert.Equal(t, "receive", segments[0].Metadata["default"][conventions.AttributeMessagingOperation]) @@ -1428,7 +1428,7 @@ func TestLocalRootConsumerAWSNamespace(t *testing.T) { segments, err := MakeSegmentsFromSpan(span, resource, []string{awsRemoteService, "myAnnotationKey"}, false, nil, false) assert.NotNil(t, segments) - assert.Equal(t, 2, len(segments)) + assert.Len(t, segments, 2) assert.Nil(t, err) // Ensure that AWS namespace is not overwritten to remote @@ 
-1454,11 +1454,11 @@ func TestLocalRootClient(t *testing.T) { segments, err := MakeSegmentsFromSpan(span, resource, []string{awsRemoteService, "myAnnotationKey"}, false, nil, false) assert.NotNil(t, segments) - assert.Equal(t, 2, len(segments)) + assert.Len(t, segments, 2) assert.Nil(t, err) validateLocalRootDependencySubsegment(t, segments[0], span, *segments[1].ID) - assert.Equal(t, 1, len(segments[0].Links)) + assert.Len(t, segments[0].Links, 1) validateLocalRootServiceSegment(t, segments[1], span) assert.Nil(t, segments[1].Links) @@ -1491,7 +1491,7 @@ func TestLocalRootClientAwsServiceMetrics(t *testing.T) { segments, err := MakeSegmentsFromSpan(span, resource, []string{awsRemoteService, "myAnnotationKey"}, false, nil, false) assert.NotNil(t, segments) - assert.Equal(t, 2, len(segments)) + assert.Len(t, segments, 2) assert.Nil(t, err) subsegment := segments[0] @@ -1515,11 +1515,11 @@ func TestLocalRootProducer(t *testing.T) { segments, err := MakeSegmentsFromSpan(span, resource, []string{awsRemoteService, "myAnnotationKey"}, false, nil, false) assert.NotNil(t, segments) - assert.Equal(t, 2, len(segments)) + assert.Len(t, segments, 2) assert.Nil(t, err) validateLocalRootDependencySubsegment(t, segments[0], span, *segments[1].ID) - assert.Equal(t, 1, len(segments[0].Links)) + assert.Len(t, segments[0].Links, 1) validateLocalRootServiceSegment(t, segments[1], span) assert.Nil(t, segments[1].Links) @@ -1537,10 +1537,10 @@ func validateLocalRootWithoutDependency(t *testing.T, segment *awsxray.Segment, assert.Nil(t, segment.Type) assert.Equal(t, "myLocalService", *segment.Name) assert.Equal(t, span.ParentSpanID().String(), *segment.ParentID) - assert.Equal(t, 1, len(segment.Links)) + assert.Len(t, segment.Links, 1) assert.Equal(t, expectedTraceID, *segment.TraceID) assert.Equal(t, "POST", *segment.HTTP.Request.Method) - assert.Equal(t, 2, len(segment.Annotations)) + assert.Len(t, segment.Annotations, 2) assert.Equal(t, "myRemoteService", segment.Annotations["aws_remote_service"]) assert.Equal(t, "myAnnotationValue", segment.Annotations["myAnnotationKey"]) @@ -1550,7 +1550,7 @@ func validateLocalRootWithoutDependency(t *testing.T, segment *awsxray.Segment, numberOfMetadataKeys = 30 } - assert.Equal(t, numberOfMetadataKeys, len(segment.Metadata["default"])) + assert.Len(t, segment.Metadata["default"], numberOfMetadataKeys) assert.Equal(t, "receive", segment.Metadata["default"][conventions.AttributeMessagingOperation]) assert.Equal(t, "LOCAL_ROOT", segment.Metadata["default"][awsSpanKind]) assert.Equal(t, "myRemoteOperation", segment.Metadata["default"][awsRemoteOperation]) @@ -1592,7 +1592,7 @@ func TestLocalRootServer(t *testing.T) { segments, err := MakeSegmentsFromSpan(span, resource, []string{awsRemoteService, "myAnnotationKey"}, false, nil, false) assert.NotNil(t, segments) - assert.Equal(t, 1, len(segments)) + assert.Len(t, segments, 1) assert.Nil(t, err) validateLocalRootWithoutDependency(t, segments[0], span) @@ -1615,7 +1615,7 @@ func TestLocalRootInternal(t *testing.T) { segments, err := MakeSegmentsFromSpan(span, resource, []string{awsRemoteService, "myAnnotationKey"}, false, nil, false) assert.NotNil(t, segments) - assert.Equal(t, 1, len(segments)) + assert.Len(t, segments, 1) assert.Nil(t, err) validateLocalRootWithoutDependency(t, segments[0], span) @@ -1636,7 +1636,7 @@ func TestNotLocalRootInternal(t *testing.T) { segments, err := MakeSegmentsFromSpan(span, resource, []string{awsRemoteService, "myAnnotationKey"}, false, nil, false) assert.NotNil(t, segments) - assert.Equal(t, 
1, len(segments)) + assert.Len(t, segments, 1) assert.Nil(t, err) // Validate segment @@ -1660,7 +1660,7 @@ func TestNotLocalRootConsumer(t *testing.T) { segments, err := MakeSegmentsFromSpan(span, resource, []string{awsRemoteService, "myAnnotationKey"}, false, nil, false) assert.NotNil(t, segments) - assert.Equal(t, 1, len(segments)) + assert.Len(t, segments, 1) assert.Nil(t, err) // Validate segment @@ -1684,7 +1684,7 @@ func TestNotLocalRootClient(t *testing.T) { segments, err := MakeSegmentsFromSpan(span, resource, []string{awsRemoteService, "myAnnotationKey"}, false, nil, false) assert.NotNil(t, segments) - assert.Equal(t, 1, len(segments)) + assert.Len(t, segments, 1) assert.Nil(t, err) // Validate segment @@ -1708,7 +1708,7 @@ func TestNotLocalRootProducer(t *testing.T) { segments, err := MakeSegmentsFromSpan(span, resource, []string{awsRemoteService, "myAnnotationKey"}, false, nil, false) assert.NotNil(t, segments) - assert.Equal(t, 1, len(segments)) + assert.Len(t, segments, 1) assert.Nil(t, err) // Validate segment @@ -1734,7 +1734,7 @@ func TestNotLocalRootServer(t *testing.T) { segments, err := MakeSegmentsFromSpan(span, resource, []string{awsRemoteService, "myAnnotationKey"}, false, nil, false) assert.NotNil(t, segments) - assert.Equal(t, 1, len(segments)) + assert.Len(t, segments, 1) assert.Nil(t, err) // Validate segment diff --git a/exporter/awsxrayexporter/internal/translator/span_links_test.go b/exporter/awsxrayexporter/internal/translator/span_links_test.go index f299bd4f01f9..5304ad60c65c 100644 --- a/exporter/awsxrayexporter/internal/translator/span_links_test.go +++ b/exporter/awsxrayexporter/internal/translator/span_links_test.go @@ -30,10 +30,10 @@ func TestSpanLinkSimple(t *testing.T) { var convertedTraceID, _ = convertToAmazonTraceID(traceID, false) - assert.Equal(t, 1, len(segment.Links)) + assert.Len(t, segment.Links, 1) assert.Equal(t, spanLink.SpanID().String(), *segment.Links[0].SpanID) assert.Equal(t, convertedTraceID, *segment.Links[0].TraceID) - assert.Equal(t, 0, len(segment.Links[0].Attributes)) + assert.Len(t, segment.Links[0].Attributes, 0) jsonStr, _ := MakeSegmentDocumentString(span, resource, nil, false, nil, false) @@ -52,7 +52,7 @@ func TestSpanLinkEmpty(t *testing.T) { segment, _ := MakeSegment(span, resource, nil, false, nil, false) - assert.Equal(t, 0, len(segment.Links)) + assert.Len(t, segment.Links, 0) jsonStr, _ := MakeSegmentDocumentString(span, resource, nil, false, nil, false) @@ -111,16 +111,16 @@ func TestTwoSpanLinks(t *testing.T) { var convertedTraceID1, _ = convertToAmazonTraceID(traceID1, false) var convertedTraceID2, _ = convertToAmazonTraceID(traceID2, false) - assert.Equal(t, 2, len(segment.Links)) + assert.Len(t, segment.Links, 2) assert.Equal(t, spanLink1.SpanID().String(), *segment.Links[0].SpanID) assert.Equal(t, convertedTraceID1, *segment.Links[0].TraceID) - assert.Equal(t, 1, len(segment.Links[0].Attributes)) + assert.Len(t, segment.Links[0].Attributes, 1) assert.Equal(t, "ABC", segment.Links[0].Attributes["myKey1"]) assert.Equal(t, spanLink2.SpanID().String(), *segment.Links[1].SpanID) assert.Equal(t, convertedTraceID2, *segment.Links[1].TraceID) - assert.Equal(t, 1, len(segment.Links[0].Attributes)) + assert.Len(t, segment.Links[0].Attributes, 1) assert.Equal(t, int64(1234), segment.Links[1].Attributes["myKey2"]) jsonStr, _ := MakeSegmentDocumentString(span, resource, nil, false, nil, false) @@ -172,8 +172,8 @@ func TestSpanLinkComplexAttributes(t *testing.T) { segment, _ := MakeSegment(span, resource, nil, false, nil, 
false) - assert.Equal(t, 1, len(segment.Links)) - assert.Equal(t, 8, len(segment.Links[0].Attributes)) + assert.Len(t, segment.Links, 1) + assert.Len(t, segment.Links[0].Attributes, 8) assert.Equal(t, "myValue", segment.Links[0].Attributes["myKey1"]) assert.Equal(t, true, segment.Links[0].Attributes["myKey2"]) diff --git a/exporter/azuredataexplorerexporter/adx_exporter_test.go b/exporter/azuredataexplorerexporter/adx_exporter_test.go index 857ab8902ebb..186a8839e700 100644 --- a/exporter/azuredataexplorerexporter/adx_exporter_test.go +++ b/exporter/azuredataexplorerexporter/adx_exporter_test.go @@ -168,7 +168,7 @@ func TestIngestedDataRecordCount(t *testing.T) { recordstoingest := genRand.Intn(20) err := adxDataProducer.metricsDataPusher(context.Background(), createMetricsData(recordstoingest)) ingestedrecordsactual := ingestor.Records() - assert.Equal(t, recordstoingest, len(ingestedrecordsactual), "Number of metrics created should match number of records ingested") + assert.Len(t, ingestedrecordsactual, recordstoingest, "Number of metrics created should match number of records ingested") assert.NoError(t, err) } diff --git a/exporter/azuremonitorexporter/metricexporter_test.go b/exporter/azuremonitorexporter/metricexporter_test.go index 1fc84bd49f33..0be8c6c86a73 100644 --- a/exporter/azuremonitorexporter/metricexporter_test.go +++ b/exporter/azuremonitorexporter/metricexporter_test.go @@ -107,7 +107,7 @@ func TestSummaryEnvelopes(t *testing.T) { func getDataPoint(t testing.TB, metric pmetric.Metric) *contracts.DataPoint { var envelopes []*contracts.Envelope = getMetricPacker().MetricToEnvelopes(metric, getResource(), getScope()) - require.Equal(t, len(envelopes), 1) + require.Len(t, envelopes, 1) envelope := envelopes[0] require.NotNil(t, envelope) @@ -123,7 +123,7 @@ func getDataPoint(t testing.TB, metric pmetric.Metric) *contracts.DataPoint { metricData := envelopeData.BaseData.(*contracts.MetricData) - require.Equal(t, len(metricData.Metrics), 1) + require.Len(t, metricData.Metrics, 1) dataPoint := metricData.Metrics[0] require.NotNil(t, dataPoint) diff --git a/exporter/azuremonitorexporter/trace_to_envelope_test.go b/exporter/azuremonitorexporter/trace_to_envelope_test.go index f5bc60e401cb..a0e212ef21a9 100644 --- a/exporter/azuremonitorexporter/trace_to_envelope_test.go +++ b/exporter/azuremonitorexporter/trace_to_envelope_test.go @@ -512,7 +512,7 @@ func TestSpanWithEventsToEnvelopes(t *testing.T) { envelopes, _ := spanToEnvelopes(defaultResource, defaultInstrumentationLibrary, span, true, zap.NewNop()) assert.NotNil(t, envelopes) - assert.Equal(t, 3, len(envelopes)) + assert.Len(t, envelopes, 3) validateEnvelope := func(spanEvent ptrace.SpanEvent, envelope *contracts.Envelope, targetEnvelopeName string) { assert.Equal(t, targetEnvelopeName, envelope.Name) diff --git a/exporter/datadogexporter/examples_test.go b/exporter/datadogexporter/examples_test.go index 775d86028b6d..e13f354406f7 100644 --- a/exporter/datadogexporter/examples_test.go +++ b/exporter/datadogexporter/examples_test.go @@ -82,7 +82,7 @@ func TestExamples(t *testing.T) { require.NoError(t, err) n, err := f.Write(data) require.NoError(t, err) - require.Equal(t, n, len(data)) + require.Len(t, data, n) require.NoError(t, f.Close()) defer os.RemoveAll(f.Name()) // https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/33594 diff --git a/exporter/datadogexporter/internal/metrics/series_deprecated_test.go b/exporter/datadogexporter/internal/metrics/series_deprecated_test.go index 
6ec075f680d9..4ca397ca8010 100644 --- a/exporter/datadogexporter/internal/metrics/series_deprecated_test.go +++ b/exporter/datadogexporter/internal/metrics/series_deprecated_test.go @@ -51,7 +51,7 @@ func TestDefaultZorkianMetrics(t *testing.T) { assert.Equal(t, "otel.datadog_exporter.metrics.running", *ms[0].Metric) // Assert metrics list length (should be 1) - assert.Equal(t, 1, len(ms)) + assert.Len(t, ms, 1) // Assert timestamp assert.Equal(t, 2.0, *ms[0].Points[0][0]) // Assert value (should always be 1.0) diff --git a/exporter/datadogexporter/internal/metrics/series_test.go b/exporter/datadogexporter/internal/metrics/series_test.go index d49210f3ea49..e195fc271bfc 100644 --- a/exporter/datadogexporter/internal/metrics/series_test.go +++ b/exporter/datadogexporter/internal/metrics/series_test.go @@ -51,7 +51,7 @@ func TestDefaultMetrics(t *testing.T) { assert.Equal(t, "otel.datadog_exporter.metrics.running", ms[0].Metric) // Assert metrics list length (should be 1) - assert.Equal(t, 1, len(ms)) + assert.Len(t, ms, 1) // Assert timestamp assert.Equal(t, int64(2), *ms[0].Points[0].Timestamp) // Assert value (should always be 1.0) @@ -73,7 +73,7 @@ func TestDefaultMetricsWithRuntimeMetrics(t *testing.T) { assert.Equal(t, "otel.datadog_exporter.runtime_metrics.running", ms[0].Metric) // Assert metrics list length (should be 1) - assert.Equal(t, 1, len(ms)) + assert.Len(t, ms, 1) // Assert timestamp assert.Equal(t, int64(2), *ms[0].Points[0].Timestamp) // Assert value (should always be 1.0) diff --git a/exporter/datadogexporter/metrics_exporter_test.go b/exporter/datadogexporter/metrics_exporter_test.go index 4c8cd0ef08ee..e0adc0452e1f 100644 --- a/exporter/datadogexporter/metrics_exporter_test.go +++ b/exporter/datadogexporter/metrics_exporter_test.go @@ -72,7 +72,7 @@ func TestNewExporter(t *testing.T) { testutil.TestMetrics.CopyTo(testMetrics) err = exp.ConsumeMetrics(context.Background(), testMetrics) require.NoError(t, err) - assert.Equal(t, len(server.MetadataChan), 0) + assert.Len(t, server.MetadataChan, 0) cfg.HostMetadata.Enabled = true cfg.HostMetadata.HostnameSource = HostnameSourceFirstResource @@ -393,7 +393,7 @@ func TestNewExporter_Zorkian(t *testing.T) { testutil.TestMetrics.CopyTo(testMetrics) err = exp.ConsumeMetrics(context.Background(), testMetrics) require.NoError(t, err) - assert.Equal(t, len(server.MetadataChan), 0) + assert.Len(t, server.MetadataChan, 0) cfg.HostMetadata.Enabled = true cfg.HostMetadata.HostnameSource = HostnameSourceFirstResource diff --git a/exporter/googlemanagedprometheusexporter/config_test.go b/exporter/googlemanagedprometheusexporter/config_test.go index a69c3427777b..7a1fb2ac38f6 100644 --- a/exporter/googlemanagedprometheusexporter/config_test.go +++ b/exporter/googlemanagedprometheusexporter/config_test.go @@ -31,7 +31,7 @@ func TestLoadConfig(t *testing.T) { require.NoError(t, err) require.NotNil(t, cfg) - assert.Equal(t, len(cfg.Exporters), 2) + assert.Len(t, cfg.Exporters, 2) r0 := cfg.Exporters[component.NewID(metadata.Type)].(*Config) assert.Equal(t, r0, factory.CreateDefaultConfig().(*Config)) diff --git a/exporter/influxdbexporter/writer_test.go b/exporter/influxdbexporter/writer_test.go index ed882ca9be71..4d656ba352b7 100644 --- a/exporter/influxdbexporter/writer_test.go +++ b/exporter/influxdbexporter/writer_test.go @@ -138,9 +138,9 @@ func Test_influxHTTPWriterBatch_maxPayload(t *testing.T) { require.NoError(t, err) if testCase.expectMultipleRequests { - assert.Equal(t, 2, len(httpRequests)) + assert.Len(t, httpRequests, 2) } 
else { - assert.Equal(t, 1, len(httpRequests)) + assert.Len(t, httpRequests, 1) } }) } diff --git a/exporter/otelarrowexporter/internal/arrow/exporter_test.go b/exporter/otelarrowexporter/internal/arrow/exporter_test.go index 79230680f914..411c61bba478 100644 --- a/exporter/otelarrowexporter/internal/arrow/exporter_test.go +++ b/exporter/otelarrowexporter/internal/arrow/exporter_test.go @@ -228,7 +228,7 @@ func TestArrowExporterSuccess(t *testing.T) { case ptrace.Traces: traces, err := testCon.TracesFrom(outputData) require.NoError(t, err) - require.Equal(t, 1, len(traces)) + require.Len(t, traces, 1) otelAssert.Equiv(stdTesting, []json.Marshaler{ compareJSONTraces{testData}, }, []json.Marshaler{ @@ -237,7 +237,7 @@ func TestArrowExporterSuccess(t *testing.T) { case plog.Logs: logs, err := testCon.LogsFrom(outputData) require.NoError(t, err) - require.Equal(t, 1, len(logs)) + require.Len(t, logs, 1) otelAssert.Equiv(stdTesting, []json.Marshaler{ compareJSONLogs{testData}, }, []json.Marshaler{ @@ -246,7 +246,7 @@ func TestArrowExporterSuccess(t *testing.T) { case pmetric.Metrics: metrics, err := testCon.MetricsFrom(outputData) require.NoError(t, err) - require.Equal(t, 1, len(metrics)) + require.Len(t, metrics, 1) otelAssert.Equiv(stdTesting, []json.Marshaler{ compareJSONMetrics{testData}, }, []json.Marshaler{ @@ -547,7 +547,7 @@ func TestArrowExporterStreaming(t *testing.T) { for data := range channel.sendChannel() { traces, err := testCon.TracesFrom(data) require.NoError(t, err) - require.Equal(t, 1, len(traces)) + require.Len(t, traces, 1) actualOutput = append(actualOutput, traces[0]) channel.recv <- statusOKFor(data.BatchId) } @@ -757,7 +757,7 @@ func TestArrowExporterStreamLifetimeAndShutdown(t *testing.T) { for data := range channel.sendChannel() { traces, err := testCon.TracesFrom(data) require.NoError(t, err) - require.Equal(t, 1, len(traces)) + require.Len(t, traces, 1) atomic.AddUint64(&actualCount, 1) channel.recv <- statusOKFor(data.BatchId) } diff --git a/exporter/otelarrowexporter/otelarrow_test.go b/exporter/otelarrowexporter/otelarrow_test.go index acf84c6dc11e..1be964b98401 100644 --- a/exporter/otelarrowexporter/otelarrow_test.go +++ b/exporter/otelarrowexporter/otelarrow_test.go @@ -394,7 +394,7 @@ func TestSendTraces(t *testing.T) { // Test the static metadata md = rcv.getMetadata() require.EqualValues(t, expectedHeader, md.Get("header")) - require.Equal(t, len(md.Get("User-Agent")), 1) + require.Len(t, md.Get("User-Agent"), 1) require.Contains(t, md.Get("User-Agent")[0], "Collector/1.2.3test") // Test the caller's dynamic metadata @@ -567,7 +567,7 @@ func TestSendMetrics(t *testing.T) { mdata := rcv.getMetadata() require.EqualValues(t, mdata.Get("header"), expectedHeader) - require.Equal(t, len(mdata.Get("User-Agent")), 1) + require.Len(t, mdata.Get("User-Agent"), 1) require.Contains(t, mdata.Get("User-Agent")[0], "Collector/1.2.3test") st := status.New(codes.InvalidArgument, "Invalid argument") @@ -858,7 +858,7 @@ func TestSendLogData(t *testing.T) { assert.EqualValues(t, ld, rcv.getLastRequest()) md := rcv.getMetadata() - require.Equal(t, len(md.Get("User-Agent")), 1) + require.Len(t, md.Get("User-Agent"), 1) require.Contains(t, md.Get("User-Agent")[0], "Collector/1.2.3test") st := status.New(codes.InvalidArgument, "Invalid argument") @@ -1186,6 +1186,6 @@ func TestUserDialOptions(t *testing.T) { err = exp.ConsumeTraces(context.Background(), td) assert.NoError(t, err) - require.Equal(t, len(rcv.getMetadata().Get("User-Agent")), 1) + require.Len(t, 
rcv.getMetadata().Get("User-Agent"), 1) require.Contains(t, rcv.getMetadata().Get("User-Agent")[0], testAgent) } diff --git a/exporter/prometheusexporter/collector_test.go b/exporter/prometheusexporter/collector_test.go index 03141fe6bf91..603dab7130c6 100644 --- a/exporter/prometheusexporter/collector_test.go +++ b/exporter/prometheusexporter/collector_test.go @@ -126,7 +126,7 @@ func exemplarsEqual(t *testing.T, otelExemplar pmetric.Exemplar, promExemplar *i } require.Equal(t, givenValue, promExemplar.GetValue()) - require.Equal(t, 2, len(promExemplar.GetLabel())) + require.Len(t, promExemplar.GetLabel(), 2) ml := make(map[string]string) for _, l := range promExemplar.GetLabel() { ml[l.GetName()] = l.GetValue() @@ -174,7 +174,7 @@ func TestConvertDoubleHistogramExemplar(t *testing.T) { buckets := m.GetHistogram().GetBucket() - require.Equal(t, 3, len(buckets)) + require.Len(t, buckets, 3) require.Equal(t, 3.0, buckets[0].GetExemplar().GetValue()) exemplarsEqual(t, promExporterExemplars, buckets[0].GetExemplar()) diff --git a/exporter/prometheusremotewriteexporter/exporter_test.go b/exporter/prometheusremotewriteexporter/exporter_test.go index 052ab03cff9a..00c0ba318ffe 100644 --- a/exporter/prometheusremotewriteexporter/exporter_test.go +++ b/exporter/prometheusremotewriteexporter/exporter_test.go @@ -269,7 +269,7 @@ func Test_export(t *testing.T) { ok := proto.Unmarshal(dest, writeReq) require.NoError(t, ok) - assert.EqualValues(t, 1, len(writeReq.Timeseries)) + assert.Len(t, writeReq.Timeseries, 1) require.NotNil(t, writeReq.GetTimeseries()) assert.Equal(t, *ts1, writeReq.GetTimeseries()[0]) w.WriteHeader(code) @@ -457,7 +457,7 @@ func Test_PushMetrics(t *testing.T) { wr := &prompb.WriteRequest{} ok := proto.Unmarshal(dest, wr) require.Nil(t, ok) - assert.EqualValues(t, expected, len(wr.Timeseries)) + assert.Len(t, wr.Timeseries, expected) if isStaleMarker { assert.True(t, value.IsStaleNaN(wr.Timeseries[0].Samples[0].Value)) } @@ -1007,10 +1007,10 @@ func TestWALOnExporterRoundTrip(t *testing.T) { assert.NoError(t, err) reqs = append(reqs, req) } - assert.Equal(t, 1, len(reqs)) + assert.Len(t, reqs, 1) // We MUST have 2 time series as were passed into tsMap. 
gotFromWAL := reqs[0] - assert.Equal(t, 2, len(gotFromWAL.Timeseries)) + assert.Len(t, gotFromWAL.Timeseries, 2) want := &prompb.WriteRequest{ Timeseries: orderBySampleTimestamp([]prompb.TimeSeries{ *ts1, *ts2, diff --git a/exporter/prometheusremotewriteexporter/helper_test.go b/exporter/prometheusremotewriteexporter/helper_test.go index afd2a4958ae9..d0454d4cb98b 100644 --- a/exporter/prometheusremotewriteexporter/helper_test.go +++ b/exporter/prometheusremotewriteexporter/helper_test.go @@ -66,7 +66,7 @@ func Test_batchTimeSeries(t *testing.T) { } assert.NoError(t, err) - assert.Equal(t, tt.numExpectedRequests, len(requests)) + assert.Len(t, requests, tt.numExpectedRequests) if tt.numExpectedRequests <= 1 { assert.Equal(t, math.MaxInt, state.nextTimeSeriesBufferSize) assert.Equal(t, math.MaxInt, state.nextMetricMetadataBufferSize) @@ -100,7 +100,7 @@ func Test_batchTimeSeriesUpdatesStateForLargeBatches(t *testing.T) { requests, err := batchTimeSeries(tsMap1, 1000000, nil, &state) assert.NoError(t, err) - assert.Equal(t, 18, len(requests)) + assert.Len(t, requests, 18) assert.Equal(t, len(requests[len(requests)-2].Timeseries)*2, state.nextTimeSeriesBufferSize) assert.Equal(t, math.MaxInt, state.nextMetricMetadataBufferSize) assert.Equal(t, 36, state.nextRequestBufferSize) @@ -134,7 +134,7 @@ func Benchmark_batchTimeSeries(b *testing.B) { for i := 0; i < b.N; i++ { requests, err := batchTimeSeries(tsMap1, 1000000, nil, &state) assert.NoError(b, err) - assert.Equal(b, 18, len(requests)) + assert.Len(b, requests, 18) } } diff --git a/exporter/signalfxexporter/factory_test.go b/exporter/signalfxexporter/factory_test.go index 89b0e1b2308a..7385d00b15b8 100644 --- a/exporter/signalfxexporter/factory_test.go +++ b/exporter/signalfxexporter/factory_test.go @@ -143,13 +143,13 @@ func TestDefaultTranslationRules(t *testing.T) { dps, ok = metrics["system.disk.operations.total"] require.True(t, ok, "system.disk.operations.total metrics not found") require.Len(t, dps, 4) - require.Equal(t, 2, len(dps[0].Dimensions)) + require.Len(t, dps[0].Dimensions, 2) // system.disk.io.total new metric calculation dps, ok = metrics["system.disk.io.total"] require.True(t, ok, "system.disk.io.total metrics not found") require.Len(t, dps, 2) - require.Equal(t, 2, len(dps[0].Dimensions)) + require.Len(t, dps[0].Dimensions, 2) for _, dp := range dps { var directionFound bool for _, dim := range dp.Dimensions { @@ -173,20 +173,20 @@ func TestDefaultTranslationRules(t *testing.T) { require.True(t, ok, "disk_ops.total metrics not found") require.Len(t, dps, 1) require.Equal(t, int64(8e3), *dps[0].Value.IntValue) - require.Equal(t, 1, len(dps[0].Dimensions)) + require.Len(t, dps[0].Dimensions, 1) requireDimension(t, dps[0].Dimensions, "host", "host0") // system.network.io.total new metric calculation dps, ok = metrics["system.network.io.total"] require.True(t, ok, "system.network.io.total metrics not found") require.Len(t, dps, 2) - require.Equal(t, 4, len(dps[0].Dimensions)) + require.Len(t, dps[0].Dimensions, 4) // system.network.packets.total new metric calculation dps, ok = metrics["system.network.packets.total"] require.True(t, ok, "system.network.packets.total metrics not found") require.Len(t, dps, 1) - require.Equal(t, 4, len(dps[0].Dimensions)) + require.Len(t, dps[0].Dimensions, 4) require.Equal(t, int64(350), *dps[0].Value.IntValue) requireDimension(t, dps[0].Dimensions, "direction", "receive") @@ -194,7 +194,7 @@ func TestDefaultTranslationRules(t *testing.T) { dps, ok = metrics["network.total"] require.True(t, 
ok, "network.total metrics not found") require.Len(t, dps, 1) - require.Equal(t, 3, len(dps[0].Dimensions)) + require.Len(t, dps[0].Dimensions, 3) require.Equal(t, int64(10e9), *dps[0].Value.IntValue) } @@ -462,13 +462,13 @@ func TestDefaultDiskTranslations(t *testing.T) { du, ok := m["disk.utilization"] require.True(t, ok) - require.Equal(t, 4, len(du[0].Dimensions)) + require.Len(t, du[0].Dimensions, 4) // cheap test for pct conversion require.True(t, *du[0].Value.DoubleValue > 1) dsu, ok := m["disk.summary_utilization"] require.True(t, ok) - require.Equal(t, 3, len(dsu[0].Dimensions)) + require.Len(t, dsu[0].Dimensions, 3) require.True(t, *dsu[0].Value.DoubleValue > 1) } @@ -506,16 +506,16 @@ func TestDefaultCPUTranslations(t *testing.T) { } cpuUtil := m["cpu.utilization"] - require.Equal(t, 1, len(cpuUtil)) + require.Len(t, cpuUtil, 1) for _, pt := range cpuUtil { require.Equal(t, 66, int(*pt.Value.DoubleValue)) } cpuUtilPerCore := m["cpu.utilization_per_core"] - require.Equal(t, 8, len(cpuUtilPerCore)) + require.Len(t, cpuUtilPerCore, 8) cpuNumProcessors := m["cpu.num_processors"] - require.Equal(t, 1, len(cpuNumProcessors)) + require.Len(t, cpuNumProcessors, 1) cpuStateMetrics := []string{"cpu.idle", "cpu.interrupt", "cpu.system", "cpu.user"} for _, metric := range cpuStateMetrics { @@ -583,7 +583,7 @@ func TestDefaultExcludesTranslated(t *testing.T) { // the default cpu.utilization metric is added after applying the default translations // (because cpu.utilization_per_core is supplied) and should not be excluded - require.Equal(t, 1, len(dps)) + require.Len(t, dps, 1) require.Equal(t, "cpu.utilization", dps[0].Metric) } @@ -603,7 +603,7 @@ func TestDefaultExcludes_not_translated(t *testing.T) { md := getMetrics(metrics) require.Equal(t, 69, md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().Len()) dps := converter.MetricsToSignalFxV2(md) - require.Equal(t, 0, len(dps)) + require.Len(t, dps, 0) } // Benchmark test for default translation rules on an example hostmetrics dataset. 
diff --git a/exporter/signalfxexporter/internal/apm/tracetracker/tracker_test.go b/exporter/signalfxexporter/internal/apm/tracetracker/tracker_test.go index b7fb27695550..0498655bfaf8 100644 --- a/exporter/signalfxexporter/internal/apm/tracetracker/tracker_test.go +++ b/exporter/signalfxexporter/internal/apm/tracetracker/tracker_test.go @@ -150,7 +150,7 @@ func TestCorrelationEmptyEnvironment(t *testing.T) { a.ProcessTraces(context.Background(), fakeTraces) cors := correlationClient.getCorrelations() - assert.Equal(t, 4, len(cors), "expected 4 correlations to be made") + assert.Len(t, cors, 4, "expected 4 correlations to be made") for _, c := range cors { assert.Contains(t, []string{"container_id", "kubernetes_pod_uid", "host", "AWSUniqueId"}, c.DimName) assert.Contains(t, []string{"test", "randomAWSUniqueId", "testk8sPodUID", "testContainerID"}, c.DimValue) @@ -206,5 +206,5 @@ func TestCorrelationUpdates(t *testing.T) { numHostIDDimCorrelations := len(hostIDDims)*(numEnvironments+numServices) + 4 /* 4 deletes for service & environment fetched at startup */ numContainerLevelCorrelations := 2 * len(containerLevelIDDims) totalExpectedCorrelations := numHostIDDimCorrelations + numContainerLevelCorrelations - assert.Equal(t, totalExpectedCorrelations, len(correlationClient.getCorrelations()), "# of correlation requests do not match") + assert.Len(t, correlationClient.getCorrelations(), totalExpectedCorrelations, "# of correlation requests do not match") } diff --git a/exporter/signalfxexporter/internal/hostmetadata/metadata_test.go b/exporter/signalfxexporter/internal/hostmetadata/metadata_test.go index 422723161775..6a35073409a9 100644 --- a/exporter/signalfxexporter/internal/hostmetadata/metadata_test.go +++ b/exporter/signalfxexporter/internal/hostmetadata/metadata_test.go @@ -255,10 +255,10 @@ func TestSyncMetadata(t *testing.T) { syncer.Sync(tt.metricsData) if tt.wantMetadataUpdate != nil { - require.Equal(t, 1, len(dimClient.getMetadataUpdates())) + require.Len(t, dimClient.getMetadataUpdates(), 1) require.EqualValues(t, tt.wantMetadataUpdate, dimClient.getMetadataUpdates()[0]) } else { - require.Equal(t, 0, len(dimClient.getMetadataUpdates())) + require.Len(t, dimClient.getMetadataUpdates(), 0) } require.Equal(t, len(tt.wantLogs), logs.Len()) diff --git a/exporter/signalfxexporter/internal/translation/converter_test.go b/exporter/signalfxexporter/internal/translation/converter_test.go index aecb3858c2e4..c8a18acb676d 100644 --- a/exporter/signalfxexporter/internal/translation/converter_test.go +++ b/exporter/signalfxexporter/internal/translation/converter_test.go @@ -1094,7 +1094,7 @@ func Test_MetricDataToSignalFxV2WithHistogramBuckets(t *testing.T) { // of those is not deterministic. 
sortDimensions(tt.wantSfxDataPoints) sortDimensions(gotSfxDataPoints) - assert.Equal(t, tt.wantCount, len(gotSfxDataPoints)) + assert.Len(t, gotSfxDataPoints, tt.wantCount) assert.Equal(t, tt.wantSfxDataPoints, gotSfxDataPoints) }) } @@ -1193,7 +1193,7 @@ func TestInvalidNumberOfDimensions(t *testing.T) { } c, err := NewMetricsConverter(logger, nil, nil, nil, "_-.", false, true) require.NoError(t, err) - assert.EqualValues(t, 1, len(c.MetricsToSignalFxV2(md))) + assert.Len(t, c.MetricsToSignalFxV2(md), 1) // No log message should be printed require.Equal(t, 0, observedLogs.Len()) @@ -1216,7 +1216,7 @@ func TestInvalidNumberOfDimensions(t *testing.T) { Value: fmt.Sprint("dim_val_", i), }) } - assert.EqualValues(t, 0, len(c.MetricsToSignalFxV2(mdInvalid))) + assert.Len(t, c.MetricsToSignalFxV2(mdInvalid), 0) require.Equal(t, 1, observedLogs.Len()) assert.Equal(t, "dropping datapoint", observedLogs.All()[0].Message) assert.ElementsMatch(t, []zap.Field{ diff --git a/exporter/signalfxexporter/internal/translation/translator_test.go b/exporter/signalfxexporter/internal/translation/translator_test.go index f59bbc6477db..23e13981f9f0 100644 --- a/exporter/signalfxexporter/internal/translation/translator_test.go +++ b/exporter/signalfxexporter/internal/translation/translator_test.go @@ -2027,7 +2027,7 @@ func TestNewCalculateNewMetricErrors(t *testing.T) { }}, 1, make(chan struct{})) require.NoError(t, err) tr := mt.TranslateDataPoints(logger, dps) - require.Equal(t, 2, len(tr)) + require.Len(t, tr, 2) if test.wantErr == "" { require.Equal(t, 0, observedLogs.Len()) } else { @@ -2086,7 +2086,7 @@ func TestCalcNewMetricInputPairs_SameDims(t *testing.T) { }, } pairs := calcNewMetricInputPairs(pts, rule) - require.Equal(t, 1, len(pairs)) + require.Len(t, pairs, 1) pair := pairs[0] require.Equal(t, "m1", pair[0].Metric) require.Equal(t, "m2", pair[1].Metric) @@ -2149,7 +2149,7 @@ func TestNewMetricInputPairs_MultiPairs(t *testing.T) { }, } pairs := calcNewMetricInputPairs(pts, rule) - require.Equal(t, 2, len(pairs)) + require.Len(t, pairs, 2) pair1 := pairs[0] require.EqualValues(t, 1, *pair1[0].Value.IntValue) require.EqualValues(t, 2, *pair1[1].Value.IntValue) @@ -2528,7 +2528,7 @@ func TestDeltaTranslatorNoMatchingMapping(t *testing.T) { c := testConverter(t, map[string]string{"foo": "bar"}) md := intMD(1, 1) idx := indexPts(c.MetricsToSignalFxV2(md)) - require.Equal(t, 1, len(idx)) + require.Len(t, idx, 1) } func TestDeltaTranslatorMismatchedValueTypes(t *testing.T) { @@ -2541,7 +2541,7 @@ func TestDeltaTranslatorMismatchedValueTypes(t *testing.T) { dblTS("cpu0", "user", 1, 1, 1, md2.SetEmptySum().DataPoints().AppendEmpty()) pts := c.MetricsToSignalFxV2(wrapMetric(md2)) idx := indexPts(pts) - require.Equal(t, 1, len(idx)) + require.Len(t, idx, 1) } func requireDeltaMetricOk(t *testing.T, md1, md2, md3 pmetric.Metrics) ( @@ -2551,11 +2551,11 @@ func requireDeltaMetricOk(t *testing.T, md1, md2, md3 pmetric.Metrics) ( dp1 := c.MetricsToSignalFxV2(md1) m1 := indexPts(dp1) - require.Equal(t, 1, len(m1)) + require.Len(t, m1, 1) dp2 := c.MetricsToSignalFxV2(md2) m2 := indexPts(dp2) - require.Equal(t, 2, len(m2)) + require.Len(t, m2, 2) origPts, ok := m2["system.cpu.time"] require.True(t, ok) @@ -2570,7 +2570,7 @@ func requireDeltaMetricOk(t *testing.T, md1, md2, md3 pmetric.Metrics) ( dp3 := c.MetricsToSignalFxV2(md3) m3 := indexPts(dp3) - require.Equal(t, 2, len(m3)) + require.Len(t, m3, 2) deltaPts2, ok := m3["system.cpu.delta"] require.True(t, ok) diff --git a/exporter/splunkhecexporter/client_test.go 
b/exporter/splunkhecexporter/client_test.go index dacbfb1beca7..fc25ba708025 100644 --- a/exporter/splunkhecexporter/client_test.go +++ b/exporter/splunkhecexporter/client_test.go @@ -786,7 +786,7 @@ func TestReceiveLogs(t *testing.T) { return } require.NoError(t, err) - require.Equal(t, test.want.numBatches, len(got)) + require.Len(t, got, test.want.numBatches) for i, wantBatch := range test.want.batches { require.NotZero(t, got[i]) @@ -1181,7 +1181,7 @@ func TestReceiveBatchedMetrics(t *testing.T) { } if test.want.numBatches == 0 { - assert.Equal(t, 0, len(got)) + assert.Len(t, got, 0) return } @@ -1670,7 +1670,7 @@ func Test_pushLogData_ShouldAddHeadersForProfilingData(t *testing.T) { require.NoError(t, err) err = c.pushLogData(context.Background(), profilingData) require.NoError(t, err) - assert.Equal(t, 30, len(*headers)) + assert.Len(t, *headers, 30) profilingCount, nonProfilingCount := 0, 0 for i := range *headers { diff --git a/exporter/splunkhecexporter/integration_test.go b/exporter/splunkhecexporter/integration_test.go index d064207bd24d..3c2a930dbee9 100644 --- a/exporter/splunkhecexporter/integration_test.go +++ b/exporter/splunkhecexporter/integration_test.go @@ -252,7 +252,7 @@ func logsTest(t *testing.T, config *Config, url *url.URL, test testCfg) { waitForEventToBeIndexed() events := integrationtestutils.CheckEventsFromSplunk("index="+test.config.index+" *", test.startTime) - assert.Equal(t, len(events), 1) + assert.Len(t, events, 1) // check events fields data, ok := events[0].(map[string]any) assert.True(t, ok, "Invalid event format") @@ -275,7 +275,7 @@ func metricsTest(t *testing.T, config *Config, url *url.URL, test testCfg) { waitForEventToBeIndexed() events := integrationtestutils.CheckMetricsFromSplunk(test.config.index, test.config.event) - assert.Equal(t, len(events), 1, "Events length is less than 1. No metrics found") + assert.Len(t, events, 1, "Events length is less than 1. 
No metrics found") } func tracesTest(t *testing.T, config *Config, url *url.URL, test testCfg) { @@ -291,7 +291,7 @@ func tracesTest(t *testing.T, config *Config, url *url.URL, test testCfg) { waitForEventToBeIndexed() events := integrationtestutils.CheckEventsFromSplunk("index="+test.config.index+" *", test.startTime) - assert.Equal(t, len(events), 1) + assert.Len(t, events, 1) // check fields data, ok := events[0].(map[string]any) assert.True(t, ok, "Invalid event format") diff --git a/exporter/tencentcloudlogserviceexporter/logsdata_to_logservice_test.go b/exporter/tencentcloudlogserviceexporter/logsdata_to_logservice_test.go index 3c388dc9dbcc..4410df27a069 100644 --- a/exporter/tencentcloudlogserviceexporter/logsdata_to_logservice_test.go +++ b/exporter/tencentcloudlogserviceexporter/logsdata_to_logservice_test.go @@ -89,7 +89,7 @@ func TestConvertLogs(t *testing.T) { totalLogCount := 10 validLogCount := totalLogCount - 1 gotLogs := convertLogs(createLogData(10)) - assert.Equal(t, len(gotLogs), 9) + assert.Len(t, gotLogs, 9) gotLogPairs := make([][]logKeyValuePair, 0, len(gotLogs)) diff --git a/extension/ackextension/inmemory_test.go b/extension/ackextension/inmemory_test.go index 5d9f4333db74..1b9dc1fe0ee5 100644 --- a/extension/ackextension/inmemory_test.go +++ b/extension/ackextension/inmemory_test.go @@ -83,7 +83,7 @@ func TestExtensionAck_ProcessEvents_Concurrency(t *testing.T) { maps.Copy(map1, map2) maps.Copy(map1, map3) - require.Equal(t, len(map1), 300) + require.Len(t, map1, 300) } func TestExtensionAck_ProcessEvents_EventsUnAcked(t *testing.T) { @@ -104,7 +104,7 @@ func TestExtensionAck_ProcessEvents_EventsUnAcked(t *testing.T) { // non-acked events should be return false for i := 0; i < 100; i++ { result := ext.QueryAcks(fmt.Sprintf("part-%d", i), []uint64{0, 1, 2}) - require.Equal(t, len(result), 3) + require.Len(t, result, 3) require.False(t, result[0]) require.False(t, result[1]) require.False(t, result[2]) @@ -140,13 +140,13 @@ func TestExtensionAck_ProcessEvents_EventsAcked(t *testing.T) { for i := 0; i < 100; i++ { if i%2 == 0 { result := ext.QueryAcks(fmt.Sprintf("part-%d", i), []uint64{1, 2, 3}) - require.Equal(t, len(result), 3) + require.Len(t, result, 3) require.False(t, result[1]) require.True(t, result[2]) require.False(t, result[3]) } else { result := ext.QueryAcks(fmt.Sprintf("part-%d", i), []uint64{1, 2, 3}) - require.Equal(t, len(result), 3) + require.Len(t, result, 3) require.True(t, result[1]) require.False(t, result[2]) require.True(t, result[3]) @@ -183,13 +183,13 @@ func TestExtensionAck_QueryAcks_Unidempotent(t *testing.T) { for i := 0; i < 100; i++ { if i%2 == 0 { result := ext.QueryAcks(fmt.Sprintf("part-%d", i), []uint64{1, 2, 3}) - require.Equal(t, len(result), 3) + require.Len(t, result, 3) require.False(t, result[1]) require.True(t, result[2]) require.False(t, result[3]) } else { result := ext.QueryAcks(fmt.Sprintf("part-%d", i), []uint64{1, 2, 3}) - require.Equal(t, len(result), 3) + require.Len(t, result, 3) require.True(t, result[1]) require.False(t, result[2]) require.True(t, result[3]) @@ -199,7 +199,7 @@ func TestExtensionAck_QueryAcks_Unidempotent(t *testing.T) { // querying the same acked events should result in false for i := 0; i < 100; i++ { result := ext.QueryAcks(fmt.Sprintf("part-%d", i), []uint64{1, 2, 3}) - require.Equal(t, len(result), 3) + require.Len(t, result, 3) require.False(t, result[1]) require.False(t, result[2]) require.False(t, result[3]) @@ -233,7 +233,7 @@ func TestExtensionAckAsync(t *testing.T) { // non-acked 
events should be return false for i := 0; i < partitionCount; i++ { result := ext.QueryAcks(fmt.Sprintf("part-%d", i), []uint64{1, 2, 3}) - require.Equal(t, len(result), 3) + require.Len(t, result, 3) require.False(t, result[1]) require.False(t, result[2]) require.False(t, result[3]) @@ -259,13 +259,13 @@ func TestExtensionAckAsync(t *testing.T) { for i := 0; i < partitionCount; i++ { if i%2 == 0 { result := ext.QueryAcks(fmt.Sprintf("part-%d", i), []uint64{1, 2, 3}) - require.Equal(t, len(result), 3) + require.Len(t, result, 3) require.False(t, result[1]) require.True(t, result[2]) require.False(t, result[3]) } else { result := ext.QueryAcks(fmt.Sprintf("part-%d", i), []uint64{1, 2, 3}) - require.Equal(t, len(result), 3) + require.Len(t, result, 3) require.True(t, result[1]) require.False(t, result[2]) require.True(t, result[3]) @@ -285,7 +285,7 @@ func TestExtensionAckAsync(t *testing.T) { for i := 0; i < partitionCount; i++ { result := <-resultChan - require.Equal(t, len(result), 3) + require.Len(t, result, 3) require.False(t, result[1]) require.False(t, result[2]) require.False(t, result[3]) diff --git a/extension/observer/ecsobserver/fetcher_test.go b/extension/observer/ecsobserver/fetcher_test.go index 937646415292..e57ab4c4d24a 100644 --- a/extension/observer/ecsobserver/fetcher_test.go +++ b/extension/observer/ecsobserver/fetcher_test.go @@ -65,7 +65,7 @@ func TestFetcher_FetchAndDecorate(t *testing.T) { ctx := context.Background() tasks, err := f.fetchAndDecorate(ctx) require.NoError(t, err) - assert.Equal(t, nTasks, len(tasks)) + assert.Len(t, tasks, nTasks) assert.Equal(t, "s0", aws.StringValue(tasks[0].Service.ServiceArn)) } @@ -78,7 +78,7 @@ func TestFetcher_GetDiscoverableTasks(t *testing.T) { ctx := context.Background() tasks, err := f.getDiscoverableTasks(ctx) require.NoError(t, err) - assert.Equal(t, nTasks, len(tasks)) + assert.Len(t, tasks, nTasks) }) t.Run("with non discoverable tasks", func(t *testing.T) { @@ -106,7 +106,7 @@ func TestFetcher_GetDiscoverableTasks(t *testing.T) { require.NoError(t, err) // Expect 2 tasks, with LaunchType Fargate and EC2 with non-nil ContainerInstanceArn - assert.Equal(t, 2, len(tasks)) + assert.Len(t, tasks, 2) assert.Equal(t, ecs.LaunchTypeFargate, aws.StringValue(tasks[0].LaunchType)) assert.Equal(t, ecs.LaunchTypeEc2, aws.StringValue(tasks[1].LaunchType)) }) @@ -178,7 +178,7 @@ func TestFetcher_AttachContainerInstance(t *testing.T) { ctx := context.Background() rawTasks, err := f.getDiscoverableTasks(ctx) require.NoError(t, err) - assert.Equal(t, nTasks, len(rawTasks)) + assert.Len(t, rawTasks, nTasks) tasks, err := f.attachTaskDefinition(ctx, rawTasks) require.NoError(t, err) @@ -216,7 +216,7 @@ func TestFetcher_AttachContainerInstance(t *testing.T) { ctx := context.Background() rawTasks, err := f.getDiscoverableTasks(ctx) require.NoError(t, err) - assert.Equal(t, nTasks, len(rawTasks)) + assert.Len(t, rawTasks, nTasks) tasks, err := f.attachTaskDefinition(ctx, rawTasks) require.NoError(t, err) @@ -238,7 +238,7 @@ func TestFetcher_GetAllServices(t *testing.T) { ctx := context.Background() services, err := f.getAllServices(ctx) require.NoError(t, err) - assert.Equal(t, nServices, len(services)) + assert.Len(t, services, nServices) } func TestFetcher_AttachService(t *testing.T) { diff --git a/extension/observer/ecsobserver/internal/ecsmock/service_test.go b/extension/observer/ecsobserver/internal/ecsmock/service_test.go index acaec6b13426..222db1d71303 100644 --- a/extension/observer/ecsobserver/internal/ecsmock/service_test.go +++ 
b/extension/observer/ecsobserver/internal/ecsmock/service_test.go @@ -158,7 +158,7 @@ func TestCluster_DescribeInstancesWithContext(t *testing.T) { req := &ec2.DescribeInstancesInput{InstanceIds: ids} res, err := c.DescribeInstancesWithContext(ctx, req) require.NoError(t, err) - assert.Equal(t, nIDs, len(res.Reservations[0].Instances)) + assert.Len(t, res.Reservations[0].Instances, nIDs) }) t.Run("invalid id", func(t *testing.T) { @@ -198,8 +198,8 @@ func TestCluster_DescribeContainerInstancesWithContext(t *testing.T) { req := &ecs.DescribeContainerInstancesInput{ContainerInstances: ids} res, err := c.DescribeContainerInstancesWithContext(ctx, req) require.NoError(t, err) - assert.Equal(t, nIDs, len(res.ContainerInstances)) - assert.Equal(t, 0, len(res.Failures)) + assert.Len(t, res.ContainerInstances, nIDs) + assert.Len(t, res.Failures, 0) }) t.Run("not found", func(t *testing.T) { diff --git a/extension/observer/hostobserver/extension_test.go b/extension/observer/hostobserver/extension_test.go index 3ac78b6a8a6e..3a82fdeb78b6 100644 --- a/extension/observer/hostobserver/extension_test.go +++ b/extension/observer/hostobserver/extension_test.go @@ -80,7 +80,7 @@ func TestHostObserver(t *testing.T) { t.Run(tt.name, func(t *testing.T) { hostPorts, notifier := tt.setup() if tt.errorListingConnections { - require.Equal(t, len(notifier.endpointsMap), 0) + require.Len(t, notifier.endpointsMap, 0) return } diff --git a/extension/opampextension/registry_test.go b/extension/opampextension/registry_test.go index 2e395d50e24d..d75afea183e7 100644 --- a/extension/opampextension/registry_test.go +++ b/extension/opampextension/registry_test.go @@ -101,7 +101,7 @@ func TestRegistry_ProcessMessage(t *testing.T) { // If we did not skip sending on blocked channels, we'd expect this to never return. 
registry.ProcessMessage(customMessage) - require.Equal(t, 0, len(sender.Message())) + require.Len(t, sender.Message(), 0) }) t.Run("Callback is called only for its own capability", func(t *testing.T) { diff --git a/extension/storage/filestorage/extension_test.go b/extension/storage/filestorage/extension_test.go index a384423c60e9..1d29bdfb0988 100644 --- a/extension/storage/filestorage/extension_test.go +++ b/extension/storage/filestorage/extension_test.go @@ -326,7 +326,7 @@ func TestCompaction(t *testing.T) { files, err := os.ReadDir(tempDir) require.NoError(t, err) - require.Equal(t, 1, len(files)) + require.Len(t, files, 1) file := files[0] path := filepath.Join(tempDir, file.Name()) @@ -417,7 +417,7 @@ func TestCompactionRemoveTemp(t *testing.T) { // check if only db exists in tempDir files, err := os.ReadDir(tempDir) require.NoError(t, err) - require.Equal(t, 1, len(files)) + require.Len(t, files, 1) fileName := files[0].Name() // perform compaction in the same directory @@ -432,7 +432,7 @@ func TestCompactionRemoveTemp(t *testing.T) { // check if only db exists in tempDir files, err = os.ReadDir(tempDir) require.NoError(t, err) - require.Equal(t, 1, len(files)) + require.Len(t, files, 1) require.Equal(t, fileName, files[0].Name()) // perform compaction in different directory @@ -449,7 +449,7 @@ func TestCompactionRemoveTemp(t *testing.T) { // check if emptyTempDir is empty after compaction files, err = os.ReadDir(emptyTempDir) require.NoError(t, err) - require.Equal(t, 0, len(files)) + require.Len(t, files, 0) } func TestCleanupOnStart(t *testing.T) { @@ -485,7 +485,7 @@ func TestCleanupOnStart(t *testing.T) { files, err := os.ReadDir(tempDir) require.NoError(t, err) - require.Equal(t, 1, len(files)) + require.Len(t, files, 1) } func TestCompactionOnStart(t *testing.T) { diff --git a/extension/storage/storagetest/host_test.go b/extension/storage/storagetest/host_test.go index ab55eef6948e..65e19d536e1a 100644 --- a/extension/storage/storagetest/host_test.go +++ b/extension/storage/storagetest/host_test.go @@ -11,7 +11,7 @@ import ( ) func TestStorageHostWithNone(t *testing.T) { - require.Equal(t, 0, len(NewStorageHost().GetExtensions())) + require.Len(t, NewStorageHost().GetExtensions(), 0) } func TestStorageHostWithOne(t *testing.T) { @@ -20,7 +20,7 @@ func TestStorageHostWithOne(t *testing.T) { host := NewStorageHost().WithInMemoryStorageExtension("one") exts := host.GetExtensions() - require.Equal(t, 1, len(exts)) + require.Len(t, exts, 1) extOne, exists := exts[storageID] require.True(t, exists) @@ -39,7 +39,7 @@ func TestStorageHostWithTwo(t *testing.T) { WithFileBackedStorageExtension("two", t.TempDir()) exts := host.GetExtensions() - require.Equal(t, 2, len(exts)) + require.Len(t, exts, 2) extOne, exists := exts[storageOneID] require.True(t, exists) @@ -67,7 +67,7 @@ func TestStorageHostWithMixed(t *testing.T) { WithNonStorageExtension("non-storage") exts := host.GetExtensions() - require.Equal(t, 3, len(exts)) + require.Len(t, exts, 3) extOne, exists := exts[storageOneID] require.True(t, exists) diff --git a/internal/aws/cwlogs/pusher_test.go b/internal/aws/cwlogs/pusher_test.go index 9b4369798530..57e04022841e 100644 --- a/internal/aws/cwlogs/pusher_test.go +++ b/internal/aws/cwlogs/pusher_test.go @@ -32,7 +32,7 @@ func TestValidateLogEventWithMutating(t *testing.T) { err := logEvent.Validate(zap.NewNop()) assert.NoError(t, err) assert.True(t, *logEvent.InputLogEvent.Timestamp > int64(0)) - assert.Equal(t, 64-perEventHeaderBytes, len(*logEvent.InputLogEvent.Message)) + 
assert.Len(t, *logEvent.InputLogEvent.Message, 64-perEventHeaderBytes) maxEventPayloadBytes = defaultMaxEventPayloadBytes } @@ -133,7 +133,7 @@ func TestPusher_newLogEventBatch(t *testing.T) { assert.Equal(t, int64(0), logEventBatch.maxTimestampMs) assert.Equal(t, int64(0), logEventBatch.minTimestampMs) assert.Equal(t, 0, logEventBatch.byteTotal) - assert.Equal(t, 0, len(logEventBatch.putLogEventsInput.LogEvents)) + assert.Len(t, logEventBatch.putLogEventsInput.LogEvents, 0) assert.Equal(t, p.logStreamName, logEventBatch.putLogEventsInput.LogStreamName) assert.Equal(t, p.logGroupName, logEventBatch.putLogEventsInput.LogGroupName) assert.Equal(t, (*string)(nil), logEventBatch.putLogEventsInput.SequenceToken) @@ -149,29 +149,29 @@ func TestPusher_addLogEventBatch(t *testing.T) { p.logEventBatch.putLogEventsInput.LogEvents = append(p.logEventBatch.putLogEventsInput.LogEvents, logEvent.InputLogEvent) } - assert.Equal(t, c, len(p.logEventBatch.putLogEventsInput.LogEvents)) + assert.Len(t, p.logEventBatch.putLogEventsInput.LogEvents, c) assert.NotNil(t, p.addLogEvent(logEvent)) // the actual log event add operation happens after the func newLogEventBatchIfNeeded - assert.Equal(t, 1, len(p.logEventBatch.putLogEventsInput.LogEvents)) + assert.Len(t, p.logEventBatch.putLogEventsInput.LogEvents, 1) p.logEventBatch.byteTotal = maxRequestPayloadBytes - logEvent.eventPayloadBytes() + 1 assert.NotNil(t, p.addLogEvent(logEvent)) - assert.Equal(t, 1, len(p.logEventBatch.putLogEventsInput.LogEvents)) + assert.Len(t, p.logEventBatch.putLogEventsInput.LogEvents, 1) p.logEventBatch.minTimestampMs, p.logEventBatch.maxTimestampMs = timestampMs, timestampMs assert.NotNil(t, p.addLogEvent(NewEvent(timestampMs+(time.Hour*24+time.Millisecond*1).Nanoseconds()/1e6, msg))) - assert.Equal(t, 1, len(p.logEventBatch.putLogEventsInput.LogEvents)) + assert.Len(t, p.logEventBatch.putLogEventsInput.LogEvents, 1) assert.Nil(t, p.addLogEvent(nil)) - assert.Equal(t, 1, len(p.logEventBatch.putLogEventsInput.LogEvents)) + assert.Len(t, p.logEventBatch.putLogEventsInput.LogEvents, 1) assert.NotNil(t, p.addLogEvent(logEvent)) - assert.Equal(t, 1, len(p.logEventBatch.putLogEventsInput.LogEvents)) + assert.Len(t, p.logEventBatch.putLogEventsInput.LogEvents, 1) p.logEventBatch.byteTotal = 1 assert.Nil(t, p.addLogEvent(nil)) - assert.Equal(t, 1, len(p.logEventBatch.putLogEventsInput.LogEvents)) + assert.Len(t, p.logEventBatch.putLogEventsInput.LogEvents, 1) } @@ -256,8 +256,8 @@ func TestMultiStreamPusher(t *testing.T) { mockCwAPI.AssertNumberOfCalls(t, "CreateLogStream", 1) mockCwAPI.AssertNumberOfCalls(t, "PutLogEvents", 1) - assert.Equal(t, 1, len(inputs)) - assert.Equal(t, 2, len(inputs[0].LogEvents)) + assert.Len(t, inputs, 1) + assert.Len(t, inputs[0].LogEvents, 2) assert.Equal(t, "foo", *inputs[0].LogGroupName) assert.Equal(t, "bar", *inputs[0].LogStreamName) @@ -272,8 +272,8 @@ func TestMultiStreamPusher(t *testing.T) { mockCwAPI.AssertNumberOfCalls(t, "CreateLogStream", 2) mockCwAPI.AssertNumberOfCalls(t, "PutLogEvents", 2) - assert.Equal(t, 2, len(inputs)) - assert.Equal(t, 1, len(inputs[1].LogEvents)) + assert.Len(t, inputs, 2) + assert.Len(t, inputs[1].LogEvents, 1) assert.Equal(t, "foo", *inputs[1].LogGroupName) assert.Equal(t, "bar2", *inputs[1].LogStreamName) } diff --git a/internal/aws/ecsutil/client_test.go b/internal/aws/ecsutil/client_test.go index 1a4c0e3d9ff9..88903d0677ff 100644 --- a/internal/aws/ecsutil/client_test.go +++ b/internal/aws/ecsutil/client_test.go @@ -30,7 +30,7 @@ func TestClient(t *testing.T) { 
require.Equal(t, "hello", string(resp)) require.True(t, tr.closed) require.Equal(t, baseURL.String()+"/stats", tr.url) - require.Equal(t, 1, len(tr.header)) + require.Len(t, tr.header, 1) require.Equal(t, "application/json", tr.header["Content-Type"][0]) require.Equal(t, "GET", tr.method) } diff --git a/internal/aws/ecsutil/metadata_provider_test.go b/internal/aws/ecsutil/metadata_provider_test.go index 5cf34e233df4..a3418d75002d 100644 --- a/internal/aws/ecsutil/metadata_provider_test.go +++ b/internal/aws/ecsutil/metadata_provider_test.go @@ -41,7 +41,7 @@ func Test_ecsMetadata_fetchTask(t *testing.T) { assert.Equal(t, "ec2", fetchResp.LaunchType) assert.Equal(t, "us-west-2a", fetchResp.AvailabilityZone) assert.Equal(t, "1", fetchResp.Revision) - assert.Equal(t, 3, len(fetchResp.Containers)) + assert.Len(t, fetchResp.Containers, 3) } func Test_ecsMetadata_fetchContainer(t *testing.T) { diff --git a/internal/aws/k8s/k8sclient/clientset_test.go b/internal/aws/k8s/k8sclient/clientset_test.go index e6b91a4aae7e..607cc0b45088 100644 --- a/internal/aws/k8s/k8sclient/clientset_test.go +++ b/internal/aws/k8s/k8sclient/clientset_test.go @@ -19,7 +19,7 @@ func TestGetShutdown(t *testing.T) { InitSyncPollInterval(10*time.Nanosecond), InitSyncPollTimeout(20*time.Nanosecond), ) - assert.Equal(t, 1, len(optionsToK8sClient)) + assert.Len(t, optionsToK8sClient, 1) assert.NotNil(t, k8sClient.GetClientSet()) assert.NotNil(t, k8sClient.GetEpClient()) assert.NotNil(t, k8sClient.GetJobClient()) @@ -32,6 +32,6 @@ func TestGetShutdown(t *testing.T) { assert.Nil(t, k8sClient.node) assert.Nil(t, k8sClient.pod) assert.Nil(t, k8sClient.replicaSet) - assert.Equal(t, 0, len(optionsToK8sClient)) + assert.Len(t, optionsToK8sClient, 0) removeTempKubeConfig() } diff --git a/internal/aws/k8s/k8sclient/obj_store_test.go b/internal/aws/k8s/k8sclient/obj_store_test.go index 8ffb06fd9b59..1fab30db0b43 100644 --- a/internal/aws/k8s/k8sclient/obj_store_test.go +++ b/internal/aws/k8s/k8sclient/obj_store_test.go @@ -68,7 +68,7 @@ func TestGetList(t *testing.T) { "20036b33-cb03-489b-b778-e516b4dae519": "a", } val := o.List() - assert.Equal(t, 1, len(val)) + assert.Len(t, val, 1) expected := o.objs["20036b33-cb03-489b-b778-e516b4dae519"] assert.Equal(t, expected, val[0]) } @@ -119,7 +119,7 @@ func TestDelete(t *testing.T) { assert.True(t, o.refreshed) keys := o.ListKeys() - assert.Equal(t, 1, len(keys)) + assert.Len(t, keys, 1) assert.Equal(t, "75ab40d2-552a-4c05-82c9-0ddcb3008657", keys[0]) } @@ -174,11 +174,11 @@ func TestUpdate(t *testing.T) { assert.NoError(t, err) keys := o.ListKeys() - assert.Equal(t, 1, len(keys)) + assert.Len(t, keys, 1) assert.Equal(t, "bc5f5839-f62e-44b9-a79e-af250d92dcb1", keys[0]) values := o.List() - assert.Equal(t, 1, len(values)) + assert.Len(t, values, 1) assert.Equal(t, updatedObj, values[0]) } diff --git a/internal/aws/xray/telemetry/sender_test.go b/internal/aws/xray/telemetry/sender_test.go index 9ec4a54cec6e..877b1ccf33d0 100644 --- a/internal/aws/xray/telemetry/sender_test.go +++ b/internal/aws/xray/telemetry/sender_test.go @@ -113,10 +113,10 @@ func TestQueueOverflow(t *testing.T) { } // number of dropped records assert.Equal(t, 5, logs.Len()) - assert.Equal(t, 20, len(sender.queue)) + assert.Len(t, sender.queue, 20) sender.send() // only one batch succeeded - assert.Equal(t, 15, len(sender.queue)) + assert.Len(t, sender.queue, 15) // verify that sent back of queue for _, record := range sender.queue { assert.Greater(t, *record.SegmentsSentCount, int64(5)) diff --git 
a/internal/common/testutil/testutil.go b/internal/common/testutil/testutil.go index 1f958995d530..3e69230cf6e0 100644 --- a/internal/common/testutil/testutil.go +++ b/internal/common/testutil/testutil.go @@ -110,14 +110,14 @@ func createExclusionsList(t testing.TB, exclusionsText string) []portpair { var exclusions []portpair parts := strings.Split(exclusionsText, "--------") - require.Equal(t, len(parts), 3) + require.Len(t, parts, 3) portsText := strings.Split(parts[2], "*") require.Greater(t, len(portsText), 1) // original text may have a suffix like " - Administered port exclusions." lines := strings.Split(portsText[0], "\n") for _, line := range lines { if strings.TrimSpace(line) != "" { entries := strings.Fields(strings.TrimSpace(line)) - require.Equal(t, len(entries), 2) + require.Len(t, entries, 2) pair := portpair{entries[0], entries[1]} exclusions = append(exclusions, pair) } diff --git a/internal/common/testutil/testutil_test.go b/internal/common/testutil/testutil_test.go index f6cb1eb1c7f6..a365622ebd71 100644 --- a/internal/common/testutil/testutil_test.go +++ b/internal/common/testutil/testutil_test.go @@ -63,8 +63,8 @@ Start Port End Port * - Administered port exclusions. ` exclusions := createExclusionsList(t, exclusionsText) - require.Equal(t, len(exclusions), 2) + require.Len(t, exclusions, 2) emptyExclusions := createExclusionsList(t, emptyExclusionsText) - require.Equal(t, len(emptyExclusions), 0) + require.Len(t, emptyExclusions, 0) } diff --git a/internal/coreinternal/consumerretry/logs_test.go b/internal/coreinternal/consumerretry/logs_test.go index f9a34a96893f..c4e6321b638e 100644 --- a/internal/coreinternal/consumerretry/logs_test.go +++ b/internal/coreinternal/consumerretry/logs_test.go @@ -65,7 +65,7 @@ func TestConsumeLogs(t *testing.T) { err := consumer.ConsumeLogs(context.Background(), testdata.GenerateLogsTwoLogRecordsSameResource()) assert.Equal(t, tt.expectedErr, err) if err == nil { - assert.Equal(t, 1, len(tt.consumer.AllLogs())) + assert.Len(t, tt.consumer.AllLogs(), 1) assert.Equal(t, 2, tt.consumer.AllLogs()[0].LogRecordCount()) if tt.consumer.acceptAfter > 0 { assert.Equal(t, tt.consumer.rejectCount.Load(), tt.consumer.acceptAfter) @@ -106,7 +106,7 @@ func TestConsumeLogs_PartialRetry(t *testing.T) { assert.NoError(t, consumer.ConsumeLogs(context.Background(), logs)) // Verify the logs batch is broken into two parts, one with the partial error and one without. 
- assert.Equal(t, 2, len(sink.AllLogs())) + assert.Len(t, sink.AllLogs(), 2) assert.Equal(t, 1, sink.AllLogs()[0].ResourceLogs().Len()) assert.Equal(t, 2, sink.AllLogs()[0].LogRecordCount()) assert.Equal(t, 1, sink.AllLogs()[1].ResourceLogs().Len()) diff --git a/internal/coreinternal/goldendataset/pict_metrics_gen_test.go b/internal/coreinternal/goldendataset/pict_metrics_gen_test.go index 2a30fe5b7558..47ec630fd535 100644 --- a/internal/coreinternal/goldendataset/pict_metrics_gen_test.go +++ b/internal/coreinternal/goldendataset/pict_metrics_gen_test.go @@ -14,7 +14,7 @@ import ( func TestGenerateMetricDatas(t *testing.T) { mds, err := GenerateMetrics("testdata/generated_pict_pairs_metrics.txt") require.NoError(t, err) - require.Equal(t, 25, len(mds)) + require.Len(t, mds, 25) } func TestPICTtoCfg(t *testing.T) { diff --git a/internal/coreinternal/goldendataset/traces_generator_test.go b/internal/coreinternal/goldendataset/traces_generator_test.go index f7a9be8061ae..c018d3affe83 100644 --- a/internal/coreinternal/goldendataset/traces_generator_test.go +++ b/internal/coreinternal/goldendataset/traces_generator_test.go @@ -13,5 +13,5 @@ func TestGenerateTraces(t *testing.T) { rscSpans, err := GenerateTraces("testdata/generated_pict_pairs_traces.txt", "testdata/generated_pict_pairs_spans.txt") assert.NoError(t, err) - assert.Equal(t, 32, len(rscSpans)) + assert.Len(t, rscSpans, 32) } diff --git a/internal/docker/docker_test.go b/internal/docker/docker_test.go index e5d5185b16cf..9a8b0bfee436 100644 --- a/internal/docker/docker_test.go +++ b/internal/docker/docker_test.go @@ -72,7 +72,7 @@ func TestWatchingTimeouts(t *testing.T) { cnt, ofInterest := cli.inspectedContainerIsOfInterest(context.Background(), "SomeContainerId") assert.False(t, ofInterest) assert.Nil(t, cnt) - assert.Equal(t, 1, len(logs.All())) + assert.Len(t, logs.All(), 1) for _, l := range logs.All() { assert.Contains(t, l.ContextMap()["error"], expectedError) } @@ -124,7 +124,7 @@ func TestFetchingTimeouts(t *testing.T) { assert.Contains(t, err.Error(), expectedError) - assert.Equal(t, 1, len(logs.All())) + assert.Len(t, logs.All(), 1) for _, l := range logs.All() { assert.Contains(t, l.ContextMap()["error"], expectedError) } diff --git a/internal/filter/filterset/regexp/regexpfilterset_test.go b/internal/filter/filterset/regexp/regexpfilterset_test.go index cfb616430662..6e8dfbf4ff6e 100644 --- a/internal/filter/filterset/regexp/regexpfilterset_test.go +++ b/internal/filter/filterset/regexp/regexpfilterset_test.go @@ -109,7 +109,7 @@ func TestRegexpDeDup(t *testing.T) { require.NoError(t, err) assert.NotNil(t, fs) assert.Nil(t, fs.cache) - assert.EqualValues(t, 1, len(fs.regexes)) + assert.Len(t, fs.regexes, 1) } func TestRegexpMatchesCaches(t *testing.T) { diff --git a/internal/kubelet/client_test.go b/internal/kubelet/client_test.go index 0e8ce71caab2..5c7aa63d6681 100644 --- a/internal/kubelet/client_test.go +++ b/internal/kubelet/client_test.go @@ -45,7 +45,7 @@ func TestClient(t *testing.T) { require.Equal(t, "hello", string(resp)) require.True(t, tr.closed) require.Equal(t, baseURL+"/foo", tr.url) - require.Equal(t, 1, len(tr.header)) + require.Len(t, tr.header, 1) require.Equal(t, "application/json", tr.header["Content-Type"][0]) require.Equal(t, "GET", tr.method) } @@ -66,7 +66,7 @@ func TestNewTLSClientProvider(t *testing.T) { require.NoError(t, err) c := client.(*clientImpl) tcc := c.httpClient.Transport.(*http.Transport).TLSClientConfig - require.Equal(t, 1, len(tcc.Certificates)) + require.Len(t, 
tcc.Certificates, 1) require.NotNil(t, tcc.RootCAs) } diff --git a/internal/otelarrow/testutil/testutil.go b/internal/otelarrow/testutil/testutil.go index 1f75806356e6..1943ff6e9e03 100644 --- a/internal/otelarrow/testutil/testutil.go +++ b/internal/otelarrow/testutil/testutil.go @@ -86,14 +86,14 @@ func createExclusionsList(exclusionsText string, t testing.TB) []portpair { var exclusions []portpair parts := strings.Split(exclusionsText, "--------") - require.Equal(t, len(parts), 3) + require.Len(t, parts, 3) portsText := strings.Split(parts[2], "*") require.Greater(t, len(portsText), 1) // original text may have a suffix like " - Administered port exclusions." lines := strings.Split(portsText[0], "\n") for _, line := range lines { if strings.TrimSpace(line) != "" { entries := strings.Fields(strings.TrimSpace(line)) - require.Equal(t, len(entries), 2) + require.Len(t, entries, 2) pair := portpair{entries[0], entries[1]} exclusions = append(exclusions, pair) } diff --git a/internal/otelarrow/testutil/testutil_test.go b/internal/otelarrow/testutil/testutil_test.go index 05b38a1de6ac..c3cdfad36ff4 100644 --- a/internal/otelarrow/testutil/testutil_test.go +++ b/internal/otelarrow/testutil/testutil_test.go @@ -50,8 +50,8 @@ Start Port End Port * - Administered port exclusions. ` exclusions := createExclusionsList(exclusionsText, t) - require.Equal(t, len(exclusions), 2) + require.Len(t, exclusions, 2) emptyExclusions := createExclusionsList(emptyExclusionsText, t) - require.Equal(t, len(emptyExclusions), 0) + require.Len(t, emptyExclusions, 0) } diff --git a/pkg/ottl/functions_test.go b/pkg/ottl/functions_test.go index b2210c36654b..cc9905f08e5f 100644 --- a/pkg/ottl/functions_test.go +++ b/pkg/ottl/functions_test.go @@ -2247,7 +2247,7 @@ func Test_basePath_Keys(t *testing.T) { }, } ks := bp.Keys() - assert.Equal(t, 1, len(ks)) + assert.Len(t, ks, 1) assert.Equal(t, k, ks[0]) } @@ -2375,7 +2375,7 @@ func Test_newPath(t *testing.T) { assert.Equal(t, "string", p.Name()) assert.Equal(t, "body.string[key]", p.String()) assert.Nil(t, p.Next()) - assert.Equal(t, 1, len(p.Keys())) + assert.Len(t, p.Keys(), 1) v, err := p.Keys()[0].String(context.Background(), struct{}{}) assert.NoError(t, err) assert.Equal(t, "key", *v) @@ -2415,7 +2415,7 @@ func Test_newKey(t *testing.T) { } ks := newKeys[any](keys) - assert.Equal(t, 2, len(ks)) + assert.Len(t, ks, 2) s, err := ks[0].String(context.Background(), nil) assert.NoError(t, err) diff --git a/pkg/stanza/adapter/frompdataconverter_test.go b/pkg/stanza/adapter/frompdataconverter_test.go index a60b32684e4f..4ddcc25e533d 100644 --- a/pkg/stanza/adapter/frompdataconverter_test.go +++ b/pkg/stanza/adapter/frompdataconverter_test.go @@ -155,7 +155,7 @@ func BenchmarkFromPdataConverter(b *testing.B) { break forLoop } - require.Equal(b, 250_000, len(entries)) + require.Len(b, entries, 250_000) n += len(entries) case <-timeoutTimer.C: diff --git a/pkg/stanza/fileconsumer/attrs/attrs_test.go b/pkg/stanza/fileconsumer/attrs/attrs_test.go index b714975d460b..b5a79cfeb364 100644 --- a/pkg/stanza/fileconsumer/attrs/attrs_test.go +++ b/pkg/stanza/fileconsumer/attrs/attrs_test.go @@ -86,7 +86,7 @@ func TestResolver(t *testing.T) { assert.Empty(t, attributes[LogFileOwnerGroupName]) assert.Empty(t, attributes[LogFileOwnerGroupName]) } - assert.Equal(t, expectLen, len(attributes)) + assert.Len(t, attributes, expectLen) }) } } diff --git a/pkg/stanza/fileconsumer/file_test.go b/pkg/stanza/fileconsumer/file_test.go index 4769b8621063..b04968a0d5f4 100644 --- 
a/pkg/stanza/fileconsumer/file_test.go +++ b/pkg/stanza/fileconsumer/file_test.go @@ -1351,7 +1351,7 @@ func TestStalePartialFingerprintDiscarded(t *testing.T) { operator.wg.Wait() if runtime.GOOS != "windows" { // On windows, we never keep files in previousPollFiles, so we don't expect to see them here - require.Equal(t, len(operator.tracker.PreviousPollFiles()), 1) + require.Len(t, operator.tracker.PreviousPollFiles(), 1) } // keep append data to file1 and file2 diff --git a/pkg/stanza/fileconsumer/internal/fingerprint/fingerprint_test.go b/pkg/stanza/fileconsumer/internal/fingerprint/fingerprint_test.go index 78f844ef54ac..6d2141fbf2e7 100644 --- a/pkg/stanza/fileconsumer/internal/fingerprint/fingerprint_test.go +++ b/pkg/stanza/fileconsumer/internal/fingerprint/fingerprint_test.go @@ -134,7 +134,7 @@ func TestNewFromFile(t *testing.T) { fp, err := NewFromFile(temp, tc.fingerprintSize) require.NoError(t, err) - require.Equal(t, tc.expectedLen, len(fp.firstBytes)) + require.Len(t, fp.firstBytes, tc.expectedLen) }) } } diff --git a/pkg/stanza/fileconsumer/internal/reader/fingerprint_test.go b/pkg/stanza/fileconsumer/internal/reader/fingerprint_test.go index f6ce84ee584e..877a9c23e1cd 100644 --- a/pkg/stanza/fileconsumer/internal/reader/fingerprint_test.go +++ b/pkg/stanza/fileconsumer/internal/reader/fingerprint_test.go @@ -296,7 +296,7 @@ func (tc updateFingerprintTest) run(bufferSize int) func(*testing.T) { i, err := temp.Write(tc.moreBytes) require.NoError(t, err) - require.Equal(t, i, len(tc.moreBytes)) + require.Len(t, tc.moreBytes, i) r.ReadToEnd(context.Background()) diff --git a/pkg/stanza/operator/helper/emitter_test.go b/pkg/stanza/operator/helper/emitter_test.go index 843415e44776..e8c11e6d05e1 100644 --- a/pkg/stanza/operator/helper/emitter_test.go +++ b/pkg/stanza/operator/helper/emitter_test.go @@ -63,7 +63,7 @@ func TestLogEmitterEmitsOnMaxBatchSize(t *testing.T) { select { case recv := <-emitter.logChan: - require.Equal(t, maxBatchSize, len(recv), "Length of received entries was not the same as max batch size!") + require.Len(t, recv, maxBatchSize, "Length of received entries was not the same as max batch size!") case <-timeoutChan: require.FailNow(t, "Failed to receive log entries before timeout") } @@ -92,7 +92,7 @@ func TestLogEmitterEmitsOnFlushInterval(t *testing.T) { select { case recv := <-emitter.logChan: - require.Equal(t, 1, len(recv), "Should have received one entry, got %d instead", len(recv)) + require.Len(t, recv, 1, "Should have received one entry, got %d instead", len(recv)) case <-timeoutChan: require.FailNow(t, "Failed to receive log entry before timeout") } diff --git a/pkg/stanza/operator/input/windows/buffer_test.go b/pkg/stanza/operator/input/windows/buffer_test.go index 64106a832eef..f8376706ec78 100644 --- a/pkg/stanza/operator/input/windows/buffer_test.go +++ b/pkg/stanza/operator/input/windows/buffer_test.go @@ -46,13 +46,13 @@ func TestBufferReadString(t *testing.T) { func TestBufferUpdateSize(t *testing.T) { buffer := NewBuffer() buffer.UpdateSizeBytes(1) - require.Equal(t, 1, len(buffer.buffer)) + require.Len(t, buffer.buffer, 1) } func TestBufferUpdateSizeWide(t *testing.T) { buffer := NewBuffer() buffer.UpdateSizeWide(1) - require.Equal(t, 2, len(buffer.buffer)) + require.Len(t, buffer.buffer, 2) } func TestBufferSize(t *testing.T) { diff --git a/pkg/stanza/operator/transformer/recombine/transformer_test.go b/pkg/stanza/operator/transformer/recombine/transformer_test.go index d78931741646..489d7ac723a0 100644 --- 
a/pkg/stanza/operator/transformer/recombine/transformer_test.go +++ b/pkg/stanza/operator/transformer/recombine/transformer_test.go @@ -950,9 +950,9 @@ func TestSourceBatchDelete(t *testing.T) { ctx := context.Background() require.NoError(t, recombine.Process(ctx, start)) - require.Equal(t, 1, len(recombine.batchMap)) + require.Len(t, recombine.batchMap, 1) require.NoError(t, recombine.Process(ctx, next)) - require.Equal(t, 0, len(recombine.batchMap)) + require.Len(t, recombine.batchMap, 0) fake.ExpectEntry(t, expect) require.NoError(t, recombine.Stop()) } diff --git a/pkg/stanza/pipeline/config_test.go b/pkg/stanza/pipeline/config_test.go index 731796238b25..b82bc2d2c895 100644 --- a/pkg/stanza/pipeline/config_test.go +++ b/pkg/stanza/pipeline/config_test.go @@ -29,7 +29,7 @@ func TestBuildPipelineSuccess(t *testing.T) { set := componenttest.NewNopTelemetrySettings() pipe, err := cfg.Build(set) require.NoError(t, err) - require.Equal(t, 1, len(pipe.Operators())) + require.Len(t, pipe.Operators(), 1) } func TestBuildPipelineNoLogger(t *testing.T) { @@ -86,22 +86,22 @@ func TestBuildAPipelineDefaultOperator(t *testing.T) { require.NoError(t, err) ops := pipe.Operators() - require.Equal(t, 3, len(ops)) + require.Len(t, ops, 3) exists := make(map[string]bool) for _, op := range ops { switch op.ID() { case "noop": - require.Equal(t, 1, len(op.GetOutputIDs())) + require.Len(t, op.GetOutputIDs(), 1) require.Equal(t, "noop1", op.GetOutputIDs()[0]) exists["noop"] = true case "noop1": - require.Equal(t, 1, len(op.GetOutputIDs())) + require.Len(t, op.GetOutputIDs(), 1) require.Equal(t, "fake", op.GetOutputIDs()[0]) exists["noop1"] = true case "fake": - require.Equal(t, 0, len(op.GetOutputIDs())) + require.Len(t, op.GetOutputIDs(), 0) exists["fake"] = true } } @@ -375,7 +375,7 @@ func TestUpdateOutputIDs(t *testing.T) { if tc.defaultOut != nil { expectedNumOps++ } - require.Equal(t, expectedNumOps, len(ops)) + require.Len(t, ops, expectedNumOps) for i := 0; i < len(ops); i++ { id := ops[i].ID() diff --git a/pkg/translator/jaeger/traces_to_jaegerproto_test.go b/pkg/translator/jaeger/traces_to_jaegerproto_test.go index 20d8ee1e3d00..d996b23c3465 100644 --- a/pkg/translator/jaeger/traces_to_jaegerproto_test.go +++ b/pkg/translator/jaeger/traces_to_jaegerproto_test.go @@ -335,7 +335,7 @@ func TestInternalTracesToJaegerProto(t *testing.T) { if test.jb == nil { assert.Len(t, jbs, 0) } else { - require.Equal(t, 1, len(jbs)) + require.Len(t, jbs, 1) assert.EqualValues(t, test.jb, jbs[0]) } }) diff --git a/pkg/translator/opencensus/traces_to_oc_test.go b/pkg/translator/opencensus/traces_to_oc_test.go index 8d0293441937..9397b9231572 100644 --- a/pkg/translator/opencensus/traces_to_oc_test.go +++ b/pkg/translator/opencensus/traces_to_oc_test.go @@ -345,7 +345,7 @@ func TestInternalTracesToOCTracesAndBack(t *testing.T) { assert.NoError(t, err) for _, td := range tds { ocNode, ocResource, ocSpans := ResourceSpansToOC(td.ResourceSpans().At(0)) - assert.Equal(t, td.SpanCount(), len(ocSpans)) + assert.Len(t, ocSpans, td.SpanCount()) tdFromOC := OCToTraces(ocNode, ocResource, ocSpans) assert.NotNil(t, tdFromOC) assert.Equal(t, td.SpanCount(), tdFromOC.SpanCount()) diff --git a/pkg/translator/signalfx/to_metrics_test.go b/pkg/translator/signalfx/to_metrics_test.go index 9b2e95676346..8c644e1b293a 100644 --- a/pkg/translator/signalfx/to_metrics_test.go +++ b/pkg/translator/signalfx/to_metrics_test.go @@ -225,7 +225,7 @@ func TestToMetrics(t *testing.T) { targetLen := 2*len(pt.Dimensions) + 1 dimensions := 
make([]*sfxpb.Dimension, targetLen) copy(dimensions[1:], pt.Dimensions) - assert.Equal(t, targetLen, len(dimensions)) + assert.Len(t, dimensions, targetLen) assert.Nil(t, dimensions[0]) pt.Dimensions = dimensions return []*sfxpb.DataPoint{pt} diff --git a/pkg/translator/skywalking/skywalkingproto_to_traces_test.go b/pkg/translator/skywalking/skywalkingproto_to_traces_test.go index 1980a1423a3a..429be93f9c92 100644 --- a/pkg/translator/skywalking/skywalkingproto_to_traces_test.go +++ b/pkg/translator/skywalking/skywalkingproto_to_traces_test.go @@ -69,7 +69,7 @@ func TestSwKvPairsToInternalAttributes(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { swKvPairsToInternalAttributes(test.swSpan.GetSpans()[0].Tags, test.dest.Attributes()) - assert.Equal(t, test.dest.Attributes().Len(), len(test.swSpan.GetSpans()[0].Tags)) + assert.Len(t, test.swSpan.GetSpans()[0].Tags, test.dest.Attributes().Len()) for _, tag := range test.swSpan.GetSpans()[0].Tags { value, _ := test.dest.Attributes().Get(tag.Key) assert.Equal(t, tag.Value, value.AsString()) diff --git a/pkg/translator/zipkin/zipkinv2/from_translator_test.go b/pkg/translator/zipkin/zipkinv2/from_translator_test.go index 95cd143597ec..2a00337a355a 100644 --- a/pkg/translator/zipkin/zipkinv2/from_translator_test.go +++ b/pkg/translator/zipkin/zipkinv2/from_translator_test.go @@ -95,7 +95,7 @@ func TestInternalTracesToZipkinSpansAndBack(t *testing.T) { for _, td := range tds { zipkinSpans, err := FromTranslator{}.FromTraces(td) assert.NoError(t, err) - assert.Equal(t, td.SpanCount(), len(zipkinSpans)) + assert.Len(t, zipkinSpans, td.SpanCount()) tdFromZS, zErr := ToTranslator{}.ToTraces(zipkinSpans) assert.NoError(t, zErr, zipkinSpans) assert.NotNil(t, tdFromZS) diff --git a/pkg/winperfcounters/watcher_test.go b/pkg/winperfcounters/watcher_test.go index 33c8c1969a46..0f671916288b 100644 --- a/pkg/winperfcounters/watcher_test.go +++ b/pkg/winperfcounters/watcher_test.go @@ -158,7 +158,7 @@ func TestPerfCounter_ScrapeData(t *testing.T) { name: "total instance", path: `\LogicalDisk(_Total)\Free Megabytes`, assertExpected: func(t *testing.T, data []CounterValue) { - assert.Equal(t, 1, len(data)) + assert.Len(t, data, 1) assert.Empty(t, data[0].InstanceName) }, }, diff --git a/processor/cumulativetodeltaprocessor/processor_test.go b/processor/cumulativetodeltaprocessor/processor_test.go index 53441c254cdf..d2a316a7767c 100644 --- a/processor/cumulativetodeltaprocessor/processor_test.go +++ b/processor/cumulativetodeltaprocessor/processor_test.go @@ -465,7 +465,7 @@ func TestCumulativeToDeltaProcessor(t *testing.T) { assert.Nil(t, cErr) got := next.AllMetrics() - require.Equal(t, 1, len(got)) + require.Len(t, got, 1) require.Equal(t, test.outMetrics.ResourceMetrics().Len(), got[0].ResourceMetrics().Len()) expectedMetrics := test.outMetrics.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics() diff --git a/processor/deltatorateprocessor/processor_test.go b/processor/deltatorateprocessor/processor_test.go index deaf4b147e7c..f4ae2eef3bb1 100644 --- a/processor/deltatorateprocessor/processor_test.go +++ b/processor/deltatorateprocessor/processor_test.go @@ -137,7 +137,7 @@ func TestCumulativeToDeltaProcessor(t *testing.T) { assert.Nil(t, cErr) got := next.AllMetrics() - require.Equal(t, 1, len(got)) + require.Len(t, got, 1) require.Equal(t, test.outMetrics.ResourceMetrics().Len(), got[0].ResourceMetrics().Len()) expectedMetrics := test.outMetrics.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics() diff --git 
a/processor/filterprocessor/metrics_test.go b/processor/filterprocessor/metrics_test.go index 84df59523c6d..3cb8c077bb81 100644 --- a/processor/filterprocessor/metrics_test.go +++ b/processor/filterprocessor/metrics_test.go @@ -347,11 +347,11 @@ func TestFilterMetricProcessor(t *testing.T) { got := next.AllMetrics() if len(test.outMN) == 0 { - require.Equal(t, 0, len(got)) + require.Len(t, got, 0) return } - require.Equal(t, 1, len(got)) + require.Len(t, got, 1) require.Equal(t, len(test.outMN), got[0].ResourceMetrics().Len()) for i, wantOut := range test.outMN { gotMetrics := got[0].ResourceMetrics().At(i).ScopeMetrics().At(0).Metrics() diff --git a/processor/filterprocessor/traces_test.go b/processor/filterprocessor/traces_test.go index 520775f5a621..b37dbfafc67b 100644 --- a/processor/filterprocessor/traces_test.go +++ b/processor/filterprocessor/traces_test.go @@ -148,7 +148,7 @@ func TestFilterTraceProcessor(t *testing.T) { // If all traces got filtered you shouldn't even have ResourceSpans if test.allTracesFiltered { - require.Equal(t, 0, len(got)) + require.Len(t, got, 0) } else { require.Equal(t, test.spanCountExpected, got[0].SpanCount()) } diff --git a/processor/geoipprocessor/geoip_processor_test.go b/processor/geoipprocessor/geoip_processor_test.go index cf94294d41aa..32969f69917b 100644 --- a/processor/geoipprocessor/geoip_processor_test.go +++ b/processor/geoipprocessor/geoip_processor_test.go @@ -145,7 +145,7 @@ func compareAllSignals(cfg component.Config, goldenDir string) func(t *testing.T require.NoError(t, err) actualMetrics := nextMetrics.AllMetrics() - require.Equal(t, 1, len(actualMetrics)) + require.Len(t, actualMetrics, 1) // golden.WriteMetrics(t, filepath.Join(dir, "output-metrics.yaml"), actualMetrics[0]) require.NoError(t, pmetrictest.CompareMetrics(expectedMetrics, actualMetrics[0])) @@ -164,7 +164,7 @@ func compareAllSignals(cfg component.Config, goldenDir string) func(t *testing.T require.NoError(t, err) actualTraces := nextTraces.AllTraces() - require.Equal(t, 1, len(actualTraces)) + require.Len(t, actualTraces, 1) // golden.WriteTraces(t, filepath.Join(dir, "output-traces.yaml"), actualTraces[0]) require.NoError(t, ptracetest.CompareTraces(expectedTraces, actualTraces[0])) @@ -183,7 +183,7 @@ func compareAllSignals(cfg component.Config, goldenDir string) func(t *testing.T require.NoError(t, err) actualLogs := nextLogs.AllLogs() - require.Equal(t, 1, len(actualLogs)) + require.Len(t, actualLogs, 1) // golden.WriteLogs(t, filepath.Join(dir, "output-logs.yaml"), actualLogs[0]) require.NoError(t, plogtest.CompareLogs(expectedLogs, actualLogs[0])) } diff --git a/processor/groupbytraceprocessor/processor_test.go b/processor/groupbytraceprocessor/processor_test.go index 4e65362e5599..1a9056eadf0c 100644 --- a/processor/groupbytraceprocessor/processor_test.go +++ b/processor/groupbytraceprocessor/processor_test.go @@ -126,7 +126,7 @@ func TestInternalCacheLimit(t *testing.T) { wg.Wait() // verify - assert.Equal(t, 5, len(receivedTraceIDs)) + assert.Len(t, receivedTraceIDs, 5) for i := 5; i > 0; i-- { // last 5 traces traceID := pcommon.TraceID(traceIDs[i]) diff --git a/processor/k8sattributesprocessor/internal/kube/client_test.go b/processor/k8sattributesprocessor/internal/kube/client_test.go index de6f1dace5d6..7a81b7ce46ec 100644 --- a/processor/k8sattributesprocessor/internal/kube/client_test.go +++ b/processor/k8sattributesprocessor/internal/kube/client_test.go @@ -47,18 +47,18 @@ func newPodIdentifier(from string, name string, value string) PodIdentifier { } 
func podAddAndUpdateTest(t *testing.T, c *WatchClient, handler func(obj any)) { - assert.Equal(t, 0, len(c.Pods)) + assert.Len(t, c.Pods, 0) // pod without IP pod := &api_v1.Pod{} handler(pod) - assert.Equal(t, 0, len(c.Pods)) + assert.Len(t, c.Pods, 0) pod = &api_v1.Pod{} pod.Name = "podA" pod.Status.PodIP = "1.1.1.1" handler(pod) - assert.Equal(t, 2, len(c.Pods)) + assert.Len(t, c.Pods, 2) got := c.Pods[newPodIdentifier("connection", "k8s.pod.ip", "1.1.1.1")] assert.Equal(t, "1.1.1.1", got.Address) assert.Equal(t, "podA", got.Name) @@ -68,7 +68,7 @@ func podAddAndUpdateTest(t *testing.T, c *WatchClient, handler func(obj any)) { pod.Name = "podB" pod.Status.PodIP = "1.1.1.1" handler(pod) - assert.Equal(t, 2, len(c.Pods)) + assert.Len(t, c.Pods, 2) got = c.Pods[newPodIdentifier("connection", "k8s.pod.ip", "1.1.1.1")] assert.Equal(t, "1.1.1.1", got.Address) assert.Equal(t, "podB", got.Name) @@ -79,7 +79,7 @@ func podAddAndUpdateTest(t *testing.T, c *WatchClient, handler func(obj any)) { pod.Status.PodIP = "2.2.2.2" pod.UID = "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" handler(pod) - assert.Equal(t, 5, len(c.Pods)) + assert.Len(t, c.Pods, 5) got = c.Pods[newPodIdentifier("connection", "k8s.pod.ip", "2.2.2.2")] assert.Equal(t, "2.2.2.2", got.Address) assert.Equal(t, "podC", got.Name) @@ -92,16 +92,16 @@ func podAddAndUpdateTest(t *testing.T, c *WatchClient, handler func(obj any)) { } func namespaceAddAndUpdateTest(t *testing.T, c *WatchClient, handler func(obj any)) { - assert.Equal(t, 0, len(c.Namespaces)) + assert.Len(t, c.Namespaces, 0) namespace := &api_v1.Namespace{} handler(namespace) - assert.Equal(t, 0, len(c.Namespaces)) + assert.Len(t, c.Namespaces, 0) namespace = &api_v1.Namespace{} namespace.Name = "namespaceA" handler(namespace) - assert.Equal(t, 1, len(c.Namespaces)) + assert.Len(t, c.Namespaces, 1) got := c.Namespaces["namespaceA"] assert.Equal(t, "namespaceA", got.Name) assert.Equal(t, "", got.NamespaceUID) @@ -110,23 +110,23 @@ func namespaceAddAndUpdateTest(t *testing.T, c *WatchClient, handler func(obj an namespace.Name = "namespaceB" namespace.UID = "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" handler(namespace) - assert.Equal(t, 2, len(c.Namespaces)) + assert.Len(t, c.Namespaces, 2) got = c.Namespaces["namespaceB"] assert.Equal(t, "namespaceB", got.Name) assert.Equal(t, "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee", got.NamespaceUID) } func nodeAddAndUpdateTest(t *testing.T, c *WatchClient, handler func(obj any)) { - assert.Equal(t, 0, len(c.Nodes)) + assert.Len(t, c.Nodes, 0) node := &api_v1.Node{} handler(node) - assert.Equal(t, 0, len(c.Nodes)) + assert.Len(t, c.Nodes, 0) node = &api_v1.Node{} node.Name = "nodeA" handler(node) - assert.Equal(t, 1, len(c.Nodes)) + assert.Len(t, c.Nodes, 1) got, ok := c.GetNode("nodeA") assert.True(t, ok) assert.Equal(t, "nodeA", got.Name) @@ -136,7 +136,7 @@ func nodeAddAndUpdateTest(t *testing.T, c *WatchClient, handler func(obj any)) { node.Name = "nodeB" node.UID = "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" handler(node) - assert.Equal(t, 2, len(c.Nodes)) + assert.Len(t, c.Nodes, 2) got, ok = c.GetNode("nodeB") assert.True(t, ok) assert.Equal(t, "nodeB", got.Name) @@ -227,11 +227,11 @@ func TestNodeAdd(t *testing.T) { func TestReplicaSetHandler(t *testing.T) { c, _ := newTestClient(t) - assert.Equal(t, len(c.ReplicaSets), 0) + assert.Len(t, c.ReplicaSets, 0) replicaset := &apps_v1.ReplicaSet{} c.handleReplicaSetAdd(replicaset) - assert.Equal(t, len(c.ReplicaSets), 0) + assert.Len(t, c.ReplicaSets, 0) // test add replicaset replicaset = 
&apps_v1.ReplicaSet{} @@ -256,7 +256,7 @@ func TestReplicaSetHandler(t *testing.T) { }, } c.handleReplicaSetAdd(replicaset) - assert.Equal(t, len(c.ReplicaSets), 1) + assert.Len(t, c.ReplicaSets, 1) got := c.ReplicaSets[string(replicaset.UID)] assert.Equal(t, got.Name, "deployment-aaa") assert.Equal(t, got.Namespace, "namespaceA") @@ -270,7 +270,7 @@ func TestReplicaSetHandler(t *testing.T) { updatedReplicaset := replicaset updatedReplicaset.ResourceVersion = "444444" c.handleReplicaSetUpdate(replicaset, updatedReplicaset) - assert.Equal(t, len(c.ReplicaSets), 1) + assert.Len(t, c.ReplicaSets, 1) got = c.ReplicaSets[string(replicaset.UID)] assert.Equal(t, got.Name, "deployment-aaa") assert.Equal(t, got.Namespace, "namespaceA") @@ -282,20 +282,20 @@ func TestReplicaSetHandler(t *testing.T) { // test delete replicaset c.handleReplicaSetDelete(updatedReplicaset) - assert.Equal(t, len(c.ReplicaSets), 0) + assert.Len(t, c.ReplicaSets, 0) // test delete replicaset when DeletedFinalStateUnknown c.handleReplicaSetAdd(replicaset) - require.Equal(t, len(c.ReplicaSets), 1) + require.Len(t, c.ReplicaSets, 1) c.handleReplicaSetDelete(cache.DeletedFinalStateUnknown{ Obj: replicaset, }) - assert.Equal(t, len(c.ReplicaSets), 0) + assert.Len(t, c.ReplicaSets, 0) } func TestPodHostNetwork(t *testing.T) { c, _ := newTestClient(t) - assert.Equal(t, 0, len(c.Pods)) + assert.Len(t, c.Pods, 0) // pod will not be added if no rule matches pod := &api_v1.Pod{} @@ -303,7 +303,7 @@ func TestPodHostNetwork(t *testing.T) { pod.Status.PodIP = "1.1.1.1" pod.Spec.HostNetwork = true c.handlePodAdd(pod) - assert.Equal(t, 0, len(c.Pods)) + assert.Len(t, c.Pods, 0) // pod will be added if rule matches pod.Name = "podB" @@ -311,7 +311,7 @@ func TestPodHostNetwork(t *testing.T) { pod.UID = "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" pod.Spec.HostNetwork = true c.handlePodAdd(pod) - assert.Equal(t, 1, len(c.Pods)) + assert.Len(t, c.Pods, 1) got := c.Pods[newPodIdentifier("resource_attribute", "k8s.pod.uid", "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee")] assert.Equal(t, "2.2.2.2", got.Address) assert.Equal(t, "podB", got.Name) @@ -323,14 +323,14 @@ func TestPodHostNetwork(t *testing.T) { // correctly func TestPodCreate(t *testing.T) { c, _ := newTestClient(t) - assert.Equal(t, 0, len(c.Pods)) + assert.Len(t, c.Pods, 0) // pod is created in Pending phase. 
At this point it has a UID but no start time or pod IP address pod := &api_v1.Pod{} pod.Name = "podD" pod.UID = "11111111-2222-3333-4444-555555555555" c.handlePodAdd(pod) - assert.Equal(t, 1, len(c.Pods)) + assert.Len(t, c.Pods, 1) got := c.Pods[newPodIdentifier("resource_attribute", "k8s.pod.uid", "11111111-2222-3333-4444-555555555555")] assert.Equal(t, "", got.Address) assert.Equal(t, "podD", got.Name) @@ -341,7 +341,7 @@ func TestPodCreate(t *testing.T) { startTime := meta_v1.NewTime(time.Now()) pod.Status.StartTime = &startTime c.handlePodUpdate(&api_v1.Pod{}, pod) - assert.Equal(t, 1, len(c.Pods)) + assert.Len(t, c.Pods, 1) got = c.Pods[newPodIdentifier("resource_attribute", "k8s.pod.uid", "11111111-2222-3333-4444-555555555555")] assert.Equal(t, "", got.Address) assert.Equal(t, "podD", got.Name) @@ -350,7 +350,7 @@ func TestPodCreate(t *testing.T) { // pod is Running and has an IP address pod.Status.PodIP = "3.3.3.3" c.handlePodUpdate(&api_v1.Pod{}, pod) - assert.Equal(t, 3, len(c.Pods)) + assert.Len(t, c.Pods, 3) got = c.Pods[newPodIdentifier("resource_attribute", "k8s.pod.uid", "11111111-2222-3333-4444-555555555555")] assert.Equal(t, "3.3.3.3", got.Address) assert.Equal(t, "podD", got.Name) @@ -377,7 +377,7 @@ func TestPodAddOutOfSync(t *testing.T) { }, }, }) - assert.Equal(t, 0, len(c.Pods)) + assert.Len(t, c.Pods, 0) pod := &api_v1.Pod{} pod.Name = "podA" @@ -385,7 +385,7 @@ func TestPodAddOutOfSync(t *testing.T) { startTime := meta_v1.NewTime(time.Now()) pod.Status.StartTime = &startTime c.handlePodAdd(pod) - assert.Equal(t, 3, len(c.Pods)) + assert.Len(t, c.Pods, 3) got := c.Pods[newPodIdentifier("connection", "k8s.pod.ip", "1.1.1.1")] assert.Equal(t, "1.1.1.1", got.Address) assert.Equal(t, "podA", got.Name) @@ -399,7 +399,7 @@ func TestPodAddOutOfSync(t *testing.T) { startTime2 := meta_v1.NewTime(time.Now().Add(-time.Second * 10)) pod2.Status.StartTime = &startTime2 c.handlePodAdd(pod2) - assert.Equal(t, 4, len(c.Pods)) + assert.Len(t, c.Pods, 4) got = c.Pods[newPodIdentifier("connection", "k8s.pod.ip", "1.1.1.1")] assert.Equal(t, "1.1.1.1", got.Address) assert.Equal(t, "podA", got.Name) @@ -435,7 +435,7 @@ func TestNodeUpdate(t *testing.T) { func TestPodDelete(t *testing.T) { c, _ := newTestClient(t) podAddAndUpdateTest(t, c, c.handlePodAdd) - assert.Equal(t, 5, len(c.Pods)) + assert.Len(t, c.Pods, 5) assert.Equal(t, "1.1.1.1", c.Pods[newPodIdentifier("connection", "k8s.pod.ip", "1.1.1.1")].Address) // delete empty IP pod @@ -446,10 +446,10 @@ func TestPodDelete(t *testing.T) { pod := &api_v1.Pod{} pod.Status.PodIP = "9.9.9.9" c.handlePodDelete(pod) - assert.Equal(t, 5, len(c.Pods)) + assert.Len(t, c.Pods, 5) got := c.Pods[newPodIdentifier("connection", "k8s.pod.ip", "1.1.1.1")] assert.Equal(t, "1.1.1.1", got.Address) - assert.Equal(t, 0, len(c.deleteQueue)) + assert.Len(t, c.deleteQueue, 0) // delete matching IP with wrong name/different pod c.deleteQueue = c.deleteQueue[:0] @@ -457,9 +457,9 @@ func TestPodDelete(t *testing.T) { pod.Status.PodIP = "1.1.1.1" c.handlePodDelete(pod) got = c.Pods[newPodIdentifier("connection", "k8s.pod.ip", "1.1.1.1")] - assert.Equal(t, 5, len(c.Pods)) + assert.Len(t, c.Pods, 5) assert.Equal(t, "1.1.1.1", got.Address) - assert.Equal(t, 0, len(c.deleteQueue)) + assert.Len(t, c.deleteQueue, 0) // delete matching IP and name c.deleteQueue = c.deleteQueue[:0] @@ -468,8 +468,8 @@ func TestPodDelete(t *testing.T) { pod.Status.PodIP = "1.1.1.1" tsBeforeDelete := time.Now() c.handlePodDelete(pod) - assert.Equal(t, 5, len(c.Pods)) - assert.Equal(t, 3, 
len(c.deleteQueue)) + assert.Len(t, c.Pods, 5) + assert.Len(t, c.deleteQueue, 3) deleteRequest := c.deleteQueue[0] assert.Equal(t, newPodIdentifier("connection", "k8s.pod.ip", "1.1.1.1"), deleteRequest.id) assert.Equal(t, "podB", deleteRequest.podName) @@ -484,8 +484,8 @@ func TestPodDelete(t *testing.T) { pod.UID = "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" tsBeforeDelete = time.Now() c.handlePodDelete(cache.DeletedFinalStateUnknown{Obj: pod}) - assert.Equal(t, 5, len(c.Pods)) - assert.Equal(t, 5, len(c.deleteQueue)) + assert.Len(t, c.Pods, 5) + assert.Len(t, c.deleteQueue, 5) deleteRequest = c.deleteQueue[0] assert.Equal(t, newPodIdentifier("connection", "k8s.pod.ip", "2.2.2.2"), deleteRequest.id) assert.Equal(t, "podC", deleteRequest.podName) @@ -501,7 +501,7 @@ func TestPodDelete(t *testing.T) { func TestNamespaceDelete(t *testing.T) { c, _ := newTestClient(t) namespaceAddAndUpdateTest(t, c, c.handleNamespaceAdd) - assert.Equal(t, 2, len(c.Namespaces)) + assert.Len(t, c.Namespaces, 2) assert.Equal(t, "namespaceA", c.Namespaces["namespaceA"].Name) // delete empty namespace @@ -511,32 +511,32 @@ func TestNamespaceDelete(t *testing.T) { namespace := &api_v1.Namespace{} namespace.Name = "namespaceC" c.handleNamespaceDelete(namespace) - assert.Equal(t, 2, len(c.Namespaces)) + assert.Len(t, c.Namespaces, 2) got := c.Namespaces["namespaceA"] assert.Equal(t, "namespaceA", got.Name) // delete non-existent namespace when DeletedFinalStateUnknown c.handleNamespaceDelete(cache.DeletedFinalStateUnknown{Obj: namespace}) - assert.Equal(t, 2, len(c.Namespaces)) + assert.Len(t, c.Namespaces, 2) got = c.Namespaces["namespaceA"] assert.Equal(t, "namespaceA", got.Name) // delete namespace A namespace.Name = "namespaceA" c.handleNamespaceDelete(namespace) - assert.Equal(t, 1, len(c.Namespaces)) + assert.Len(t, c.Namespaces, 1) got = c.Namespaces["namespaceB"] assert.Equal(t, "namespaceB", got.Name) // delete namespace B when DeletedFinalStateUnknown namespace.Name = "namespaceB" c.handleNamespaceDelete(cache.DeletedFinalStateUnknown{Obj: namespace}) - assert.Equal(t, 0, len(c.Namespaces)) + assert.Len(t, c.Namespaces, 0) } func TestNodeDelete(t *testing.T) { c, _ := newTestClient(t) nodeAddAndUpdateTest(t, c, c.handleNodeAdd) - assert.Equal(t, 2, len(c.Nodes)) + assert.Len(t, c.Nodes, 2) assert.Equal(t, "nodeA", c.Nodes["nodeA"].Name) // delete empty node @@ -546,32 +546,32 @@ func TestNodeDelete(t *testing.T) { node := &api_v1.Node{} node.Name = "nodeC" c.handleNodeDelete(node) - assert.Equal(t, 2, len(c.Nodes)) + assert.Len(t, c.Nodes, 2) got := c.Nodes["nodeA"] assert.Equal(t, "nodeA", got.Name) // delete non-existent namespace when DeletedFinalStateUnknown c.handleNodeDelete(cache.DeletedFinalStateUnknown{Obj: node}) - assert.Equal(t, 2, len(c.Nodes)) + assert.Len(t, c.Nodes, 2) got = c.Nodes["nodeA"] assert.Equal(t, "nodeA", got.Name) // delete node A node.Name = "nodeA" c.handleNodeDelete(node) - assert.Equal(t, 1, len(c.Nodes)) + assert.Len(t, c.Nodes, 1) got = c.Nodes["nodeB"] assert.Equal(t, "nodeB", got.Name) // delete node B when DeletedFinalStateUnknown node.Name = "nodeB" c.handleNodeDelete(cache.DeletedFinalStateUnknown{Obj: node}) - assert.Equal(t, 0, len(c.Nodes)) + assert.Len(t, c.Nodes, 0) } func TestDeleteQueue(t *testing.T) { c, _ := newTestClient(t) podAddAndUpdateTest(t, c, c.handlePodAdd) - assert.Equal(t, 5, len(c.Pods)) + assert.Len(t, c.Pods, 5) assert.Equal(t, "1.1.1.1", c.Pods[newPodIdentifier("connection", "k8s.pod.ip", "1.1.1.1")].Address) // delete pod @@ -579,8 +579,8 @@ func 
TestDeleteQueue(t *testing.T) { pod.Name = "podB" pod.Status.PodIP = "1.1.1.1" c.handlePodDelete(pod) - assert.Equal(t, 5, len(c.Pods)) - assert.Equal(t, 3, len(c.deleteQueue)) + assert.Len(t, c.Pods, 5) + assert.Len(t, c.deleteQueue, 3) } func TestDeleteLoop(t *testing.T) { @@ -590,30 +590,30 @@ func TestDeleteLoop(t *testing.T) { pod := &api_v1.Pod{} pod.Status.PodIP = "1.1.1.1" c.handlePodAdd(pod) - assert.Equal(t, 2, len(c.Pods)) - assert.Equal(t, 0, len(c.deleteQueue)) + assert.Len(t, c.Pods, 2) + assert.Len(t, c.deleteQueue, 0) c.handlePodDelete(pod) - assert.Equal(t, 2, len(c.Pods)) - assert.Equal(t, 3, len(c.deleteQueue)) + assert.Len(t, c.Pods, 2) + assert.Len(t, c.deleteQueue, 3) gracePeriod := time.Millisecond * 500 go c.deleteLoop(time.Millisecond, gracePeriod) go func() { time.Sleep(time.Millisecond * 50) c.m.Lock() - assert.Equal(t, 2, len(c.Pods)) + assert.Len(t, c.Pods, 2) c.m.Unlock() c.deleteMut.Lock() - assert.Equal(t, 3, len(c.deleteQueue)) + assert.Len(t, c.deleteQueue, 3) c.deleteMut.Unlock() time.Sleep(gracePeriod + (time.Millisecond * 50)) c.m.Lock() - assert.Equal(t, 0, len(c.Pods)) + assert.Len(t, c.Pods, 0) c.m.Unlock() c.deleteMut.Lock() - assert.Equal(t, 0, len(c.deleteQueue)) + assert.Len(t, c.deleteQueue, 0) c.deleteMut.Unlock() close(c.stopCh) }() diff --git a/processor/metricsgenerationprocessor/processor_test.go b/processor/metricsgenerationprocessor/processor_test.go index 765f8eb9a992..f6bb1af0777d 100644 --- a/processor/metricsgenerationprocessor/processor_test.go +++ b/processor/metricsgenerationprocessor/processor_test.go @@ -292,7 +292,7 @@ func TestMetricsGenerationProcessor(t *testing.T) { assert.Nil(t, cErr) got := next.AllMetrics() - require.Equal(t, 1, len(got)) + require.Len(t, got, 1) require.Equal(t, test.outMetrics.ResourceMetrics().Len(), got[0].ResourceMetrics().Len()) expectedMetrics := test.outMetrics.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics() diff --git a/processor/metricstransformprocessor/metrics_transform_processor_group_test.go b/processor/metricstransformprocessor/metrics_transform_processor_group_test.go index 85bf8b9e7694..b1781e8cacec 100644 --- a/processor/metricstransformprocessor/metrics_transform_processor_group_test.go +++ b/processor/metricstransformprocessor/metrics_transform_processor_group_test.go @@ -86,7 +86,7 @@ func TestMetricsGrouping(t *testing.T) { assert.NoError(t, cErr) got := next.AllMetrics() - require.Equal(t, 1, len(got)) + require.Len(t, got, 1) require.NoError(t, pmetrictest.CompareMetrics(expected, got[0], pmetrictest.IgnoreMetricValues())) assert.NoError(t, mtp.Shutdown(context.Background())) diff --git a/processor/metricstransformprocessor/metrics_transform_processor_test.go b/processor/metricstransformprocessor/metrics_transform_processor_test.go index 22e5b851a304..079fa5256d4d 100644 --- a/processor/metricstransformprocessor/metrics_transform_processor_test.go +++ b/processor/metricstransformprocessor/metrics_transform_processor_test.go @@ -50,7 +50,7 @@ func TestMetricsTransformProcessor(t *testing.T) { // get and check results got := next.AllMetrics() - require.Equal(t, 1, len(got)) + require.Len(t, got, 1) gotMetricsSlice := pmetric.NewMetricSlice() if got[0].ResourceMetrics().Len() > 0 { gotMetricsSlice = got[0].ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics() diff --git a/processor/probabilisticsamplerprocessor/logsprocessor_test.go b/processor/probabilisticsamplerprocessor/logsprocessor_test.go index 7cfeb896a230..7de3b0cc1ef7 100644 --- 
a/processor/probabilisticsamplerprocessor/logsprocessor_test.go +++ b/processor/probabilisticsamplerprocessor/logsprocessor_test.go @@ -411,10 +411,10 @@ func TestLogsSamplingState(t *testing.T) { require.NoError(t, err) if len(tt.log) == 0 { - require.Equal(t, 0, len(observed.All()), "should not have logs: %v", observed.All()) + require.Len(t, observed.All(), 0, "should not have logs: %v", observed.All()) require.Equal(t, "", tt.log) } else { - require.Equal(t, 1, len(observed.All()), "should have one log: %v", observed.All()) + require.Len(t, observed.All(), 1, "should have one log: %v", observed.All()) require.Contains(t, observed.All()[0].Message, "logs sampler") require.Contains(t, observed.All()[0].Context[0].Interface.(error).Error(), tt.log) } @@ -422,7 +422,7 @@ func TestLogsSamplingState(t *testing.T) { sampledData := sink.AllLogs() if tt.sampled { - require.Equal(t, 1, len(sampledData)) + require.Len(t, sampledData, 1) assert.Equal(t, 1, sink.LogRecordCount()) got := sink.AllLogs()[0].ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0) gotAttrs := got.Attributes() @@ -504,20 +504,20 @@ func TestLogsMissingRandomness(t *testing.T) { sampledData := sink.AllLogs() if tt.sampled { - require.Equal(t, 1, len(sampledData)) + require.Len(t, sampledData, 1) assert.Equal(t, 1, sink.LogRecordCount()) } else { - require.Equal(t, 0, len(sampledData)) + require.Len(t, sampledData, 0) assert.Equal(t, 0, sink.LogRecordCount()) } if tt.pct != 0 { // pct==0 bypasses the randomness check - require.Equal(t, 1, len(observed.All()), "should have one log: %v", observed.All()) + require.Len(t, observed.All(), 1, "should have one log: %v", observed.All()) require.Contains(t, observed.All()[0].Message, "logs sampler") require.Contains(t, observed.All()[0].Context[0].Interface.(error).Error(), "missing randomness") } else { - require.Equal(t, 0, len(observed.All()), "should have no logs: %v", observed.All()) + require.Len(t, observed.All(), 0, "should have no logs: %v", observed.All()) } }) } diff --git a/processor/probabilisticsamplerprocessor/tracesprocessor_test.go b/processor/probabilisticsamplerprocessor/tracesprocessor_test.go index 608296e94e4c..845b80bc7154 100644 --- a/processor/probabilisticsamplerprocessor/tracesprocessor_test.go +++ b/processor/probabilisticsamplerprocessor/tracesprocessor_test.go @@ -276,20 +276,20 @@ func Test_tracessamplerprocessor_MissingRandomness(t *testing.T) { sampledData := sink.AllTraces() if tt.sampled { - require.Equal(t, 1, len(sampledData)) + require.Len(t, sampledData, 1) assert.Equal(t, 1, sink.SpanCount()) } else { - require.Equal(t, 0, len(sampledData)) + require.Len(t, sampledData, 0) assert.Equal(t, 0, sink.SpanCount()) } if tt.pct != 0 { // pct==0 bypasses the randomness check - require.Equal(t, 1, len(observed.All()), "should have one log: %v", observed.All()) + require.Len(t, observed.All(), 1, "should have one log: %v", observed.All()) require.Contains(t, observed.All()[0].Message, "traces sampler") require.Contains(t, observed.All()[0].Context[0].Interface.(error).Error(), "missing randomness") } else { - require.Equal(t, 0, len(observed.All()), "should have no logs: %v", observed.All()) + require.Len(t, observed.All(), 0, "should have no logs: %v", observed.All()) } }) } @@ -406,10 +406,10 @@ func Test_tracesamplerprocessor_SpanSamplingPriority(t *testing.T) { sampledData := sink.AllTraces() if tt.sampled { - require.Equal(t, 1, len(sampledData)) + require.Len(t, sampledData, 1) assert.Equal(t, 1, sink.SpanCount()) } else { - require.Equal(t, 0, 
len(sampledData)) + require.Len(t, sampledData, 0) assert.Equal(t, 0, sink.SpanCount()) } }) @@ -882,7 +882,7 @@ func Test_tracesamplerprocessor_TraceState(t *testing.T) { expectSampled, expectCount, expectTS = tt.sf(mode) } if expectSampled { - require.Equal(t, 1, len(sampledData)) + require.Len(t, sampledData, 1) assert.Equal(t, 1, sink.SpanCount()) got := sink.AllTraces()[0].ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0) gotTs, err := sampling.NewW3CTraceState(got.TraceState().AsRaw()) @@ -899,15 +899,15 @@ func Test_tracesamplerprocessor_TraceState(t *testing.T) { } require.Equal(t, expectTS, got.TraceState().AsRaw()) } else { - require.Equal(t, 0, len(sampledData)) + require.Len(t, sampledData, 0) assert.Equal(t, 0, sink.SpanCount()) require.Equal(t, "", expectTS) } if len(tt.log) == 0 { - require.Equal(t, 0, len(observed.All()), "should not have logs: %v", observed.All()) + require.Len(t, observed.All(), 0, "should not have logs: %v", observed.All()) } else { - require.Equal(t, 1, len(observed.All()), "should have one log: %v", observed.All()) + require.Len(t, observed.All(), 1, "should have one log: %v", observed.All()) require.Contains(t, observed.All()[0].Message, "traces sampler") require.Contains(t, observed.All()[0].Context[0].Interface.(error).Error(), tt.log) } @@ -1026,10 +1026,10 @@ func Test_tracesamplerprocessor_TraceStateErrors(t *testing.T) { sampledData := sink.AllTraces() - require.Equal(t, 0, len(sampledData)) + require.Len(t, sampledData, 0) assert.Equal(t, 0, sink.SpanCount()) - require.Equal(t, 1, len(observed.All()), "should have one log: %v", observed.All()) + require.Len(t, observed.All(), 1, "should have one log: %v", observed.All()) if observed.All()[0].Message == "trace sampler" { require.Contains(t, observed.All()[0].Context[0].Interface.(error).Error(), expectMessage) } else { diff --git a/processor/resourcedetectionprocessor/internal/resourcedetection_test.go b/processor/resourcedetectionprocessor/internal/resourcedetection_test.go index 9e17934f0eba..88e3ce91524b 100644 --- a/processor/resourcedetectionprocessor/internal/resourcedetection_test.go +++ b/processor/resourcedetectionprocessor/internal/resourcedetection_test.go @@ -298,7 +298,7 @@ func TestFilterAttributes_NilAttributes(t *testing.T) { _, ok = attr.Get("host.id") assert.True(t, ok) - assert.Equal(t, len(droppedAttributes), 0) + assert.Len(t, droppedAttributes, 0) } func TestFilterAttributes_NoAttributes(t *testing.T) { @@ -315,5 +315,5 @@ func TestFilterAttributes_NoAttributes(t *testing.T) { _, ok = attr.Get("host.id") assert.True(t, ok) - assert.Equal(t, len(droppedAttributes), 0) + assert.Len(t, droppedAttributes, 0) } diff --git a/processor/tailsamplingprocessor/processor_test.go b/processor/tailsamplingprocessor/processor_test.go index 84bded10d543..a567e4bf9a3e 100644 --- a/processor/tailsamplingprocessor/processor_test.go +++ b/processor/tailsamplingprocessor/processor_test.go @@ -114,7 +114,7 @@ func TestTraceIntegrity(t *testing.T) { span.SetTraceID(pcommon.TraceID([16]byte{13, 14, 15, 16})) spans[spanID] = spanInfo{span: span, resource: resource, scope: scope} - require.Equal(t, spanCount, len(spans)) + require.Len(t, spans, spanCount) cfg := Config{ DecisionWait: defaultTestDecisionWait, @@ -157,7 +157,7 @@ func TestTraceIntegrity(t *testing.T) { require.EqualValues(t, 4, mpe1.EvaluationCount) consumed := nextConsumer.AllTraces() - require.Equal(t, 4, len(consumed)) + require.Len(t, consumed, 4) for _, trace := range consumed { require.Equal(t, 1, trace.SpanCount()) 
require.Equal(t, 1, trace.ResourceSpans().Len()) @@ -410,7 +410,7 @@ func TestMultipleBatchesAreCombinedIntoOne(t *testing.T) { tsp.policyTicker.OnTick() // the first tick always gets an empty batch tsp.policyTicker.OnTick() - require.EqualValues(t, 3, len(msp.AllTraces()), "There should be three batches, one for each trace") + require.Len(t, msp.AllTraces(), 3, "There should be three batches, one for each trace") expectedSpanIDs := make(map[int][]pcommon.SpanID) expectedSpanIDs[0] = []pcommon.SpanID{ diff --git a/processor/transformprocessor/config_test.go b/processor/transformprocessor/config_test.go index 1e4c3f028580..fe30c4c58ac1 100644 --- a/processor/transformprocessor/config_test.go +++ b/processor/transformprocessor/config_test.go @@ -165,7 +165,7 @@ func TestLoadConfig(t *testing.T) { assert.Error(t, err) if tt.errorLen > 0 { - assert.Equal(t, tt.errorLen, len(multierr.Errors(err))) + assert.Len(t, multierr.Errors(err), tt.errorLen) } return diff --git a/receiver/awscloudwatchreceiver/logs_test.go b/receiver/awscloudwatchreceiver/logs_test.go index 104fc1ca5388..6861abe2280c 100644 --- a/receiver/awscloudwatchreceiver/logs_test.go +++ b/receiver/awscloudwatchreceiver/logs_test.go @@ -162,7 +162,7 @@ func TestDiscovery(t *testing.T) { require.Eventually(t, func() bool { return sink.LogRecordCount() > 0 }, 2*time.Second, 10*time.Millisecond) - require.Equal(t, len(logsRcvr.groupRequests), 2) + require.Len(t, logsRcvr.groupRequests, 2) require.NoError(t, logsRcvr.Shutdown(context.Background())) } diff --git a/receiver/awscontainerinsightreceiver/internal/cadvisor/container_info_processor_test.go b/receiver/awscontainerinsightreceiver/internal/cadvisor/container_info_processor_test.go index 6675c25a3617..bd9a5b6f566e 100644 --- a/receiver/awscontainerinsightreceiver/internal/cadvisor/container_info_processor_test.go +++ b/receiver/awscontainerinsightreceiver/internal/cadvisor/container_info_processor_test.go @@ -64,7 +64,7 @@ func TestProcessContainers(t *testing.T) { containerInfos = append(containerInfos, containerInContainerInfos...) 
mInfo := testutils.MockCPUMemInfo{} metrics := processContainers(containerInfos, mInfo, "eks", zap.NewNop()) - assert.Equal(t, 3, len(metrics)) + assert.Len(t, metrics, 3) // restore the original value of metrics extractors metricsExtractors = originalMetricsExtractors diff --git a/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/diskio_extractor_test.go b/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/diskio_extractor_test.go index f8492d8d0c85..03d2352844d3 100644 --- a/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/diskio_extractor_test.go +++ b/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/diskio_extractor_test.go @@ -98,5 +98,5 @@ func TestDiskIOStats(t *testing.T) { cMetrics = extractor.GetValue(result2[0], nil, containerType) } - assert.Equal(t, len(cMetrics), 0) + assert.Len(t, cMetrics, 0) } diff --git a/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/extractor_test.go b/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/extractor_test.go index 79e1ce986820..3cb09509b14e 100644 --- a/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/extractor_test.go +++ b/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/extractor_test.go @@ -25,7 +25,7 @@ func TestCAdvisorMetric_Merge(t *testing.T) { logger: zap.NewNop(), } src.Merge(dest) - assert.Equal(t, 3, len(src.fields)) + assert.Len(t, src.fields, 3) assert.Equal(t, 1, src.fields["value1"].(int)) } diff --git a/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/fs_extractor_test.go b/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/fs_extractor_test.go index 5e942cf80339..7549ac093851 100644 --- a/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/fs_extractor_test.go +++ b/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/fs_extractor_test.go @@ -47,7 +47,7 @@ func TestFSStats(t *testing.T) { cMetrics = extractor.GetValue(result[0], nil, containerType) } - assert.Equal(t, len(cMetrics), 0) + assert.Len(t, cMetrics, 0) // node type for eks @@ -131,7 +131,7 @@ func TestFSStatsWithAllowList(t *testing.T) { } // There are 3 valid device names which pass the allowlist in testAllowList json. 
- assert.Equal(t, 3, len(cMetrics)) + assert.Len(t, cMetrics, 3) assert.Equal(t, "tmpfs", cMetrics[0].tags["device"]) assert.Equal(t, "/dev/xvda1", cMetrics[1].tags["device"]) assert.Equal(t, "overlay", cMetrics[2].tags["device"]) diff --git a/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/net_extractor_test.go b/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/net_extractor_test.go index c5c067286690..7e5013c9378f 100644 --- a/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/net_extractor_test.go +++ b/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/net_extractor_test.go @@ -152,7 +152,7 @@ func TestNetStats(t *testing.T) { }, } - assert.Equal(t, len(cMetrics), 8) + assert.Len(t, cMetrics, 8) for i := range expectedFields { AssertContainsTaggedField(t, cMetrics[i], expectedFields[i], expectedTags[i]) } diff --git a/receiver/awscontainerinsightreceiver/internal/ecsInfo/ecs_task_info_test.go b/receiver/awscontainerinsightreceiver/internal/ecsInfo/ecs_task_info_test.go index 6eb1a635d352..24e2a5aef3b0 100644 --- a/receiver/awscontainerinsightreceiver/internal/ecsInfo/ecs_task_info_test.go +++ b/receiver/awscontainerinsightreceiver/internal/ecsInfo/ecs_task_info_test.go @@ -75,7 +75,7 @@ func TestECSTaskInfoFail(t *testing.T) { ecsTaskinfo := newECSTaskInfo(ctx, hostIPProvider, time.Minute, zap.NewNop(), mockHTTP, taskReadyC) assert.NotNil(t, ecsTaskinfo) assert.Equal(t, int64(0), ecsTaskinfo.getRunningTaskCount()) - assert.Equal(t, 0, len(ecsTaskinfo.getRunningTasksInfo())) + assert.Len(t, ecsTaskinfo.getRunningTasksInfo(), 0) data, err := os.ReadFile("./test/ecsinfo/taskinfo_wrong") body := string(data) @@ -87,6 +87,6 @@ func TestECSTaskInfoFail(t *testing.T) { ecsTaskinfo = newECSTaskInfo(ctx, hostIPProvider, time.Minute, zap.NewNop(), mockHTTP, taskReadyC) assert.NotNil(t, ecsTaskinfo) assert.Equal(t, int64(0), ecsTaskinfo.getRunningTaskCount()) - assert.Equal(t, 0, len(ecsTaskinfo.getRunningTasksInfo())) + assert.Len(t, ecsTaskinfo.getRunningTasksInfo(), 0) } diff --git a/receiver/awscontainerinsightreceiver/internal/host/ebsvolume_test.go b/receiver/awscontainerinsightreceiver/internal/host/ebsvolume_test.go index b24ce9030524..f0f1de6757ee 100644 --- a/receiver/awscontainerinsightreceiver/internal/host/ebsvolume_test.go +++ b/receiver/awscontainerinsightreceiver/internal/host/ebsvolume_test.go @@ -170,7 +170,7 @@ func TestEBSVolume(t *testing.T) { assert.Equal(t, "", e.getEBSVolumeID("/dev/invalid")) ebsIDs := e.extractEbsIDsUsedByKubernetes() - assert.Equal(t, 1, len(ebsIDs)) + assert.Len(t, ebsIDs, 1) assert.Equal(t, "aws://us-west-2b/vol-0d9f0816149eb2050", ebsIDs["/dev/nvme1n1"]) // set e.hostMounts to an invalid path @@ -180,5 +180,5 @@ func TestEBSVolume(t *testing.T) { e = newEBSVolume(ctx, sess, "instanceId", "us-west-2", time.Millisecond, zap.NewNop(), clientOption, maxJitterOption, hostMountsOption, LstatOption, evalSymLinksOption) ebsIDs = e.extractEbsIDsUsedByKubernetes() - assert.Equal(t, 0, len(ebsIDs)) + assert.Len(t, ebsIDs, 0) } diff --git a/receiver/awscontainerinsightreceiver/internal/stores/podstore_test.go b/receiver/awscontainerinsightreceiver/internal/stores/podstore_test.go index 34a2d94c283f..9dd73257f47d 100644 --- a/receiver/awscontainerinsightreceiver/internal/stores/podstore_test.go +++ b/receiver/awscontainerinsightreceiver/internal/stores/podstore_test.go @@ -557,7 +557,7 @@ func TestPodStore_addPodOwnersAndPodName(t *testing.T) { kubernetesBlob = map[string]any{} 
podStore.addPodOwnersAndPodName(metric, pod, kubernetesBlob) assert.Equal(t, kpName, metric.GetTag(ci.PodNameKey)) - assert.True(t, len(kubernetesBlob) == 0) + assert.Len(t, kubernetesBlob, 0) podStore.prefFullPodName = false metric = generateMetric(fields, tags) @@ -566,7 +566,7 @@ func TestPodStore_addPodOwnersAndPodName(t *testing.T) { kubernetesBlob = map[string]any{} podStore.addPodOwnersAndPodName(metric, pod, kubernetesBlob) assert.Equal(t, kubeProxy, metric.GetTag(ci.PodNameKey)) - assert.True(t, len(kubernetesBlob) == 0) + assert.Len(t, kubernetesBlob, 0) } type mockPodClient struct { diff --git a/receiver/azureblobreceiver/config_test.go b/receiver/azureblobreceiver/config_test.go index b1deb0ef1319..7796d1047bae 100644 --- a/receiver/azureblobreceiver/config_test.go +++ b/receiver/azureblobreceiver/config_test.go @@ -29,7 +29,7 @@ func TestLoadConfig(t *testing.T) { require.NoError(t, err) require.NotNil(t, cfg) - assert.Equal(t, len(cfg.Receivers), 2) + assert.Len(t, cfg.Receivers, 2) receiver := cfg.Receivers[component.NewID(metadata.Type)] assert.NoError(t, componenttest.CheckConfigStruct(receiver)) diff --git a/receiver/azureeventhubreceiver/config_test.go b/receiver/azureeventhubreceiver/config_test.go index 4f658844f047..1e48db57efb4 100644 --- a/receiver/azureeventhubreceiver/config_test.go +++ b/receiver/azureeventhubreceiver/config_test.go @@ -28,7 +28,7 @@ func TestLoadConfig(t *testing.T) { require.NoError(t, err) require.NotNil(t, cfg) - assert.Equal(t, len(cfg.Receivers), 2) + assert.Len(t, cfg.Receivers, 2) r0 := cfg.Receivers[component.NewID(metadata.Type)] assert.Equal(t, "Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName", r0.(*Config).Connection) diff --git a/receiver/datadogreceiver/internal/translator/series_test.go b/receiver/datadogreceiver/internal/translator/series_test.go index 23bcb0f041f9..e3d0400a0edf 100644 --- a/receiver/datadogreceiver/internal/translator/series_test.go +++ b/receiver/datadogreceiver/internal/translator/series_test.go @@ -83,9 +83,9 @@ func TestHandleMetricsPayloadV2(t *testing.T) { series, err := mt.HandleSeriesV2Payload(req) require.NoError(t, err) require.NoError(t, err, "Failed to parse metrics payload") - require.Equal(t, tt.expectedSeriesCount, len(series)) + require.Len(t, series, tt.expectedSeriesCount) for i, s := range series { - require.Equal(t, tt.expectedPointsCounts[i], len(s.Points)) + require.Len(t, s.Points, tt.expectedPointsCounts[i]) } }) } diff --git a/receiver/datadogreceiver/internal/translator/traces_translator_test.go b/receiver/datadogreceiver/internal/translator/traces_translator_test.go index 423390a9cf3e..60cb51029bd8 100644 --- a/receiver/datadogreceiver/internal/translator/traces_translator_test.go +++ b/receiver/datadogreceiver/internal/translator/traces_translator_test.go @@ -121,11 +121,11 @@ func TestTracePayloadV07Unmarshalling(t *testing.T) { req, _ := http.NewRequest(http.MethodPost, "/v0.7/traces", io.NopCloser(bytes.NewReader(bytez))) translatedPayloads, _ := HandleTracesPayload(req) - assert.Equal(t, len(translatedPayloads), 1, "Expected one translated payload") + assert.Len(t, translatedPayloads, 1, "Expected one translated payload") translated := translatedPayloads[0] span := translated.GetChunks()[0].GetSpans()[0] assert.NotNil(t, span) - assert.Equal(t, 5, len(span.GetMeta()), "missing attributes") + assert.Len(t, span.GetMeta(), 5, "missing attributes") value, exists := 
span.GetMeta()["service.name"] assert.True(t, exists, "service.name missing") assert.Equal(t, "my-service", value, "service.name attribute value incorrect") @@ -157,15 +157,15 @@ func TestTracePayloadApiV02Unmarshalling(t *testing.T) { req, _ := http.NewRequest(http.MethodPost, "/api/v0.2/traces", io.NopCloser(bytes.NewReader(bytez))) translatedPayloads, _ := HandleTracesPayload(req) - assert.Equal(t, len(translatedPayloads), 2, "Expected two translated payload") + assert.Len(t, translatedPayloads, 2, "Expected two translated payload") for _, translated := range translatedPayloads { assert.NotNil(t, translated) - assert.Equal(t, 1, len(translated.Chunks)) - assert.Equal(t, 1, len(translated.Chunks[0].Spans)) + assert.Len(t, translated.Chunks, 1) + assert.Len(t, translated.Chunks[0].Spans, 1) span := translated.Chunks[0].Spans[0] assert.NotNil(t, span) - assert.Equal(t, 5, len(span.Meta), "missing attributes") + assert.Len(t, span.Meta, 5, "missing attributes") assert.Equal(t, "my-service", span.Meta["service.name"]) assert.Equal(t, "my-name", span.Name) assert.Equal(t, "my-resource", span.Resource) diff --git a/receiver/gitproviderreceiver/config_test.go b/receiver/gitproviderreceiver/config_test.go index 925423f20924..84d9b22d8869 100644 --- a/receiver/gitproviderreceiver/config_test.go +++ b/receiver/gitproviderreceiver/config_test.go @@ -33,7 +33,7 @@ func TestLoadConfig(t *testing.T) { require.NoError(t, err) require.NotNil(t, cfg) - assert.Equal(t, len(cfg.Receivers), 2) + assert.Len(t, cfg.Receivers, 2) r0 := cfg.Receivers[component.NewID(metadata.Type)] defaultConfigGitHubScraper := factory.CreateDefaultConfig() diff --git a/receiver/gitproviderreceiver/internal/scraper/githubscraper/helpers_test.go b/receiver/gitproviderreceiver/internal/scraper/githubscraper/helpers_test.go index f9ee0647d7b5..4007da15dfa8 100644 --- a/receiver/gitproviderreceiver/internal/scraper/githubscraper/helpers_test.go +++ b/receiver/gitproviderreceiver/internal/scraper/githubscraper/helpers_test.go @@ -453,7 +453,7 @@ func TestGetPullRequests(t *testing.T) { prs, err := ghs.getPullRequests(context.Background(), client, "repo name") - assert.Equal(t, tc.expectedPrCount, len(prs)) + assert.Len(t, prs, tc.expectedPrCount) if tc.expectedErr == nil { assert.NoError(t, err) } else { diff --git a/receiver/googlecloudspannerreceiver/internal/filter/itemcardinality_test.go b/receiver/googlecloudspannerreceiver/internal/filter/itemcardinality_test.go index 1a9858583d20..9b4f06cb1998 100644 --- a/receiver/googlecloudspannerreceiver/internal/filter/itemcardinality_test.go +++ b/receiver/googlecloudspannerreceiver/internal/filter/itemcardinality_test.go @@ -140,7 +140,7 @@ func TestItemCardinalityFilter_Filter(t *testing.T) { require.NoError(t, err) // Cache timeout hasn't been reached, so filtered out all items - assert.Equal(t, 0, len(filteredItems)) + assert.Len(t, filteredItems, 0) // Doing this to avoid of relying on timeouts and sleeps(avoid potential flaky tests) syncChannel := make(chan bool) @@ -192,13 +192,13 @@ func TestItemCardinalityFilter_FilterItems(t *testing.T) { filteredItems, err = filterCasted.filterItems(items) require.NoError(t, err) - assert.Equal(t, totalLimit, len(filteredItems)) + assert.Len(t, filteredItems, totalLimit) filteredItems, err = filter.Filter(items) require.NoError(t, err) // Cache timeout hasn't been reached, so no more new items expected - assert.Equal(t, totalLimit, len(filteredItems)) + assert.Len(t, filteredItems, totalLimit) // Doing this to avoid of relying on timeouts and 
sleeps(avoid potential flaky tests) syncChannel := make(chan bool) @@ -280,7 +280,7 @@ func TestGroupByTimestamp(t *testing.T) { items := initialItems(t) groupedItems := groupByTimestamp(items) - assert.Equal(t, 3, len(groupedItems)) + assert.Len(t, groupedItems, 3) assertGroupedByKey(t, items, groupedItems, timestamp1, 0) assertGroupedByKey(t, items, groupedItems, timestamp2, 3) assertGroupedByKey(t, items, groupedItems, timestamp3, 6) diff --git a/receiver/googlecloudspannerreceiver/internal/filter/testhelpers_test.go b/receiver/googlecloudspannerreceiver/internal/filter/testhelpers_test.go index 42a8273812a2..ab7d755ae582 100644 --- a/receiver/googlecloudspannerreceiver/internal/filter/testhelpers_test.go +++ b/receiver/googlecloudspannerreceiver/internal/filter/testhelpers_test.go @@ -35,7 +35,7 @@ const ( ) func assertGroupedByKey(t *testing.T, items []*Item, groupedItems map[time.Time][]*Item, key time.Time, offsetInItems int) { - assert.Equal(t, 3, len(groupedItems[key])) + assert.Len(t, groupedItems[key], 3) for i := 0; i < 3; i++ { assert.Equal(t, items[i+offsetInItems].SeriesKey, groupedItems[key][i].SeriesKey) diff --git a/receiver/googlecloudspannerreceiver/internal/filterfactory/filterbuilder_test.go b/receiver/googlecloudspannerreceiver/internal/filterfactory/filterbuilder_test.go index 71ee09d89f21..7703fb4f5320 100644 --- a/receiver/googlecloudspannerreceiver/internal/filterfactory/filterbuilder_test.go +++ b/receiver/googlecloudspannerreceiver/internal/filterfactory/filterbuilder_test.go @@ -30,7 +30,7 @@ func TestFilterBuilder_BuildFilterByMetricZeroTotalLimit(t *testing.T) { result := builder.buildFilterByMetricZeroTotalLimit() // Because we have 2 groups and each group has 2 metrics - assert.Equal(t, len(metricPrefixes)*2, len(result)) + assert.Len(t, result, len(metricPrefixes)*2) for _, metadataItem := range metadataItems { for _, metricValueMetadata := range metadataItem.QueryMetricValuesMetadata { f, exists := result[metadataItem.MetricNamePrefix+metricValueMetadata.Name()] @@ -82,7 +82,7 @@ func TestFilterBuilder_BuildFilterByMetricPositiveTotalLimit(t *testing.T) { require.NoError(t, err) // Because we have 2 groups and each group has 2 metrics - assert.Equal(t, len(testCase.metricPrefixes)*2, len(result)) + assert.Len(t, result, len(testCase.metricPrefixes)*2) for _, metadataItem := range metadataItems { for _, metricValueMetadata := range metadataItem.QueryMetricValuesMetadata { f, exists := result[metadataItem.MetricNamePrefix+metricValueMetadata.Name()] @@ -138,7 +138,7 @@ func TestFilterBuilder_HandleLowCardinalityGroups(t *testing.T) { require.NoError(t, err) // Because we have 2 groups and each group has 2 metrics - assert.Equal(t, len(testCase.metricPrefixes)*2, len(filterByMetric)) + assert.Len(t, filterByMetric, len(testCase.metricPrefixes)*2) for _, metadataItem := range metadataItems { for _, metricValueMetadata := range metadataItem.QueryMetricValuesMetadata { f, exists := filterByMetric[metadataItem.MetricNamePrefix+metricValueMetadata.Name()] @@ -194,7 +194,7 @@ func TestFilterBuilder_HandleHighCardinalityGroups(t *testing.T) { require.NoError(t, err) // Because we have 2 groups and each group has 2 metrics - assert.Equal(t, len(testCase.metricPrefixes)*2, len(filterByMetric)) + assert.Len(t, filterByMetric, len(testCase.metricPrefixes)*2) for _, metadataItem := range metadataItems { for _, metricValueMetadata := range metadataItem.QueryMetricValuesMetadata { f, exists := filterByMetric[metadataItem.MetricNamePrefix+metricValueMetadata.Name()] @@ 
-229,7 +229,7 @@ func TestFilterBuilder_TestConstructFiltersForGroups(t *testing.T) { require.NoError(t, err) // Because we have 2 groups and each group has 2 metrics - assert.Equal(t, len(metricPrefixes)*2, len(filterByMetric)) + assert.Len(t, filterByMetric, len(metricPrefixes)*2) for _, metadataItem := range metadataItems { for _, metricValueMetadata := range metadataItem.QueryMetricValuesMetadata { f, exists := filterByMetric[metadataItem.MetricNamePrefix+metricValueMetadata.Name()] @@ -257,12 +257,12 @@ func TestGroupByCardinality(t *testing.T) { result := groupByCardinality(metadataItems) - assert.Equal(t, 2, len(result)) + assert.Len(t, result, 2) for _, metadataItem := range metadataItems { groups, exists := result[metadataItem.HighCardinality] assert.True(t, exists) - assert.Equal(t, 1, len(groups)) + assert.Len(t, groups, 1) assert.Equal(t, metadataItem, groups[0]) } } diff --git a/receiver/googlecloudspannerreceiver/internal/metadata/metricsbuilder_test.go b/receiver/googlecloudspannerreceiver/internal/metadata/metricsbuilder_test.go index df4291f32b31..0d12bdaa9506 100644 --- a/receiver/googlecloudspannerreceiver/internal/metadata/metricsbuilder_test.go +++ b/receiver/googlecloudspannerreceiver/internal/metadata/metricsbuilder_test.go @@ -221,7 +221,7 @@ func TestMetricsFromDataPointBuilder_GroupAndFilter_NilDataPoints(t *testing.T) require.NoError(t, err) - assert.Equal(t, 0, len(groupedDataPoints)) + assert.Len(t, groupedDataPoints, 0) } func TestMetricsFromDataPointBuilder_Filter(t *testing.T) { diff --git a/receiver/googlecloudspannerreceiver/internal/metadata/metricsdatapoint_test.go b/receiver/googlecloudspannerreceiver/internal/metadata/metricsdatapoint_test.go index e8cc49cd09c6..6167a5e37f1c 100644 --- a/receiver/googlecloudspannerreceiver/internal/metadata/metricsdatapoint_test.go +++ b/receiver/googlecloudspannerreceiver/internal/metadata/metricsdatapoint_test.go @@ -126,7 +126,7 @@ func TestMetricsDataPoint_HideLockStatsRowrangestartkeyPII(t *testing.T) { metricsDataPoint.HideLockStatsRowrangestartkeyPII() - assert.Equal(t, len(metricsDataPoint.labelValues), 2) + assert.Len(t, metricsDataPoint.labelValues, 2) assert.Equal(t, metricsDataPoint.labelValues[0].Value(), "table1.s("+hashOf23+","+hashOfHello+","+hashOf23+"+)") assert.Equal(t, metricsDataPoint.labelValues[1].Value(), "table2("+hashOf23+","+hashOfHello+")") } @@ -149,7 +149,7 @@ func TestMetricsDataPoint_HideLockStatsRowrangestartkeyPIIWithInvalidLabelValue( metricValue: metricValues[0], } metricsDataPoint.HideLockStatsRowrangestartkeyPII() - assert.Equal(t, len(metricsDataPoint.labelValues), 4) + assert.Len(t, metricsDataPoint.labelValues, 4) } func TestMetricsDataPoint_TruncateQueryText(t *testing.T) { @@ -168,7 +168,7 @@ func TestMetricsDataPoint_TruncateQueryText(t *testing.T) { metricsDataPoint.TruncateQueryText(6) - assert.Equal(t, len(metricsDataPoint.labelValues), 1) + assert.Len(t, metricsDataPoint.labelValues, 1) assert.Equal(t, metricsDataPoint.labelValues[0].Value(), "SELECT") } diff --git a/receiver/googlecloudspannerreceiver/internal/metadata/metricsmetadata_test.go b/receiver/googlecloudspannerreceiver/internal/metadata/metricsmetadata_test.go index 7f9d4a22caa2..683edc59ccc3 100644 --- a/receiver/googlecloudspannerreceiver/internal/metadata/metricsmetadata_test.go +++ b/receiver/googlecloudspannerreceiver/internal/metadata/metricsmetadata_test.go @@ -259,7 +259,7 @@ func TestMetricsMetadata_RowToMetricsDataPoints(t *testing.T) { require.Error(t, err) } else { require.NoError(t, err) - 
assert.Equal(t, 1, len(dataPoints)) + assert.Len(t, dataPoints, 1) } }) } diff --git a/receiver/googlecloudspannerreceiver/internal/metadataparser/metadata_test.go b/receiver/googlecloudspannerreceiver/internal/metadataparser/metadata_test.go index 4739cccb7a74..3ff0f7ac8fd6 100644 --- a/receiver/googlecloudspannerreceiver/internal/metadataparser/metadata_test.go +++ b/receiver/googlecloudspannerreceiver/internal/metadataparser/metadata_test.go @@ -41,7 +41,7 @@ func TestMetadata_ToLabelValuesMetadata(t *testing.T) { require.NotNil(t, valuesMetadata) require.NoError(t, err) - assert.Equal(t, 1, len(valuesMetadata)) + assert.Len(t, valuesMetadata, 1) } }) } @@ -80,7 +80,7 @@ func TestMetadata_ToMetricValuesMetadata(t *testing.T) { require.NotNil(t, valuesMetadata) require.NoError(t, err) - assert.Equal(t, 1, len(valuesMetadata)) + assert.Len(t, valuesMetadata, 1) } }) } @@ -137,8 +137,8 @@ func TestMetadata_MetricsMetadata(t *testing.T) { assert.Equal(t, md.MetricNamePrefix, metricsMetadata.MetricNamePrefix) assert.Equal(t, md.TimestampColumnName, metricsMetadata.TimestampColumnName) assert.Equal(t, md.HighCardinality, metricsMetadata.HighCardinality) - assert.Equal(t, 1, len(metricsMetadata.QueryLabelValuesMetadata)) - assert.Equal(t, 1, len(metricsMetadata.QueryMetricValuesMetadata)) + assert.Len(t, metricsMetadata.QueryLabelValuesMetadata, 1) + assert.Len(t, metricsMetadata.QueryMetricValuesMetadata, 1) } }) } diff --git a/receiver/googlecloudspannerreceiver/internal/metadataparser/metadataparser_test.go b/receiver/googlecloudspannerreceiver/internal/metadataparser/metadataparser_test.go index 7c40e0f778a0..1f0d7e93c944 100644 --- a/receiver/googlecloudspannerreceiver/internal/metadataparser/metadataparser_test.go +++ b/receiver/googlecloudspannerreceiver/internal/metadataparser/metadataparser_test.go @@ -37,7 +37,7 @@ func TestParseMetadataConfig(t *testing.T) { require.Nil(t, metadataSlice) } else { require.NoError(t, err) - assert.Equal(t, 2, len(metadataSlice)) + assert.Len(t, metadataSlice, 2) mData := metadataSlice[0] @@ -58,12 +58,12 @@ func assertMetricsMetadata(t *testing.T, expectedName string, metricsMetadata *m assert.Equal(t, "query", metricsMetadata.Query) assert.Equal(t, "metric_name_prefix", metricsMetadata.MetricNamePrefix) - assert.Equal(t, 1, len(metricsMetadata.QueryLabelValuesMetadata)) + assert.Len(t, metricsMetadata.QueryLabelValuesMetadata, 1) assert.Equal(t, "label_name", metricsMetadata.QueryLabelValuesMetadata[0].Name()) assert.Equal(t, "LABEL_NAME", metricsMetadata.QueryLabelValuesMetadata[0].ColumnName()) assert.Equal(t, metadata.StringValueType, metricsMetadata.QueryLabelValuesMetadata[0].ValueType()) - assert.Equal(t, 1, len(metricsMetadata.QueryMetricValuesMetadata)) + assert.Len(t, metricsMetadata.QueryMetricValuesMetadata, 1) assert.Equal(t, "metric_name", metricsMetadata.QueryMetricValuesMetadata[0].Name()) assert.Equal(t, "METRIC_NAME", metricsMetadata.QueryMetricValuesMetadata[0].ColumnName()) assert.Equal(t, "metric_unit", metricsMetadata.QueryMetricValuesMetadata[0].Unit()) diff --git a/receiver/googlecloudspannerreceiver/internal/statsreader/databasereader_test.go b/receiver/googlecloudspannerreceiver/internal/statsreader/databasereader_test.go index 1df801b20eb3..e502fe3910f4 100644 --- a/receiver/googlecloudspannerreceiver/internal/statsreader/databasereader_test.go +++ b/receiver/googlecloudspannerreceiver/internal/statsreader/databasereader_test.go @@ -50,7 +50,7 @@ func TestNewDatabaseReader(t *testing.T) { assert.Equal(t, databaseID, 
reader.database.DatabaseID()) assert.Equal(t, logger, reader.logger) - assert.Equal(t, 0, len(reader.readers)) + assert.Len(t, reader.readers, 0) } func TestNewDatabaseReaderWithError(t *testing.T) { @@ -94,7 +94,7 @@ func TestInitializeReaders(t *testing.T) { readers := initializeReaders(logger, parsedMetadata, database, readerConfig) - assert.Equal(t, 2, len(readers)) + assert.Len(t, readers, 2) assert.IsType(t, &currentStatsReader{}, readers[0]) assert.IsType(t, &intervalStatsReader{}, readers[1]) } diff --git a/receiver/googlecloudspannerreceiver/internal/statsreader/statsreaders_mockedspanner_test.go b/receiver/googlecloudspannerreceiver/internal/statsreader/statsreaders_mockedspanner_test.go index d11691e9fc92..72641950f094 100644 --- a/receiver/googlecloudspannerreceiver/internal/statsreader/statsreaders_mockedspanner_test.go +++ b/receiver/googlecloudspannerreceiver/internal/statsreader/statsreaders_mockedspanner_test.go @@ -192,7 +192,7 @@ func TestStatsReaders_Read(t *testing.T) { require.Error(t, err) } else { require.NoError(t, err) - assert.Equal(t, testCase.expectedMetricsAmount, len(metrics)) + assert.Len(t, metrics, testCase.expectedMetricsAmount) } }) } diff --git a/receiver/googlecloudspannerreceiver/internal/statsreader/timestampsgenerator_test.go b/receiver/googlecloudspannerreceiver/internal/statsreader/timestampsgenerator_test.go index 791fcfcdcb32..93b666cf31a8 100644 --- a/receiver/googlecloudspannerreceiver/internal/statsreader/timestampsgenerator_test.go +++ b/receiver/googlecloudspannerreceiver/internal/statsreader/timestampsgenerator_test.go @@ -39,7 +39,7 @@ func TestTimestampsGenerator_PullTimestamps(t *testing.T) { } timestamps := generator.pullTimestamps(testCase.lastPullTimestamp, now) - assert.Equal(t, testCase.amountOfTimestamps, len(timestamps)) + assert.Len(t, timestamps, testCase.amountOfTimestamps) }) } } @@ -51,7 +51,7 @@ func TestPullTimestampsWithDifference(t *testing.T) { timestamps := pullTimestampsWithDifference(lowerBound, upperBound, time.Minute) - assert.Equal(t, expectedAmountOfTimestamps, len(timestamps)) + assert.Len(t, timestamps, expectedAmountOfTimestamps) expectedTimestamp := lowerBound.Add(time.Minute) @@ -64,7 +64,7 @@ func TestPullTimestampsWithDifference(t *testing.T) { upperBound = lowerBound.Add(5 * time.Minute).Add(15 * time.Second) timestamps = pullTimestampsWithDifference(lowerBound, upperBound, time.Minute) - assert.Equal(t, 6, len(timestamps)) + assert.Len(t, timestamps, 6) expectedTimestamp = lowerBound.Add(time.Minute) diff --git a/receiver/googlecloudspannerreceiver/receiver_test.go b/receiver/googlecloudspannerreceiver/receiver_test.go index 6b300ba7fbd8..de55ca9b63aa 100644 --- a/receiver/googlecloudspannerreceiver/receiver_test.go +++ b/receiver/googlecloudspannerreceiver/receiver_test.go @@ -116,10 +116,10 @@ func TestStart(t *testing.T) { if testCase.expectError { require.Error(t, err) - assert.Equal(t, 0, len(receiver.projectReaders)) + assert.Len(t, receiver.projectReaders, 0) } else { require.NoError(t, err) - assert.Equal(t, 1, len(receiver.projectReaders)) + assert.Len(t, receiver.projectReaders, 1) } }) } @@ -189,10 +189,10 @@ func TestInitializeProjectReaders(t *testing.T) { if testCase.expectError { require.Error(t, err) - assert.Equal(t, 0, len(receiver.projectReaders)) + assert.Len(t, receiver.projectReaders, 0) } else { require.NoError(t, err) - assert.Equal(t, 1, len(receiver.projectReaders)) + assert.Len(t, receiver.projectReaders, 1) } }) } diff --git a/receiver/hostmetricsreceiver/config_test.go
b/receiver/hostmetricsreceiver/config_test.go index f5247a19651d..61559ac81f90 100644 --- a/receiver/hostmetricsreceiver/config_test.go +++ b/receiver/hostmetricsreceiver/config_test.go @@ -42,7 +42,7 @@ func TestLoadConfig(t *testing.T) { require.NoError(t, err) require.NotNil(t, cfg) - assert.Equal(t, len(cfg.Receivers), 2) + assert.Len(t, cfg.Receivers, 2) r0 := cfg.Receivers[component.NewID(metadata.Type)] defaultConfigCPUScraper := factory.CreateDefaultConfig() diff --git a/receiver/jaegerreceiver/jaeger_agent_test.go b/receiver/jaegerreceiver/jaeger_agent_test.go index 5434133f9044..305305c38b32 100644 --- a/receiver/jaegerreceiver/jaeger_agent_test.go +++ b/receiver/jaegerreceiver/jaeger_agent_test.go @@ -201,7 +201,7 @@ func testJaegerAgent(t *testing.T, agentEndpoint string, receiverConfig *configu }, 10*time.Second, 5*time.Millisecond) gotTraces := sink.AllTraces() - require.Equal(t, 1, len(gotTraces)) + require.Len(t, gotTraces, 1) assert.EqualValues(t, td, gotTraces[0]) } diff --git a/receiver/jaegerreceiver/trace_receiver_test.go b/receiver/jaegerreceiver/trace_receiver_test.go index bec053c98600..d30a427a9168 100644 --- a/receiver/jaegerreceiver/trace_receiver_test.go +++ b/receiver/jaegerreceiver/trace_receiver_test.go @@ -104,7 +104,7 @@ func TestReception(t *testing.T) { assert.NoError(t, err, "should not have failed to create the Jaeger OpenCensus exporter") gotTraces := sink.AllTraces() - assert.Equal(t, 1, len(gotTraces)) + assert.Len(t, gotTraces, 1) assert.EqualValues(t, td, gotTraces[0]) } @@ -178,7 +178,7 @@ func TestGRPCReception(t *testing.T) { assert.NotNil(t, resp, "response should not have been nil") gotTraces := sink.AllTraces() - assert.Equal(t, 1, len(gotTraces)) + assert.Len(t, gotTraces, 1) want := expectedTraceData(now, nowPlus10min, nowPlus10min2sec) assert.Len(t, req.Batch.Spans, want.SpanCount(), "got a conflicting amount of spans") @@ -238,7 +238,7 @@ func TestGRPCReceptionWithTLS(t *testing.T) { assert.NotNil(t, resp, "response should not have been nil") gotTraces := sink.AllTraces() - assert.Equal(t, 1, len(gotTraces)) + assert.Len(t, gotTraces, 1) want := expectedTraceData(now, nowPlus10min, nowPlus10min2sec) assert.Len(t, req.Batch.Spans, want.SpanCount(), "got a conflicting amount of spans") diff --git a/receiver/k8sclusterreceiver/internal/cronjob/cronjobs_test.go b/receiver/k8sclusterreceiver/internal/cronjob/cronjobs_test.go index 02d6a417edd1..c27f92a7a610 100644 --- a/receiver/k8sclusterreceiver/internal/cronjob/cronjobs_test.go +++ b/receiver/k8sclusterreceiver/internal/cronjob/cronjobs_test.go @@ -43,7 +43,7 @@ func TestCronJobMetadata(t *testing.T) { actualMetadata := GetMetadata(cj) - require.Equal(t, 1, len(actualMetadata)) + require.Len(t, actualMetadata, 1) // Assert metadata from Pod. 
require.Equal(t, diff --git a/receiver/k8sclusterreceiver/internal/metadata/metadata_test.go b/receiver/k8sclusterreceiver/internal/metadata/metadata_test.go index cbc8fb72720e..e20657b4b831 100644 --- a/receiver/k8sclusterreceiver/internal/metadata/metadata_test.go +++ b/receiver/k8sclusterreceiver/internal/metadata/metadata_test.go @@ -216,7 +216,7 @@ func TestGetMetadataUpdate(t *testing.T) { t.Run(tt.name, func(t *testing.T) { delta := GetMetadataUpdate(tt.args.oldMdata, tt.args.newMdata) if tt.metadataDelta != nil { - require.Equal(t, 1, len(delta)) + require.Len(t, delta, 1) require.Equal(t, *tt.metadataDelta, delta[0].MetadataDelta) } else { require.Zero(t, len(delta)) diff --git a/receiver/k8sclusterreceiver/internal/statefulset/statefulsets_test.go b/receiver/k8sclusterreceiver/internal/statefulset/statefulsets_test.go index ea768ca11913..9790cc849e20 100644 --- a/receiver/k8sclusterreceiver/internal/statefulset/statefulsets_test.go +++ b/receiver/k8sclusterreceiver/internal/statefulset/statefulsets_test.go @@ -56,7 +56,7 @@ func TestStatefulsetMetadata(t *testing.T) { actualMetadata := GetMetadata(ss) - require.Equal(t, 1, len(actualMetadata)) + require.Len(t, actualMetadata, 1) require.Equal(t, metadata.KubernetesMetadata{ diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go index 1c2ec492fc52..659aa0e42006 100644 --- a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator_test.go @@ -210,7 +210,7 @@ func TestMetadataErrorCases(t *testing.T) { tt.testScenario(acc) - assert.Equal(t, tt.numMDs, len(acc.m)) + assert.Len(t, acc.m, tt.numMDs) require.Equal(t, tt.numLogs, logs.Len()) for i := 0; i < tt.numLogs; i++ { assert.Equal(t, tt.logMessages[i], logs.All()[i].Message) diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go index 6fa10793bd28..7c7b99df4953 100644 --- a/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go @@ -45,7 +45,7 @@ func TestMetricAccumulator(t *testing.T) { mbs.NodeMetricsBuilder.Reset() mbs.PodMetricsBuilder.Reset() mbs.OtherMetricsBuilder.Reset() - require.Equal(t, 0, len(MetricsData(zap.NewNop(), summary, k8sMetadata, map[MetricGroup]bool{}, mbs))) + require.Len(t, MetricsData(zap.NewNop(), summary, k8sMetadata, map[MetricGroup]bool{}, mbs), 0) } func requireMetricsOk(t *testing.T, mds []pmetric.Metrics) { diff --git a/receiver/opencensusreceiver/internal/octrace/observability_test.go b/receiver/opencensusreceiver/internal/octrace/observability_test.go index 9743e8b31fca..42728adb131e 100644 --- a/receiver/opencensusreceiver/internal/octrace/observability_test.go +++ b/receiver/opencensusreceiver/internal/octrace/observability_test.go @@ -109,7 +109,7 @@ func TestExportSpanLinkingMaintainsParentLink(t *testing.T) { // Inspection time! 
gotSpanData := tt.SpanRecorder.Ended() - assert.Equal(t, n+1, len(gotSpanData)) + assert.Len(t, gotSpanData, n+1) receiverSpanData := gotSpanData[0] assert.Len(t, receiverSpanData.Links(), 1) diff --git a/receiver/opencensusreceiver/opencensus_test.go b/receiver/opencensusreceiver/opencensus_test.go index cb5a8d73c343..271b1fcded54 100644 --- a/receiver/opencensusreceiver/opencensus_test.go +++ b/receiver/opencensusreceiver/opencensus_test.go @@ -584,7 +584,7 @@ func TestOCReceiverTrace_HandleNextConsumerResponse(t *testing.T) { assert.Equal(t, ingestionState.expectedCode, status.Code()) } - require.Equal(t, tt.expectedReceivedBatches, len(sink.AllTraces())) + require.Len(t, sink.AllTraces(), tt.expectedReceivedBatches) require.NoError(t, testTel.CheckReceiverTraces("grpc", int64(tt.expectedReceivedBatches), int64(tt.expectedIngestionBlockedRPCs))) }) } @@ -742,7 +742,7 @@ func TestOCReceiverMetrics_HandleNextConsumerResponse(t *testing.T) { assert.Equal(t, ingestionState.expectedCode, status.Code()) } - require.Equal(t, tt.expectedReceivedBatches, len(sink.AllMetrics())) + require.Len(t, sink.AllMetrics(), tt.expectedReceivedBatches) require.NoError(t, testTel.CheckReceiverMetrics("grpc", int64(tt.expectedReceivedBatches), int64(tt.expectedIngestionBlockedRPCs))) }) } diff --git a/receiver/otelarrowreceiver/otelarrow_test.go b/receiver/otelarrowreceiver/otelarrow_test.go index 2b433373dac1..4c1f606c18b0 100644 --- a/receiver/otelarrowreceiver/otelarrow_test.go +++ b/receiver/otelarrowreceiver/otelarrow_test.go @@ -131,7 +131,7 @@ func TestOTelArrowReceiverGRPCTracesIngestTest(t *testing.T) { assert.Equal(t, ingestionState.expectedCode, errStatus.Code()) } - require.Equal(t, expectedReceivedBatches, len(sink.AllTraces())) + require.Len(t, sink.AllTraces(), expectedReceivedBatches) expectedIngestionBlockedRPCs := 1 require.NoError(t, tt.CheckReceiverTraces("grpc", int64(expectedReceivedBatches), int64(expectedIngestionBlockedRPCs))) @@ -710,7 +710,7 @@ func TestGRPCArrowReceiverAuth(t *testing.T) { assert.NoError(t, cc.Close()) require.NoError(t, ocr.Shutdown(context.Background())) - assert.Equal(t, 0, len(sink.AllTraces())) + assert.Len(t, sink.AllTraces(), 0) } func TestConcurrentArrowReceiver(t *testing.T) { @@ -789,7 +789,7 @@ func TestConcurrentArrowReceiver(t *testing.T) { // Two spans per stream/item. 
require.Equal(t, itemsPerStream*numStreams*2, sink.SpanCount()) - require.Equal(t, itemsPerStream*numStreams, len(sink.Metadatas())) + require.Len(t, sink.Metadatas(), itemsPerStream*numStreams) for _, md := range sink.Metadatas() { val, err := strconv.Atoi(md.Get("seq")[0]) diff --git a/receiver/podmanreceiver/podman_test.go b/receiver/podmanreceiver/podman_test.go index 48f91cc3239e..c8f968cce9f2 100644 --- a/receiver/podmanreceiver/podman_test.go +++ b/receiver/podmanreceiver/podman_test.go @@ -176,7 +176,7 @@ func TestEventLoopHandles(t *testing.T) { cli := newContainerScraper(&eventClient, zap.NewNop(), &Config{}) assert.NotNil(t, cli) - assert.Equal(t, 0, len(cli.containers)) + assert.Len(t, cli.containers, 0) ctx, cancel := context.WithCancel(context.Background()) go cli.containerEventLoop(ctx) @@ -187,7 +187,7 @@ func TestEventLoopHandles(t *testing.T) { assert.Eventually(t, func() bool { cli.containersLock.Lock() defer cli.containersLock.Unlock() - return assert.Equal(t, 1, len(cli.containers)) + return assert.Len(t, cli.containers, 1) }, 1*time.Second, 1*time.Millisecond, "failed to update containers list.") eventChan <- event{ID: "c1", Status: "died"} @@ -195,7 +195,7 @@ func TestEventLoopHandles(t *testing.T) { assert.Eventually(t, func() bool { cli.containersLock.Lock() defer cli.containersLock.Unlock() - return assert.Equal(t, 0, len(cli.containers)) + return assert.Len(t, cli.containers, 0) }, 1*time.Second, 1*time.Millisecond, "failed to update containers list.") } @@ -210,10 +210,10 @@ func TestInspectAndPersistContainer(t *testing.T) { cli := newContainerScraper(&inspectClient, zap.NewNop(), &Config{}) assert.NotNil(t, cli) - assert.Equal(t, 0, len(cli.containers)) + assert.Len(t, cli.containers, 0) stats, ok := cli.inspectAndPersistContainer(context.Background(), "c1") assert.True(t, ok) assert.NotNil(t, stats) - assert.Equal(t, 1, len(cli.containers)) + assert.Len(t, cli.containers, 1) } diff --git a/receiver/podmanreceiver/record_metrics_test.go b/receiver/podmanreceiver/record_metrics_test.go index e5417204fa74..922a2d1f83f4 100644 --- a/receiver/podmanreceiver/record_metrics_test.go +++ b/receiver/podmanreceiver/record_metrics_test.go @@ -99,7 +99,7 @@ func assertMetricEqual(t *testing.T, m pmetric.Metric, dt pmetric.MetricType, pt } func assertPoints(t *testing.T, dpts pmetric.NumberDataPointSlice, pts []point) { - assert.Equal(t, dpts.Len(), len(pts)) + assert.Len(t, pts, dpts.Len()) for i, expected := range pts { got := dpts.At(i) assert.Equal(t, got.IntValue(), int64(expected.intVal)) diff --git a/receiver/prometheusreceiver/config_test.go b/receiver/prometheusreceiver/config_test.go index 70d519c55549..2903a940f972 100644 --- a/receiver/prometheusreceiver/config_test.go +++ b/receiver/prometheusreceiver/config_test.go @@ -90,7 +90,7 @@ func TestLoadTargetAllocatorConfig(t *testing.T) { assert.Equal(t, 30*time.Second, r0.TargetAllocator.Interval) assert.Equal(t, "collector-1", r0.TargetAllocator.CollectorID) - assert.Equal(t, 1, len(r1.PrometheusConfig.ScrapeConfigs)) + assert.Len(t, r1.PrometheusConfig.ScrapeConfigs, 1) assert.Equal(t, "demo", r1.PrometheusConfig.ScrapeConfigs[0].JobName) assert.Equal(t, promModel.Duration(5*time.Second), r1.PrometheusConfig.ScrapeConfigs[0].ScrapeInterval) @@ -101,7 +101,7 @@ func TestLoadTargetAllocatorConfig(t *testing.T) { require.NoError(t, component.ValidateConfig(cfg)) r2 := cfg.(*Config) - assert.Equal(t, 1, len(r2.PrometheusConfig.ScrapeConfigs)) + assert.Len(t, r2.PrometheusConfig.ScrapeConfigs, 1) assert.Equal(t, 
"demo", r2.PrometheusConfig.ScrapeConfigs[0].JobName) assert.Equal(t, promModel.Duration(5*time.Second), r2.PrometheusConfig.ScrapeConfigs[0].ScrapeInterval) } diff --git a/receiver/prometheusreceiver/metrics_receiver_helper_test.go b/receiver/prometheusreceiver/metrics_receiver_helper_test.go index b4db87ca658e..d80bb2a8bd9c 100644 --- a/receiver/prometheusreceiver/metrics_receiver_helper_test.go +++ b/receiver/prometheusreceiver/metrics_receiver_helper_test.go @@ -450,27 +450,27 @@ func assertMetricPresent(name string, metricTypeExpectations metricTypeComparato switch m.Type() { case pmetric.MetricTypeGauge: for _, npc := range de.numberPointComparator { - require.Equal(t, m.Gauge().DataPoints().Len(), len(dataPointExpectations), "Expected number of data-points in Gauge metric '%s' does not match to testdata", name) + require.Len(t, dataPointExpectations, m.Gauge().DataPoints().Len(), "Expected number of data-points in Gauge metric '%s' does not match to testdata", name) npc(t, m.Gauge().DataPoints().At(i)) } case pmetric.MetricTypeSum: for _, npc := range de.numberPointComparator { - require.Equal(t, m.Sum().DataPoints().Len(), len(dataPointExpectations), "Expected number of data-points in Sum metric '%s' does not match to testdata", name) + require.Len(t, dataPointExpectations, m.Sum().DataPoints().Len(), "Expected number of data-points in Sum metric '%s' does not match to testdata", name) npc(t, m.Sum().DataPoints().At(i)) } case pmetric.MetricTypeHistogram: for _, hpc := range de.histogramPointComparator { - require.Equal(t, m.Histogram().DataPoints().Len(), len(dataPointExpectations), "Expected number of data-points in Histogram metric '%s' does not match to testdata", name) + require.Len(t, dataPointExpectations, m.Histogram().DataPoints().Len(), "Expected number of data-points in Histogram metric '%s' does not match to testdata", name) hpc(t, m.Histogram().DataPoints().At(i)) } case pmetric.MetricTypeSummary: for _, spc := range de.summaryPointComparator { - require.Equal(t, m.Summary().DataPoints().Len(), len(dataPointExpectations), "Expected number of data-points in Summary metric '%s' does not match to testdata", name) + require.Len(t, dataPointExpectations, m.Summary().DataPoints().Len(), "Expected number of data-points in Summary metric '%s' does not match to testdata", name) spc(t, m.Summary().DataPoints().At(i)) } case pmetric.MetricTypeExponentialHistogram: for _, ehc := range de.exponentialHistogramComparator { - require.Equal(t, m.ExponentialHistogram().DataPoints().Len(), len(dataPointExpectations), "Expected number of data-points in Exponential Histogram metric '%s' does not match to testdata", name) + require.Len(t, dataPointExpectations, m.ExponentialHistogram().DataPoints().Len(), "Expected number of data-points in Exponential Histogram metric '%s' does not match to testdata", name) ehc(t, m.ExponentialHistogram().DataPoints().At(i)) } case pmetric.MetricTypeEmpty: diff --git a/receiver/redisreceiver/redis_svc_test.go b/receiver/redisreceiver/redis_svc_test.go index 1413d4a0c631..63b7a609a25a 100644 --- a/receiver/redisreceiver/redis_svc_test.go +++ b/receiver/redisreceiver/redis_svc_test.go @@ -17,6 +17,6 @@ func TestParser(t *testing.T) { s := newFakeAPIParser() info, err := s.info() require.NoError(t, err) - require.Equal(t, 130, len(info)) + require.Len(t, info, 130) require.Equal(t, "1.24", info["allocator_frag_ratio"]) // spot check } diff --git a/receiver/sapmreceiver/trace_receiver_test.go b/receiver/sapmreceiver/trace_receiver_test.go index 
18a7e32ac43f..80436b4e29ab 100644 --- a/receiver/sapmreceiver/trace_receiver_test.go +++ b/receiver/sapmreceiver/trace_receiver_test.go @@ -350,7 +350,7 @@ func TestReception(t *testing.T) { // retrieve received traces got := sink.AllTraces() - assert.Equal(t, 1, len(got)) + assert.Len(t, got, 1) // compare what we got to what we wanted t.Log("Comparing expected data to trace data") @@ -414,7 +414,7 @@ func TestAccessTokenPassthrough(t *testing.T) { assert.NoError(t, resp.Body.Close()) got := sink.AllTraces() - assert.Equal(t, 1, len(got)) + assert.Len(t, got, 1) received := got[0].ResourceSpans() for i := 0; i < received.Len(); i++ { diff --git a/receiver/signalfxreceiver/receiver_test.go b/receiver/signalfxreceiver/receiver_test.go index 5e5a5e1fe813..9ed9c45d6ae5 100644 --- a/receiver/signalfxreceiver/receiver_test.go +++ b/receiver/signalfxreceiver/receiver_test.go @@ -879,7 +879,7 @@ func Test_sfxReceiver_EventAccessTokenPassthrough(t *testing.T) { assert.Equal(t, responseOK, bodyStr) got := sink.AllLogs() - require.Equal(t, 1, len(got)) + require.Len(t, got, 1) tokenLabel := "" if accessTokenAttr, ok := got[0].ResourceLogs().At(0).Resource().Attributes().Get("com.splunk.signalfx.access_token"); ok { diff --git a/receiver/splunkhecreceiver/receiver_test.go b/receiver/splunkhecreceiver/receiver_test.go index 1d61f1161c1a..76fe5ec5a3b7 100644 --- a/receiver/splunkhecreceiver/receiver_test.go +++ b/receiver/splunkhecreceiver/receiver_test.go @@ -294,10 +294,10 @@ func Test_splunkhecReceiver_handleReq(t *testing.T) { assertHecSuccessResponse(t, resp, body) }, assertSink: func(t *testing.T, sink *consumertest.LogsSink) { - assert.Equal(t, 1, len(sink.AllLogs())) + assert.Len(t, sink.AllLogs(), 1) }, assertMetricsSink: func(t *testing.T, sink *consumertest.MetricsSink) { - assert.Equal(t, 0, len(sink.AllMetrics())) + assert.Len(t, sink.AllMetrics(), 0) }, }, { @@ -312,10 +312,10 @@ func Test_splunkhecReceiver_handleReq(t *testing.T) { assertHecSuccessResponse(t, resp, body) }, assertSink: func(t *testing.T, sink *consumertest.LogsSink) { - assert.Equal(t, 0, len(sink.AllLogs())) + assert.Len(t, sink.AllLogs(), 0) }, assertMetricsSink: func(t *testing.T, sink *consumertest.MetricsSink) { - assert.Equal(t, 1, len(sink.AllMetrics())) + assert.Len(t, sink.AllMetrics(), 1) }, }, { @@ -530,7 +530,7 @@ func Test_splunkhecReceiver_TLS(t *testing.T) { t.Log("Splunk HEC Request Received") got := sink.AllLogs() - require.Equal(t, 1, len(got)) + require.Len(t, got, 1) assert.Equal(t, want, got[0]) } @@ -1593,7 +1593,7 @@ func Test_splunkhecReceiver_handleReq_WithAck(t *testing.T) { assertHecSuccessResponse(t, resp, body) }, assertSink: func(t *testing.T, sink *consumertest.LogsSink) { - assert.Equal(t, 1, len(sink.AllLogs())) + assert.Len(t, sink.AllLogs(), 1) }, }, { @@ -1614,7 +1614,7 @@ func Test_splunkhecReceiver_handleReq_WithAck(t *testing.T) { assert.Equal(t, map[string]any{"code": float64(10), "text": "Data channel is missing"}, body) }, assertSink: func(t *testing.T, sink *consumertest.LogsSink) { - assert.Equal(t, 0, len(sink.AllLogs())) + assert.Len(t, sink.AllLogs(), 0) }, }, { @@ -1635,7 +1635,7 @@ func Test_splunkhecReceiver_handleReq_WithAck(t *testing.T) { assert.Equal(t, map[string]any{"text": "Invalid data channel", "code": float64(11)}, body) }, assertSink: func(t *testing.T, sink *consumertest.LogsSink) { - assert.Equal(t, 0, len(sink.AllLogs())) + assert.Len(t, sink.AllLogs(), 0) }, }, { @@ -1660,7 +1660,7 @@ func Test_splunkhecReceiver_handleReq_WithAck(t *testing.T) { 
assertHecSuccessResponseWithAckID(t, resp, body, 1) }, assertSink: func(t *testing.T, sink *consumertest.LogsSink) { - assert.Equal(t, 1, len(sink.AllLogs())) + assert.Len(t, sink.AllLogs(), 1) }, }, { @@ -1685,7 +1685,7 @@ func Test_splunkhecReceiver_handleReq_WithAck(t *testing.T) { assertHecSuccessResponseWithAckID(t, resp, body, 1) }, assertSink: func(t *testing.T, sink *consumertest.LogsSink) { - assert.Equal(t, 1, len(sink.AllLogs())) + assert.Len(t, sink.AllLogs(), 1) }, }, { @@ -1709,7 +1709,7 @@ func Test_splunkhecReceiver_handleReq_WithAck(t *testing.T) { assertHecSuccessResponseWithAckID(t, resp, body, 1) }, assertSink: func(t *testing.T, sink *consumertest.LogsSink) { - assert.Equal(t, 1, len(sink.AllLogs())) + assert.Len(t, sink.AllLogs(), 1) }, }, } @@ -1880,7 +1880,7 @@ func Test_splunkhecReceiver_rawReqHasmetadataInResource(t *testing.T) { return req }(), assertResource: func(t *testing.T, got []plog.Logs) { - require.Equal(t, 1, len(got)) + require.Len(t, got, 1) resources := got[0].ResourceLogs() assert.Equal(t, 1, resources.Len()) resource := resources.At(0).Resource().Attributes() @@ -1905,7 +1905,7 @@ func Test_splunkhecReceiver_rawReqHasmetadataInResource(t *testing.T) { return req }(), assertResource: func(t *testing.T, got []plog.Logs) { - require.Equal(t, 1, len(got)) + require.Len(t, got, 1) resources := got[0].ResourceLogs() assert.Equal(t, 1, resources.Len()) resource := resources.At(0).Resource().Attributes() @@ -1930,7 +1930,7 @@ func Test_splunkhecReceiver_rawReqHasmetadataInResource(t *testing.T) { return req }(), assertResource: func(t *testing.T, got []plog.Logs) { - require.Equal(t, 1, len(got)) + require.Len(t, got, 1) resources := got[0].ResourceLogs() assert.Equal(t, 1, resources.Len()) resource := resources.At(0).Resource().Attributes() diff --git a/receiver/sqlqueryreceiver/integration_test.go b/receiver/sqlqueryreceiver/integration_test.go index ea1340c39db9..a58c6fcf31d3 100644 --- a/receiver/sqlqueryreceiver/integration_test.go +++ b/receiver/sqlqueryreceiver/integration_test.go @@ -607,7 +607,7 @@ func TestMysqlIntegrationMetrics(t *testing.T) { } func testAllSimpleLogs(t *testing.T, logs []plog.Logs) { - assert.Equal(t, 1, len(logs)) + assert.Len(t, logs, 1) assert.Equal(t, 1, logs[0].ResourceLogs().Len()) assert.Equal(t, 1, logs[0].ResourceLogs().At(0).ScopeLogs().Len()) expectedEntries := []string{ diff --git a/receiver/sqlserverreceiver/scraper_windows_test.go b/receiver/sqlserverreceiver/scraper_windows_test.go index 1f32d2aa4527..ca3aecf367ac 100644 --- a/receiver/sqlserverreceiver/scraper_windows_test.go +++ b/receiver/sqlserverreceiver/scraper_windows_test.go @@ -90,7 +90,7 @@ func TestSqlServerScraper(t *testing.T) { s := newSQLServerPCScraper(settings, cfg) assert.NoError(t, s.start(context.Background(), nil)) - assert.Equal(t, 0, len(s.watcherRecorders)) + assert.Len(t, s.watcherRecorders, 0) assert.Equal(t, 21, obsLogs.Len()) assert.Equal(t, 21, obsLogs.FilterMessageSnippet("failed to create perf counter with path \\SQLServer:").Len()) assert.Equal(t, 21, obsLogs.FilterMessageSnippet("The specified object was not found on the computer.").Len()) diff --git a/receiver/sshcheckreceiver/internal/configssh/configssh_test.go b/receiver/sshcheckreceiver/internal/configssh/configssh_test.go index 47097a5f0fdc..2b384653c4f5 100644 --- a/receiver/sshcheckreceiver/internal/configssh/configssh_test.go +++ b/receiver/sshcheckreceiver/internal/configssh/configssh_test.go @@ -118,7 +118,7 @@ func TestAllSSHClientSettings(t *testing.T) { 
assert.EqualValues(t, client.ClientConfig.User, test.settings.Username) if len(test.settings.KeyFile) > 0 || len(test.settings.Password) > 0 { - assert.EqualValues(t, 1, len(client.ClientConfig.Auth)) + assert.Len(t, client.ClientConfig.Auth, 1) } }) } @@ -192,7 +192,7 @@ func Test_Client_Dial(t *testing.T) { assert.EqualValues(t, client.HostKeyCallback, ssh.InsecureIgnoreHostKey()) //#nosec G106 } if len(test.settings.KeyFile) > 0 || len(test.settings.Password) > 0 { - assert.EqualValues(t, 1, len(client.ClientConfig.Auth)) + assert.Len(t, client.ClientConfig.Auth, 1) } }) } diff --git a/receiver/statsdreceiver/internal/protocol/statsd_parser_test.go b/receiver/statsdreceiver/internal/protocol/statsd_parser_test.go index 09cd313084ac..bc7bd2423516 100644 --- a/receiver/statsdreceiver/internal/protocol/statsd_parser_test.go +++ b/receiver/statsdreceiver/internal/protocol/statsd_parser_test.go @@ -1491,8 +1491,8 @@ func TestStatsDParser_Initialize(t *testing.T) { instrument := newInstruments(addr) instrument.gauges[teststatsdDMetricdescription] = pmetric.ScopeMetrics{} p.instrumentsByAddress[addrKey] = instrument - assert.Equal(t, 1, len(p.instrumentsByAddress)) - assert.Equal(t, 1, len(p.instrumentsByAddress[addrKey].gauges)) + assert.Len(t, p.instrumentsByAddress, 1) + assert.Len(t, p.instrumentsByAddress[addrKey].gauges, 1) assert.Equal(t, GaugeObserver, p.timerEvents.method) assert.Equal(t, GaugeObserver, p.histogramEvents.method) } diff --git a/receiver/statsdreceiver/internal/transport/server_test.go b/receiver/statsdreceiver/internal/transport/server_test.go index a774c0fd4154..f8cb0ba516d8 100644 --- a/receiver/statsdreceiver/internal/transport/server_test.go +++ b/receiver/statsdreceiver/internal/transport/server_test.go @@ -89,7 +89,7 @@ func Test_Server_ListenAndServe(t *testing.T) { assert.NoError(t, err) wgListenAndServe.Wait() - assert.Equal(t, 1, len(transferChan)) + assert.Len(t, transferChan, 1) }) } } diff --git a/receiver/vcenterreceiver/client_test.go b/receiver/vcenterreceiver/client_test.go index 7cb30b7e60f1..49576b5df859 100644 --- a/receiver/vcenterreceiver/client_test.go +++ b/receiver/vcenterreceiver/client_test.go @@ -230,7 +230,7 @@ func TestDatacenterInventoryListObjects(t *testing.T) { } dcs, err := client.DatacenterInventoryListObjects(ctx) require.NoError(t, err) - require.Equal(t, len(dcs), 2) + require.Len(t, dcs, 2) }, vpx) } diff --git a/receiver/wavefrontreceiver/receiver_test.go b/receiver/wavefrontreceiver/receiver_test.go index 0376fe4802e8..cd0859f755d0 100644 --- a/receiver/wavefrontreceiver/receiver_test.go +++ b/receiver/wavefrontreceiver/receiver_test.go @@ -123,7 +123,7 @@ func Test_wavefrontreceiver_EndToEnd(t *testing.T) { numMetrics = 1 } n, err := fmt.Fprint(conn, tt.msg) - assert.Equal(t, len(tt.msg), n) + assert.Len(t, tt.msg, n) assert.NoError(t, err) require.NoError(t, conn.Close()) diff --git a/receiver/windowseventlogreceiver/receiver_windows_test.go b/receiver/windowseventlogreceiver/receiver_windows_test.go index ae134f20c28c..0427254a145f 100644 --- a/receiver/windowseventlogreceiver/receiver_windows_test.go +++ b/receiver/windowseventlogreceiver/receiver_windows_test.go @@ -272,7 +272,7 @@ func requireExpectedLogRecords(t *testing.T, sink *consumertest.LogsSink, expect // logs sometimes take a while to be written, so a substantial wait buffer is needed require.EventuallyWithT(t, func(c *assert.CollectT) { actualLogRecords = filterAllLogRecordsBySource(t, sink, expectedEventSrc) - assert.Equal(c, expectedEventCount, 
len(actualLogRecords)) + assert.Len(c, actualLogRecords, expectedEventCount) }, 10*time.Second, 250*time.Millisecond) return actualLogRecords diff --git a/testbed/testbed/validator.go b/testbed/testbed/validator.go index d7cc2fcfb48c..e3524be399cc 100644 --- a/testbed/testbed/validator.go +++ b/testbed/testbed/validator.go @@ -128,7 +128,7 @@ func (v *CorrectnessTestValidator) Validate(tc *TestCase) { if len(tc.MockBackend.ReceivedTraces) > 0 { v.assertSentRecdTracingDataEqual(append(tc.MockBackend.ReceivedTraces, tc.MockBackend.DroppedTraces...)) } - assert.EqualValues(tc.t, 0, len(v.assertionFailures), "There are span data mismatches.") + assert.Len(tc.t, v.assertionFailures, 0, "There are span data mismatches.") } func (v *CorrectnessTestValidator) RecordResults(tc *TestCase) { diff --git a/testbed/tests/syslog_integration_test.go b/testbed/tests/syslog_integration_test.go index 933d1d8104e4..97dd08583cb1 100644 --- a/testbed/tests/syslog_integration_test.go +++ b/testbed/tests/syslog_integration_test.go @@ -182,10 +182,10 @@ service: time.Sleep(100 * time.Millisecond) } - require.Equal(t, len(backend.ReceivedLogs), 1) + require.Len(t, backend.ReceivedLogs, 1) require.Equal(t, backend.ReceivedLogs[0].ResourceLogs().Len(), 1) require.Equal(t, backend.ReceivedLogs[0].ResourceLogs().At(0).ScopeLogs().Len(), 1) - require.Equal(t, backend.ReceivedLogs[0].ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().Len(), len(expectedData)) + require.Len(t, expectedData, backend.ReceivedLogs[0].ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().Len()) // Clean received logs attributes := []map[string]any{} From 0ec84e697583b92e9f5f49f937cff3e8b6718353 Mon Sep 17 00:00:00 2001 From: Curtis Robert Date: Fri, 30 Aug 2024 00:39:49 -0700 Subject: [PATCH 09/10] [chore][receiver/nginx] Update README for consistency (#34923) **Description:** This just cleans up the README a bit: 1. Make case of `NGINX` consistent. Happy to change it to `nginx` if that's preferred. 2. Remove empty `Details` section 3. Cleanup description of the `collection_interval` config option. This is the same config option many other receivers have, so I don't think the concept needs explained in-depth here. 4. Remove disclaimer that the component's status is beta and configuration is subject to change. The README's header states this component is beta, so I don't think it needs to be repeated. --- receiver/nginxreceiver/README.md | 21 +++++++-------------- 1 file changed, 7 insertions(+), 14 deletions(-) diff --git a/receiver/nginxreceiver/README.md b/receiver/nginxreceiver/README.md index b346f8b6986a..f2a5517c6eeb 100644 --- a/receiver/nginxreceiver/README.md +++ b/receiver/nginxreceiver/README.md @@ -1,4 +1,4 @@ -# Nginx Receiver +# NGINX Receiver | Status | | @@ -12,13 +12,12 @@ [contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib -This receiver can fetch stats from a Nginx instance using the `ngx_http_stub_status_module` module's `status` endpoint. - -## Details +This receiver can fetch stats from a NGINX instance using the `ngx_http_stub_status_module` module's `status` endpoint. ## Configuration -### Nginx Module +### NGINX Module + You must configure NGINX to expose status information by editing the NGINX configuration. Please see [ngx_http_stub_status_module](http://nginx.org/en/docs/http/ngx_http_stub_status_module.html) @@ -26,20 +25,14 @@ for a guide to configuring the NGINX stats module `ngx_http_stub_status_module`. 
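For orientation only (not part of this patch), a minimal collector configuration using this receiver might look like the sketch below; the `debug` exporter is a placeholder, and the values simply echo the defaults documented in the section that follows:

```yaml
receivers:
  nginx:
    endpoint: "http://localhost:80/status"
    collection_interval: 10s

exporters:
  debug:

service:
  pipelines:
    metrics:
      receivers: [nginx]
      exporters: [debug]
```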
### Receiver Config -> :information_source: This receiver is in beta and configuration fields are subject to change. - The following settings are required: -- `endpoint` (default: `http://localhost:80/status`): The URL of the nginx status endpoint +- `endpoint` (default: `http://localhost:80/status`): The URL of the NGINX status endpoint The following settings are optional: -- `collection_interval` (default = `10s`): This receiver runs on an interval. -Each time it runs, it queries nginx, creates metrics, and sends them to the -next consumer. The `collection_interval` configuration option tells this -receiver the duration between runs. This value must be a string readable by -Golang's `ParseDuration` function (example: `1h30m`). Valid time units are -`ns`, `us` (or `ยตs`), `ms`, `s`, `m`, `h`. +- `collection_interval` (default = `10s`): This receiver collects metrics on an interval. This value must be a string readable by Golang's [time.ParseDuration](https://pkg.go.dev/time#ParseDuration). Valid time units are `ns`, `us` (or `ยตs`), `ms`, `s`, `m`, `h`. + - `initial_delay` (default = `1s`): defines how long this receiver waits before starting. Example: From 184e954bf6599d1ef725307a2fe2a5af7c780540 Mon Sep 17 00:00:00 2001 From: Florian Bacher Date: Fri, 30 Aug 2024 11:03:10 +0200 Subject: [PATCH 10/10] [processor/redaction] add support for redacting metrics and logs attributes (#34609) **Description:** This PR extends the redaction processor to also support the redaction of attributes within logs and metrics. **Link to tracking Issue:** #34479 **Testing:** Extended the existing unit tests to also cover the redaction of logs and metrics **Documentation:** Adapted the readme to reflect the changes --------- Signed-off-by: Florian Bacher --- .chloggen/redaction-add-metrics-and-logs.yaml | 27 + processor/redactionprocessor/README.md | 22 +- processor/redactionprocessor/factory.go | 46 ++ processor/redactionprocessor/factory_test.go | 18 + .../generated_component_test.go | 14 + .../internal/metadata/generated_status.go | 4 +- processor/redactionprocessor/metadata.yaml | 1 + processor/redactionprocessor/processor.go | 75 +++ .../redactionprocessor/processor_test.go | 520 +++++++++++++----- 9 files changed, 582 insertions(+), 145 deletions(-) create mode 100644 .chloggen/redaction-add-metrics-and-logs.yaml diff --git a/.chloggen/redaction-add-metrics-and-logs.yaml b/.chloggen/redaction-add-metrics-and-logs.yaml new file mode 100644 index 000000000000..d4acba3f6889 --- /dev/null +++ b/.chloggen/redaction-add-metrics-and-logs.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: redactionprocessor + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Add support for logs and metrics + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [34479] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. 
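As the PR description above notes, the redaction processor can now sit in logs and metrics pipelines as well as traces. The following is an illustrative wiring sketch only, not part of this patch: the `otlp` receiver and `debug` exporter are placeholders, and the redaction settings mirror the README example changed later in this diff.

```yaml
processors:
  redaction:
    allow_all_keys: false
    allowed_keys: [description, group, id, name]
    blocked_values:
      - "4[0-9]{12}(?:[0-9]{3})?"  # Visa-style card numbers
    summary: debug

service:
  pipelines:
    traces:
      receivers: [otlp]
      processors: [redaction]
      exporters: [debug]
    metrics:
      receivers: [otlp]
      processors: [redaction]
      exporters: [debug]
    logs:
      receivers: [otlp]
      processors: [redaction]
      exporters: [debug]
```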
+subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/processor/redactionprocessor/README.md b/processor/redactionprocessor/README.md index f5c54d306206..aa2d5956b1db 100644 --- a/processor/redactionprocessor/README.md +++ b/processor/redactionprocessor/README.md @@ -3,19 +3,21 @@ | Status | | | ------------- |-----------| -| Stability | [beta]: traces | +| Stability | [alpha]: logs, metrics | +| | [beta]: traces | | Distributions | [contrib] | | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aprocessor%2Fredaction%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aprocessor%2Fredaction) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aprocessor%2Fredaction%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aprocessor%2Fredaction) | | [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@dmitryax](https://www.github.com/dmitryax), [@mx-psi](https://www.github.com/mx-psi), [@TylerHelmuth](https://www.github.com/TylerHelmuth) | | Emeritus | [@leonsp-ai](https://www.github.com/leonsp-ai) | +[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha [beta]: https://github.com/open-telemetry/opentelemetry-collector#beta [contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib -This processor deletes span attributes that don't match a list of allowed span -attributes. It also masks span attribute values that match a blocked value -list. Span attributes that aren't on the allowed list are removed before any +This processor deletes span, log, and metric datapoint attributes that don't match a list of allowed +attributes. It also masks attribute values that match a blocked value +list. Attributes that aren't on the allowed list are removed before any value checks are done. ## Use Cases @@ -57,9 +59,9 @@ processors: # allowed_keys list. The list of blocked_values is applied regardless. If # you just want to block values, set this to true. allow_all_keys: false - # allowed_keys is a list of span attribute keys that are kept on the span and + # allowed_keys is a list of span/log/datapoint attribute keys that are kept on the span/log/datapoint and # processed. The list is designed to fail closed. If allowed_keys is empty, - # no span attributes are allowed and all span attributes are removed. To + # no attributes are allowed and all span attributes are removed. To # allow all keys, set allow_all_keys to true. allowed_keys: - description @@ -76,7 +78,7 @@ processors: - "4[0-9]{12}(?:[0-9]{3})?" 
## Visa credit card number - "(5[1-5][0-9]{14})" ## MasterCard number # summary controls the verbosity level of the diagnostic attributes that - # the processor adds to the spans when it redacts or masks other + # the processor adds to the spans/logs/datapoints when it redacts or masks other # attributes. In some contexts a list of redacted attributes leaks # information, while it is valuable when integrating and testing a new # configuration. Possible values: @@ -93,8 +95,8 @@ Ignored attributes are processed first so they're always allowed and never blocked. This field should only be used where you know the data is always safe to send to the telemetry system. -Only span attributes included on the list of allowed keys list are retained. -If `allowed_keys` is empty, then no span attributes are allowed. All span +Only span/log/datapoint attributes included on the list of allowed keys list are retained. +If `allowed_keys` is empty, then no attributes are allowed. All attributes are removed in that case. To keep all span attributes, you should explicitly set `allow_all_keys` to true. @@ -102,7 +104,7 @@ explicitly set `allow_all_keys` to true. allowed key matches the regular expression for a blocked value, the matching part of the value is then masked with a fixed length of asterisks. -For example, if `notes` is on the list of allowed keys, then the `notes` span +For example, if `notes` is on the list of allowed keys, then the `notes` attribute is retained. However, if there is a value such as a credit card number in the `notes` field that matched a regular expression on the list of blocked values, then that value is masked. diff --git a/processor/redactionprocessor/factory.go b/processor/redactionprocessor/factory.go index 7ef213e475ef..b900a03e512f 100644 --- a/processor/redactionprocessor/factory.go +++ b/processor/redactionprocessor/factory.go @@ -23,6 +23,8 @@ func NewFactory() processor.Factory { metadata.Type, createDefaultConfig, processor.WithTraces(createTracesProcessor, metadata.TracesStability), + processor.WithLogs(createLogsProcessor, metadata.LogsStability), + processor.WithMetrics(createMetricsProcessor, metadata.MetricsStability), ) } @@ -53,3 +55,47 @@ func createTracesProcessor( redaction.processTraces, processorhelper.WithCapabilities(consumer.Capabilities{MutatesData: true})) } + +// createLogsProcessor creates an instance of redaction for processing logs +func createLogsProcessor( + ctx context.Context, + set processor.Settings, + cfg component.Config, + next consumer.Logs) (processor.Logs, error) { + oCfg := cfg.(*Config) + + red, err := newRedaction(ctx, oCfg, set.Logger) + if err != nil { + return nil, fmt.Errorf("error creating a redaction processor: %w", err) + } + + return processorhelper.NewLogsProcessor( + ctx, + set, + cfg, + next, + red.processLogs, + processorhelper.WithCapabilities(consumer.Capabilities{MutatesData: true})) +} + +// createMetricsProcessor creates an instance of redaction for processing metrics +func createMetricsProcessor( + ctx context.Context, + set processor.Settings, + cfg component.Config, + next consumer.Metrics) (processor.Metrics, error) { + oCfg := cfg.(*Config) + + red, err := newRedaction(ctx, oCfg, set.Logger) + if err != nil { + return nil, fmt.Errorf("error creating a redaction processor: %w", err) + } + + return processorhelper.NewMetricsProcessor( + ctx, + set, + cfg, + next, + red.processMetrics, + processorhelper.WithCapabilities(consumer.Capabilities{MutatesData: true})) +} diff --git 
a/processor/redactionprocessor/factory_test.go b/processor/redactionprocessor/factory_test.go index 7afe5741c794..490b59a38ab9 100644 --- a/processor/redactionprocessor/factory_test.go +++ b/processor/redactionprocessor/factory_test.go @@ -26,3 +26,21 @@ func TestCreateTestProcessor(t *testing.T) { assert.NotNil(t, tp) assert.True(t, tp.Capabilities().MutatesData) } + +func TestCreateTestLogsProcessor(t *testing.T) { + cfg := &Config{} + + tp, err := createLogsProcessor(context.Background(), processortest.NewNopSettings(), cfg, consumertest.NewNop()) + assert.NoError(t, err) + assert.NotNil(t, tp) + assert.Equal(t, true, tp.Capabilities().MutatesData) +} + +func TestCreateTestMetricsProcessor(t *testing.T) { + cfg := &Config{} + + tp, err := createMetricsProcessor(context.Background(), processortest.NewNopSettings(), cfg, consumertest.NewNop()) + assert.NoError(t, err) + assert.NotNil(t, tp) + assert.Equal(t, true, tp.Capabilities().MutatesData) +} diff --git a/processor/redactionprocessor/generated_component_test.go b/processor/redactionprocessor/generated_component_test.go index a59a3b22d7d3..baa14ee81730 100644 --- a/processor/redactionprocessor/generated_component_test.go +++ b/processor/redactionprocessor/generated_component_test.go @@ -36,6 +36,20 @@ func TestComponentLifecycle(t *testing.T) { createFn func(ctx context.Context, set processor.Settings, cfg component.Config) (component.Component, error) }{ + { + name: "logs", + createFn: func(ctx context.Context, set processor.Settings, cfg component.Config) (component.Component, error) { + return factory.CreateLogsProcessor(ctx, set, cfg, consumertest.NewNop()) + }, + }, + + { + name: "metrics", + createFn: func(ctx context.Context, set processor.Settings, cfg component.Config) (component.Component, error) { + return factory.CreateMetricsProcessor(ctx, set, cfg, consumertest.NewNop()) + }, + }, + { name: "traces", createFn: func(ctx context.Context, set processor.Settings, cfg component.Config) (component.Component, error) { diff --git a/processor/redactionprocessor/internal/metadata/generated_status.go b/processor/redactionprocessor/internal/metadata/generated_status.go index 4dd78cb1284d..4d9e987c1961 100644 --- a/processor/redactionprocessor/internal/metadata/generated_status.go +++ b/processor/redactionprocessor/internal/metadata/generated_status.go @@ -12,5 +12,7 @@ var ( ) const ( - TracesStability = component.StabilityLevelBeta + LogsStability = component.StabilityLevelAlpha + MetricsStability = component.StabilityLevelAlpha + TracesStability = component.StabilityLevelBeta ) diff --git a/processor/redactionprocessor/metadata.yaml b/processor/redactionprocessor/metadata.yaml index 8b4d5d286edc..466150b8ce13 100644 --- a/processor/redactionprocessor/metadata.yaml +++ b/processor/redactionprocessor/metadata.yaml @@ -4,6 +4,7 @@ status: class: processor stability: beta: [traces] + alpha: [logs,metrics] distributions: [contrib] codeowners: active: [dmitryax, mx-psi, TylerHelmuth] diff --git a/processor/redactionprocessor/processor.go b/processor/redactionprocessor/processor.go index 57368a1878a1..ae07d3819a3f 100644 --- a/processor/redactionprocessor/processor.go +++ b/processor/redactionprocessor/processor.go @@ -11,6 +11,8 @@ import ( "strings" "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" ) @@ -59,6 +61,22 @@ func (s *redaction) processTraces(ctx context.Context, batch 
ptrace.Traces) (ptr return batch, nil } +func (s *redaction) processLogs(ctx context.Context, logs plog.Logs) (plog.Logs, error) { + for i := 0; i < logs.ResourceLogs().Len(); i++ { + rl := logs.ResourceLogs().At(i) + s.processResourceLog(ctx, rl) + } + return logs, nil +} + +func (s *redaction) processMetrics(ctx context.Context, metrics pmetric.Metrics) (pmetric.Metrics, error) { + for i := 0; i < metrics.ResourceMetrics().Len(); i++ { + rm := metrics.ResourceMetrics().At(i) + s.processResourceMetric(ctx, rm) + } + return metrics, nil +} + // processResourceSpan processes the RS and all of its spans and then returns the last // view metric context. The context can be used for tests func (s *redaction) processResourceSpan(ctx context.Context, rs ptrace.ResourceSpans) { @@ -79,6 +97,63 @@ func (s *redaction) processResourceSpan(ctx context.Context, rs ptrace.ResourceS } } +// processResourceLog processes the resource logs and all of their log records, +// redacting the resource and log record attributes +func (s *redaction) processResourceLog(ctx context.Context, rl plog.ResourceLogs) { + rsAttrs := rl.Resource().Attributes() + + s.processAttrs(ctx, rsAttrs) + + for j := 0; j < rl.ScopeLogs().Len(); j++ { + ils := rl.ScopeLogs().At(j) + for k := 0; k < ils.LogRecords().Len(); k++ { + log := ils.LogRecords().At(k) + s.processAttrs(ctx, log.Attributes()) + } + } +} + +func (s *redaction) processResourceMetric(ctx context.Context, rm pmetric.ResourceMetrics) { + rsAttrs := rm.Resource().Attributes() + + s.processAttrs(ctx, rsAttrs) + + for j := 0; j < rm.ScopeMetrics().Len(); j++ { + ils := rm.ScopeMetrics().At(j) + for k := 0; k < ils.Metrics().Len(); k++ { + metric := ils.Metrics().At(k) + switch metric.Type() { + case pmetric.MetricTypeGauge: + dps := metric.Gauge().DataPoints() + for i := 0; i < dps.Len(); i++ { + s.processAttrs(ctx, dps.At(i).Attributes()) + } + case pmetric.MetricTypeSum: + dps := metric.Sum().DataPoints() + for i := 0; i < dps.Len(); i++ { + s.processAttrs(ctx, dps.At(i).Attributes()) + } + case pmetric.MetricTypeHistogram: + dps := metric.Histogram().DataPoints() + for i := 0; i < dps.Len(); i++ { + s.processAttrs(ctx, dps.At(i).Attributes()) + } + case pmetric.MetricTypeExponentialHistogram: + dps := metric.ExponentialHistogram().DataPoints() + for i := 0; i < dps.Len(); i++ { + s.processAttrs(ctx, dps.At(i).Attributes()) + } + case pmetric.MetricTypeSummary: + dps := metric.Summary().DataPoints() + for i := 0; i < dps.Len(); i++ { + s.processAttrs(ctx, dps.At(i).Attributes()) + } + case pmetric.MetricTypeEmpty: + } + } + } +} + // processAttrs redacts the attributes of a resource span or a span func (s *redaction) processAttrs(_ context.Context, attributes pcommon.Map) { // TODO: Use the context for recording metrics diff --git a/processor/redactionprocessor/processor_test.go b/processor/redactionprocessor/processor_test.go index b44ab20c950e..acc11d817c81 100644 --- a/processor/redactionprocessor/processor_test.go +++ b/processor/redactionprocessor/processor_test.go @@ -12,6 +12,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap/zaptest" ) @@ -35,16 +37,33 @@ func TestRedactUnknownAttributes(t *testing.T) { } outTraces := runTest(t, allowed, redacted, nil, ignored, config) - - attr :=
outTraces.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes() - for k, v := range allowed { - val, ok := attr.Get(k) - assert.True(t, ok) - assert.Equal(t, v.AsRaw(), val.AsRaw()) - } - for k := range redacted { - _, ok := attr.Get(k) - assert.False(t, ok) + outLogs := runLogsTest(t, allowed, redacted, nil, ignored, config) + outMetricsGauge := runMetricsTest(t, allowed, redacted, nil, ignored, config, pmetric.MetricTypeGauge) + outMetricsSum := runMetricsTest(t, allowed, redacted, nil, ignored, config, pmetric.MetricTypeSum) + outMetricsHistogram := runMetricsTest(t, allowed, redacted, nil, ignored, config, pmetric.MetricTypeHistogram) + outMetricsExponentialHistogram := runMetricsTest(t, allowed, redacted, nil, ignored, config, pmetric.MetricTypeExponentialHistogram) + outMetricsSummary := runMetricsTest(t, allowed, redacted, nil, ignored, config, pmetric.MetricTypeSummary) + + attrs := []pcommon.Map{ + outTraces.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes(), + outLogs.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes(), + outMetricsGauge.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Gauge().DataPoints().At(0).Attributes(), + outMetricsSum.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes(), + outMetricsHistogram.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Histogram().DataPoints().At(0).Attributes(), + outMetricsExponentialHistogram.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).ExponentialHistogram().DataPoints().At(0).Attributes(), + outMetricsSummary.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Summary().DataPoints().At(0).Attributes(), + } + + for _, attr := range attrs { + for k, v := range allowed { + val, ok := attr.Get(k) + assert.True(t, ok) + assert.Equal(t, v.AsRaw(), val.AsRaw()) + } + for k := range redacted { + _, ok := attr.Get(k) + assert.False(t, ok) + } } } @@ -63,15 +82,32 @@ func TestAllowAllKeys(t *testing.T) { } outTraces := runTest(t, allowed, nil, nil, nil, config) - - attr := outTraces.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes() - for k, v := range allowed { - val, ok := attr.Get(k) - assert.True(t, ok) - assert.Equal(t, v.AsRaw(), val.AsRaw()) + outLogs := runLogsTest(t, allowed, nil, nil, nil, config) + outMetricsGauge := runMetricsTest(t, allowed, nil, nil, nil, config, pmetric.MetricTypeGauge) + outMetricsSum := runMetricsTest(t, allowed, nil, nil, nil, config, pmetric.MetricTypeSum) + outMetricsHistogram := runMetricsTest(t, allowed, nil, nil, nil, config, pmetric.MetricTypeHistogram) + outMetricsExponentialHistogram := runMetricsTest(t, allowed, nil, nil, nil, config, pmetric.MetricTypeExponentialHistogram) + outMetricsSummary := runMetricsTest(t, allowed, nil, nil, nil, config, pmetric.MetricTypeSummary) + + attrs := []pcommon.Map{ + outTraces.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes(), + outLogs.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes(), + outMetricsGauge.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Gauge().DataPoints().At(0).Attributes(), + outMetricsSum.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes(), + outMetricsHistogram.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Histogram().DataPoints().At(0).Attributes(), + 
outMetricsExponentialHistogram.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).ExponentialHistogram().DataPoints().At(0).Attributes(), + outMetricsSummary.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Summary().DataPoints().At(0).Attributes(), + } + + for _, attr := range attrs { + for k, v := range allowed { + val, ok := attr.Get(k) + assert.True(t, ok) + assert.Equal(t, v.AsRaw(), val.AsRaw()) + } + value, _ := attr.Get("name") + assert.Equal(t, "placeholder", value.Str()) } - value, _ := attr.Get("name") - assert.Equal(t, "placeholder", value.Str()) } // TestAllowAllKeysMaskValues validates that the processor still redacts @@ -92,15 +128,32 @@ func TestAllowAllKeysMaskValues(t *testing.T) { } outTraces := runTest(t, allowed, nil, masked, nil, config) - - attr := outTraces.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes() - for k, v := range allowed { - val, ok := attr.Get(k) - assert.True(t, ok) - assert.Equal(t, v.AsRaw(), val.AsRaw()) + outLogs := runLogsTest(t, allowed, nil, masked, nil, config) + outMetricsGauge := runMetricsTest(t, allowed, nil, masked, nil, config, pmetric.MetricTypeGauge) + outMetricsSum := runMetricsTest(t, allowed, nil, masked, nil, config, pmetric.MetricTypeSum) + outMetricsHistogram := runMetricsTest(t, allowed, nil, masked, nil, config, pmetric.MetricTypeHistogram) + outMetricsExponentialHistogram := runMetricsTest(t, allowed, nil, masked, nil, config, pmetric.MetricTypeExponentialHistogram) + outMetricsSummary := runMetricsTest(t, allowed, nil, masked, nil, config, pmetric.MetricTypeSummary) + + attrs := []pcommon.Map{ + outTraces.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes(), + outLogs.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes(), + outMetricsGauge.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Gauge().DataPoints().At(0).Attributes(), + outMetricsSum.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes(), + outMetricsHistogram.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Histogram().DataPoints().At(0).Attributes(), + outMetricsExponentialHistogram.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).ExponentialHistogram().DataPoints().At(0).Attributes(), + outMetricsSummary.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Summary().DataPoints().At(0).Attributes(), + } + + for _, attr := range attrs { + for k, v := range allowed { + val, ok := attr.Get(k) + assert.True(t, ok) + assert.Equal(t, v.AsRaw(), val.AsRaw()) + } + value, _ := attr.Get("credit_card") + assert.Equal(t, "placeholder ****", value.Str()) } - value, _ := attr.Get("credit_card") - assert.Equal(t, "placeholder ****", value.Str()) } // TODO: Test redaction with metric tags in a metrics PR @@ -131,35 +184,52 @@ func TestRedactSummaryDebug(t *testing.T) { } outTraces := runTest(t, allowed, redacted, masked, ignored, config) + outLogs := runLogsTest(t, allowed, redacted, masked, ignored, config) + outMetricsGauge := runMetricsTest(t, allowed, redacted, masked, ignored, config, pmetric.MetricTypeGauge) + outMetricsSum := runMetricsTest(t, allowed, redacted, masked, ignored, config, pmetric.MetricTypeSum) + outMetricsHistogram := runMetricsTest(t, allowed, redacted, masked, ignored, config, pmetric.MetricTypeHistogram) + outMetricsExponentialHistogram := runMetricsTest(t, allowed, redacted, masked, ignored, config, pmetric.MetricTypeExponentialHistogram) + outMetricsSummary := runMetricsTest(t, allowed, 
redacted, masked, ignored, config, pmetric.MetricTypeSummary) + + attrs := []pcommon.Map{ + outTraces.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes(), + outLogs.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes(), + outMetricsGauge.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Gauge().DataPoints().At(0).Attributes(), + outMetricsSum.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes(), + outMetricsHistogram.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Histogram().DataPoints().At(0).Attributes(), + outMetricsExponentialHistogram.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).ExponentialHistogram().DataPoints().At(0).Attributes(), + outMetricsSummary.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Summary().DataPoints().At(0).Attributes(), + } + + for _, attr := range attrs { + deleted := make([]string, 0, len(redacted)) + for k := range redacted { + _, ok := attr.Get(k) + assert.False(t, ok) + deleted = append(deleted, k) + } + maskedKeys, ok := attr.Get(redactedKeys) + assert.True(t, ok) + sort.Strings(deleted) + assert.Equal(t, strings.Join(deleted, ","), maskedKeys.Str()) + maskedKeyCount, ok := attr.Get(redactedKeyCount) + assert.True(t, ok) + assert.Equal(t, int64(len(deleted)), maskedKeyCount.Int()) - attr := outTraces.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes() - deleted := make([]string, 0, len(redacted)) - for k := range redacted { - _, ok := attr.Get(k) - assert.False(t, ok) - deleted = append(deleted, k) - } - maskedKeys, ok := attr.Get(redactedKeys) - assert.True(t, ok) - sort.Strings(deleted) - assert.Equal(t, strings.Join(deleted, ","), maskedKeys.Str()) - maskedKeyCount, ok := attr.Get(redactedKeyCount) - assert.True(t, ok) - assert.Equal(t, int64(len(deleted)), maskedKeyCount.Int()) - - ignoredKeyCount, ok := attr.Get(ignoredKeyCount) - assert.True(t, ok) - assert.Equal(t, int64(len(ignored)), ignoredKeyCount.Int()) - - blockedKeys := []string{"name"} - maskedValues, ok := attr.Get(maskedValues) - assert.True(t, ok) - assert.Equal(t, strings.Join(blockedKeys, ","), maskedValues.Str()) - maskedValueCount, ok := attr.Get(maskedValueCount) - assert.True(t, ok) - assert.Equal(t, int64(1), maskedValueCount.Int()) - value, _ := attr.Get("name") - assert.Equal(t, "placeholder ****", value.Str()) + ignoredKeyCount, ok := attr.Get(ignoredKeyCount) + assert.True(t, ok) + assert.Equal(t, int64(len(ignored)), ignoredKeyCount.Int()) + + blockedKeys := []string{"name"} + maskedValues, ok := attr.Get(maskedValues) + assert.True(t, ok) + assert.Equal(t, strings.Join(blockedKeys, ","), maskedValues.Str()) + maskedValueCount, ok := attr.Get(maskedValueCount) + assert.True(t, ok) + assert.Equal(t, int64(1), maskedValueCount.Int()) + value, _ := attr.Get("name") + assert.Equal(t, "placeholder ****", value.Str()) + } } // TestRedactSummaryInfo validates that the processor writes a verbose summary @@ -186,33 +256,50 @@ func TestRedactSummaryInfo(t *testing.T) { } outTraces := runTest(t, allowed, redacted, masked, ignored, config) - - attr := outTraces.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes() - deleted := make([]string, 0, len(redacted)) - for k := range redacted { - _, ok := attr.Get(k) + outLogs := runLogsTest(t, allowed, redacted, masked, ignored, config) + outMetricsGauge := runMetricsTest(t, allowed, redacted, masked, ignored, config, pmetric.MetricTypeGauge) + outMetricsSum := runMetricsTest(t, 
allowed, redacted, masked, ignored, config, pmetric.MetricTypeSum) + outMetricsHistogram := runMetricsTest(t, allowed, redacted, masked, ignored, config, pmetric.MetricTypeHistogram) + outMetricsExponentialHistogram := runMetricsTest(t, allowed, redacted, masked, ignored, config, pmetric.MetricTypeExponentialHistogram) + outMetricsSummary := runMetricsTest(t, allowed, redacted, masked, ignored, config, pmetric.MetricTypeSummary) + + attrs := []pcommon.Map{ + outTraces.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes(), + outLogs.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes(), + outMetricsGauge.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Gauge().DataPoints().At(0).Attributes(), + outMetricsSum.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes(), + outMetricsHistogram.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Histogram().DataPoints().At(0).Attributes(), + outMetricsExponentialHistogram.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).ExponentialHistogram().DataPoints().At(0).Attributes(), + outMetricsSummary.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Summary().DataPoints().At(0).Attributes(), + } + + for _, attr := range attrs { + deleted := make([]string, 0, len(redacted)) + for k := range redacted { + _, ok := attr.Get(k) + assert.False(t, ok) + deleted = append(deleted, k) + } + _, ok := attr.Get(redactedKeys) assert.False(t, ok) - deleted = append(deleted, k) - } - _, ok := attr.Get(redactedKeys) - assert.False(t, ok) - maskedKeyCount, ok := attr.Get(redactedKeyCount) - assert.True(t, ok) - assert.Equal(t, int64(len(deleted)), maskedKeyCount.Int()) - _, ok = attr.Get(maskedValues) - assert.False(t, ok) - - maskedValueCount, ok := attr.Get(maskedValueCount) - assert.True(t, ok) - assert.Equal(t, int64(1), maskedValueCount.Int()) - value, _ := attr.Get("name") - assert.Equal(t, "placeholder ****", value.Str()) - - ignoredKeyCount, ok := attr.Get(ignoredKeyCount) - assert.True(t, ok) - assert.Equal(t, int64(1), ignoredKeyCount.Int()) - value, _ = attr.Get("safe_attribute") - assert.Equal(t, "harmless but suspicious 4111111111111141", value.Str()) + maskedKeyCount, ok := attr.Get(redactedKeyCount) + assert.True(t, ok) + assert.Equal(t, int64(len(deleted)), maskedKeyCount.Int()) + _, ok = attr.Get(maskedValues) + assert.False(t, ok) + + maskedValueCount, ok := attr.Get(maskedValueCount) + assert.True(t, ok) + assert.Equal(t, int64(1), maskedValueCount.Int()) + value, _ := attr.Get("name") + assert.Equal(t, "placeholder ****", value.Str()) + + ignoredKeyCount, ok := attr.Get(ignoredKeyCount) + assert.True(t, ok) + assert.Equal(t, int64(1), ignoredKeyCount.Int()) + value, _ = attr.Get("safe_attribute") + assert.Equal(t, "harmless but suspicious 4111111111111141", value.Str()) + } } // TestRedactSummarySilent validates that the processor does not create the @@ -232,22 +319,39 @@ func TestRedactSummarySilent(t *testing.T) { } outTraces := runTest(t, allowed, redacted, masked, nil, config) - - attr := outTraces.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes() - for k := range redacted { - _, ok := attr.Get(k) + outLogs := runLogsTest(t, allowed, redacted, masked, nil, config) + outMetricsGauge := runMetricsTest(t, allowed, redacted, masked, nil, config, pmetric.MetricTypeGauge) + outMetricsSum := runMetricsTest(t, allowed, redacted, masked, nil, config, pmetric.MetricTypeSum) + outMetricsHistogram := runMetricsTest(t, allowed, 
redacted, masked, nil, config, pmetric.MetricTypeHistogram) + outMetricsExponentialHistogram := runMetricsTest(t, allowed, redacted, masked, nil, config, pmetric.MetricTypeExponentialHistogram) + outMetricsSummary := runMetricsTest(t, allowed, redacted, masked, nil, config, pmetric.MetricTypeSummary) + + attrs := []pcommon.Map{ + outTraces.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes(), + outLogs.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes(), + outMetricsGauge.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Gauge().DataPoints().At(0).Attributes(), + outMetricsSum.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes(), + outMetricsHistogram.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Histogram().DataPoints().At(0).Attributes(), + outMetricsExponentialHistogram.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).ExponentialHistogram().DataPoints().At(0).Attributes(), + outMetricsSummary.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Summary().DataPoints().At(0).Attributes(), + } + + for _, attr := range attrs { + for k := range redacted { + _, ok := attr.Get(k) + assert.False(t, ok) + } + _, ok := attr.Get(redactedKeys) + assert.False(t, ok) + _, ok = attr.Get(redactedKeyCount) + assert.False(t, ok) + _, ok = attr.Get(maskedValues) + assert.False(t, ok) + _, ok = attr.Get(maskedValueCount) assert.False(t, ok) + value, _ := attr.Get("name") + assert.Equal(t, "placeholder ****", value.Str()) } - _, ok := attr.Get(redactedKeys) - assert.False(t, ok) - _, ok = attr.Get(redactedKeyCount) - assert.False(t, ok) - _, ok = attr.Get(maskedValues) - assert.False(t, ok) - _, ok = attr.Get(maskedValueCount) - assert.False(t, ok) - value, _ := attr.Get("name") - assert.Equal(t, "placeholder ****", value.Str()) } // TestRedactSummaryDefault validates that the processor does not create the @@ -265,18 +369,35 @@ func TestRedactSummaryDefault(t *testing.T) { } outTraces := runTest(t, allowed, nil, masked, ignored, config) - - attr := outTraces.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes() - _, ok := attr.Get(redactedKeys) - assert.False(t, ok) - _, ok = attr.Get(redactedKeyCount) - assert.False(t, ok) - _, ok = attr.Get(maskedValues) - assert.False(t, ok) - _, ok = attr.Get(maskedValueCount) - assert.False(t, ok) - _, ok = attr.Get(ignoredKeyCount) - assert.False(t, ok) + outLogs := runLogsTest(t, allowed, nil, masked, ignored, config) + outMetricsGauge := runMetricsTest(t, allowed, nil, masked, ignored, config, pmetric.MetricTypeGauge) + outMetricsSum := runMetricsTest(t, allowed, nil, masked, ignored, config, pmetric.MetricTypeSum) + outMetricsHistogram := runMetricsTest(t, allowed, nil, masked, ignored, config, pmetric.MetricTypeHistogram) + outMetricsExponentialHistogram := runMetricsTest(t, allowed, nil, masked, ignored, config, pmetric.MetricTypeExponentialHistogram) + outMetricsSummary := runMetricsTest(t, allowed, nil, masked, ignored, config, pmetric.MetricTypeSummary) + + attrs := []pcommon.Map{ + outTraces.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes(), + outLogs.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes(), + outMetricsGauge.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Gauge().DataPoints().At(0).Attributes(), + outMetricsSum.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes(), + 
outMetricsHistogram.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Histogram().DataPoints().At(0).Attributes(), + outMetricsExponentialHistogram.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).ExponentialHistogram().DataPoints().At(0).Attributes(), + outMetricsSummary.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Summary().DataPoints().At(0).Attributes(), + } + + for _, attr := range attrs { + _, ok := attr.Get(redactedKeys) + assert.False(t, ok) + _, ok = attr.Get(redactedKeyCount) + assert.False(t, ok) + _, ok = attr.Get(maskedValues) + assert.False(t, ok) + _, ok = attr.Get(maskedValueCount) + assert.False(t, ok) + _, ok = attr.Get(ignoredKeyCount) + assert.False(t, ok) + } } // TestMultipleBlockValues validates that the processor can block multiple @@ -297,35 +418,52 @@ func TestMultipleBlockValues(t *testing.T) { } outTraces := runTest(t, allowed, redacted, masked, nil, config) + outLogs := runLogsTest(t, allowed, redacted, masked, nil, config) + outMetricsGauge := runMetricsTest(t, allowed, redacted, masked, nil, config, pmetric.MetricTypeGauge) + outMetricsSum := runMetricsTest(t, allowed, redacted, masked, nil, config, pmetric.MetricTypeSum) + outMetricsHistogram := runMetricsTest(t, allowed, redacted, masked, nil, config, pmetric.MetricTypeHistogram) + outMetricsExponentialHistogram := runMetricsTest(t, allowed, redacted, masked, nil, config, pmetric.MetricTypeExponentialHistogram) + outMetricsSummary := runMetricsTest(t, allowed, redacted, masked, nil, config, pmetric.MetricTypeSummary) + + attrs := []pcommon.Map{ + outTraces.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes(), + outLogs.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes(), + outMetricsGauge.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Gauge().DataPoints().At(0).Attributes(), + outMetricsSum.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints().At(0).Attributes(), + outMetricsHistogram.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Histogram().DataPoints().At(0).Attributes(), + outMetricsExponentialHistogram.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).ExponentialHistogram().DataPoints().At(0).Attributes(), + outMetricsSummary.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Summary().DataPoints().At(0).Attributes(), + } + + for _, attr := range attrs { + deleted := make([]string, 0, len(redacted)) + for k := range redacted { + _, ok := attr.Get(k) + assert.False(t, ok) + deleted = append(deleted, k) + } + maskedKeys, ok := attr.Get(redactedKeys) + assert.True(t, ok) + assert.Equal(t, strings.Join(deleted, ","), maskedKeys.Str()) + maskedKeyCount, ok := attr.Get(redactedKeyCount) + assert.True(t, ok) + assert.Equal(t, int64(len(deleted)), maskedKeyCount.Int()) - attr := outTraces.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes() - deleted := make([]string, 0, len(redacted)) - for k := range redacted { - _, ok := attr.Get(k) - assert.False(t, ok) - deleted = append(deleted, k) - } - maskedKeys, ok := attr.Get(redactedKeys) - assert.True(t, ok) - assert.Equal(t, strings.Join(deleted, ","), maskedKeys.Str()) - maskedKeyCount, ok := attr.Get(redactedKeyCount) - assert.True(t, ok) - assert.Equal(t, int64(len(deleted)), maskedKeyCount.Int()) - - blockedKeys := []string{"name", "mystery"} - maskedValues, ok := attr.Get(maskedValues) - assert.True(t, ok) - sort.Strings(blockedKeys) - assert.Equal(t, strings.Join(blockedKeys, ","), 
maskedValues.Str()) - assert.Equal(t, pcommon.ValueTypeStr, maskedValues.Type()) - assert.Equal(t, strings.Join(blockedKeys, ","), maskedValues.Str()) - maskedValueCount, ok := attr.Get(maskedValueCount) - assert.True(t, ok) - assert.Equal(t, int64(len(blockedKeys)), maskedValueCount.Int()) - nameValue, _ := attr.Get("name") - mysteryValue, _ := attr.Get("mystery") - assert.Equal(t, "placeholder **** ****", nameValue.Str()) - assert.Equal(t, "mystery ****", mysteryValue.Str()) + blockedKeys := []string{"name", "mystery"} + maskedValues, ok := attr.Get(maskedValues) + assert.True(t, ok) + sort.Strings(blockedKeys) + assert.Equal(t, strings.Join(blockedKeys, ","), maskedValues.Str()) + assert.Equal(t, pcommon.ValueTypeStr, maskedValues.Type()) + assert.Equal(t, strings.Join(blockedKeys, ","), maskedValues.Str()) + maskedValueCount, ok := attr.Get(maskedValueCount) + assert.True(t, ok) + assert.Equal(t, int64(len(blockedKeys)), maskedValueCount.Int()) + nameValue, _ := attr.Get("name") + mysteryValue, _ := attr.Get("mystery") + assert.Equal(t, "placeholder **** ****", nameValue.Str()) + assert.Equal(t, "mystery ****", mysteryValue.Str()) + } } // TestProcessAttrsAppliedTwice validates a use case when data is coming through redaction processor more than once. @@ -415,6 +553,120 @@ func runTest( return outBatch } +// runLogsTest transforms the test input log data and passes it through the processor +func runLogsTest( + t *testing.T, + allowed map[string]pcommon.Value, + redacted map[string]pcommon.Value, + masked map[string]pcommon.Value, + ignored map[string]pcommon.Value, + config *Config, +) plog.Logs { + inBatch := plog.NewLogs() + rl := inBatch.ResourceLogs().AppendEmpty() + ils := rl.ScopeLogs().AppendEmpty() + + library := ils.Scope() + library.SetName("first-library") + logEntry := ils.LogRecords().AppendEmpty() + logEntry.Body().SetStr("first-batch-first-logEntry") + logEntry.SetTraceID([16]byte{1, 2, 3, 4}) + + length := len(allowed) + len(masked) + len(redacted) + len(ignored) + for k, v := range allowed { + v.CopyTo(logEntry.Attributes().PutEmpty(k)) + } + for k, v := range masked { + v.CopyTo(logEntry.Attributes().PutEmpty(k)) + } + for k, v := range redacted { + v.CopyTo(logEntry.Attributes().PutEmpty(k)) + } + for k, v := range ignored { + v.CopyTo(logEntry.Attributes().PutEmpty(k)) + } + + assert.Equal(t, logEntry.Attributes().Len(), length) + assert.Equal(t, ils.LogRecords().At(0).Attributes().Len(), length) + assert.Equal(t, inBatch.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().Len(), length) + + // test + ctx := context.Background() + processor, err := newRedaction(ctx, config, zaptest.NewLogger(t)) + assert.NoError(t, err) + outBatch, err := processor.processLogs(ctx, inBatch) + + // verify + assert.NoError(t, err) + return outBatch +} + +// runMetricsTest transforms the test input metric data and passes it through the processor +func runMetricsTest( + t *testing.T, + allowed map[string]pcommon.Value, + redacted map[string]pcommon.Value, + masked map[string]pcommon.Value, + ignored map[string]pcommon.Value, + config *Config, + metricType pmetric.MetricType, +) pmetric.Metrics { + inBatch := pmetric.NewMetrics() + rl := inBatch.ResourceMetrics().AppendEmpty() + ils := rl.ScopeMetrics().AppendEmpty() + + library := ils.Scope() + library.SetName("first-library") + metric := ils.Metrics().AppendEmpty() + metric.SetDescription("first-batch-first-metric") + + length := len(allowed) + len(masked) + len(redacted) + len(ignored) + + var dataPointAttrs 
pcommon.Map + switch metricType { + case pmetric.MetricTypeGauge: + dataPointAttrs = metric.SetEmptyGauge().DataPoints().AppendEmpty().Attributes() + case pmetric.MetricTypeSum: + dataPointAttrs = metric.SetEmptySum().DataPoints().AppendEmpty().Attributes() + case pmetric.MetricTypeHistogram: + dataPointAttrs = metric.SetEmptyHistogram().DataPoints().AppendEmpty().Attributes() + case pmetric.MetricTypeExponentialHistogram: + dataPointAttrs = metric.SetEmptyExponentialHistogram().DataPoints().AppendEmpty().Attributes() + case pmetric.MetricTypeSummary: + dataPointAttrs = metric.SetEmptySummary().DataPoints().AppendEmpty().Attributes() + case pmetric.MetricTypeEmpty: + } + for k, v := range allowed { + v.CopyTo(dataPointAttrs.PutEmpty(k)) + v.CopyTo(rl.Resource().Attributes().PutEmpty(k)) + } + for k, v := range masked { + v.CopyTo(dataPointAttrs.PutEmpty(k)) + v.CopyTo(rl.Resource().Attributes().PutEmpty(k)) + } + for k, v := range redacted { + v.CopyTo(dataPointAttrs.PutEmpty(k)) + v.CopyTo(rl.Resource().Attributes().PutEmpty(k)) + } + for k, v := range ignored { + v.CopyTo(dataPointAttrs.PutEmpty(k)) + v.CopyTo(rl.Resource().Attributes().PutEmpty(k)) + } + + assert.Equal(t, length, dataPointAttrs.Len()) + assert.Equal(t, length, rl.Resource().Attributes().Len()) + + // test + ctx := context.Background() + processor, err := newRedaction(ctx, config, zaptest.NewLogger(t)) + assert.NoError(t, err) + outBatch, err := processor.processMetrics(ctx, inBatch) + + // verify + assert.NoError(t, err) + return outBatch +} + // BenchmarkRedactSummaryDebug measures the performance impact of running the processor // with full debug level of output for redacting span attributes not on the allowed // keys list
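
For readers skimming the patch, a minimal sketch of what the new helpers exercise end to end may help: one gauge data point pushed through the redaction processor with a debug-level summary. It reuses only identifiers visible in this diff (newRedaction, processMetrics, pmetric, zaptest, assert) and assumes the test file's existing imports; the Config field names (AllowedKeys, BlockedValues, Summary) and the card-number pattern are assumptions about the processor's configuration, not taken from this patch.

```go
// Illustrative sketch only, not part of the patch. Config field names are
// assumed from the redaction processor's configuration and may differ.
func redactGaugeSketch(t *testing.T) {
	config := &Config{
		AllowedKeys:   []string{"name"},                     // assumed field name
		BlockedValues: []string{"4[0-9]{12}(?:[0-9]{3})?"},  // assumed field name
		Summary:       "debug",                              // assumed field name/value
	}

	// Build a single gauge data point carrying one allowed attribute with a
	// blocked value embedded in it, and one attribute that is not allowed.
	metrics := pmetric.NewMetrics()
	dp := metrics.ResourceMetrics().AppendEmpty().
		ScopeMetrics().AppendEmpty().
		Metrics().AppendEmpty().
		SetEmptyGauge().DataPoints().AppendEmpty()
	dp.Attributes().PutStr("name", "placeholder 4111111111111111")
	dp.Attributes().PutStr("credit_card", "4111111111111111")

	// Run the data through the processor, mirroring runMetricsTest above.
	ctx := context.Background()
	processor, err := newRedaction(ctx, config, zaptest.NewLogger(t))
	assert.NoError(t, err)
	out, err := processor.processMetrics(ctx, metrics)
	assert.NoError(t, err)

	attrs := out.ResourceMetrics().At(0).ScopeMetrics().At(0).
		Metrics().At(0).Gauge().DataPoints().At(0).Attributes()

	// The non-allowed key should be deleted, and the blocked value inside the
	// allowed attribute should no longer appear in clear text.
	_, ok := attrs.Get("credit_card")
	assert.False(t, ok)
	name, _ := attrs.Get("name")
	assert.NotContains(t, name.Str(), "4111111111111111")
}
```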