diff --git a/.cspell.json b/.cspell.json new file mode 100644 index 00000000..da552a52 --- /dev/null +++ b/.cspell.json @@ -0,0 +1,89 @@ +// http://cspell.org/configuration/ +{ + "version": "0.2", + "language": "en,en-US", + "useGitignore": true, + "minWordLength": 4, + "ignorePaths": [ + "target/**" + ], + // list of words to be ignored. unlike `words` below, these won't be + // suggested as corrections for misspelled words. + "ignoreWords": [ + "otel", + "rustdoc", + "rustfilt" + ], + // these are words that are always considered incorrect. + "flagWords": [ + "recieve", + "reciever", + "seperate", + "hte", + "teh" + ], + // these are words that are always correct and can be thought of as our + // workspace dictionary. + "words": [ + "actix", + "appender", + "appenders", + "Bhasin", + "Cijo", + "codecov", + "deque", + "Dirkjan", + "hasher", + "isahc", + "Isobel", + "jaegertracing", + "Kühle", + "Kumar", + "Lalit", + "msrv", + "Ochtman", + "openetelemetry", + "opentelemetry", + "OTLP", + "protoc", + "quantile", + "Redelmeier", + "reqwest", + "rustc", + "Tescher", + "Zhongyang", + "zipkin" + ], + "enabledLanguageIds": [ + "jsonc", + "markdown", + "plaintext", + "rust", + "shellscript" + ], + "languageSettings": [ + { + "languageId": "jsonc", + "includeRegExpList": [ + "CStyleComment" + ] + }, + { + "languageId": "markdown", + "caseSensitive": false + }, + { + "languageId": "rust", + "includeRegExpList": [ + "CStyleComment", + "strings" + ] + }, + { + "languageId": "shellscript", + "includeRegExpList": [ + "/#.*/g" + ] + } + ] +} diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..2f7896d1 --- /dev/null +++ b/.dockerignore @@ -0,0 +1 @@ +target/ diff --git a/.github/ISSUE_TEMPLATE/BUG-REPORT.yml b/.github/ISSUE_TEMPLATE/BUG-REPORT.yml new file mode 100644 index 00000000..ac6cd815 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/BUG-REPORT.yml @@ -0,0 +1,56 @@ +name: Bug Report +description: File a bug report +title: "[Bug]: " +labels: ["bug", "triage:todo"] +projects: ["open-telemetry/opentelemetry-rust-contrib"] +body: + - type: markdown + attributes: + value: | + Thanks for taking the time to fill out this bug report! + - type: textarea + id: what-happened + attributes: + label: What happened? + description: Also tell us, what did you expect to happen? + placeholder: Tell us what you see! + value: "A bug happened!" + validations: + required: true + - type: textarea + id: api-version + attributes: + label: API Version + description: What version of the OpenTelemetry API are you using? + placeholder: 0.x, 1.x, etc. + validations: + required: true + - type: textarea + id: sdk-version + attributes: + label: SDK Version + description: What version of the OpenTelemetry SDK are you using? + placeholder: 0.x, 1.x, etc. + validations: + required: true + - type: dropdown + id: components + attributes: + label: What component are you working with? + multiple: true + options: + - opentelemetry-aws + - opentelemetry-contrib + - opentelemetry-datadog + - opentelemetry-dynatrace + - opentelemetry-stackdriver + - opentelemetry-user-events-logs + - opentelemetry-user-events-metrics + - opentelemetry-zpages + - N/A + - type: textarea + id: logs + attributes: + label: Relevant log output + description: Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks. 
+ render: shell diff --git a/.github/ISSUE_TEMPLATE/FEATURE-REQUEST.yml b/.github/ISSUE_TEMPLATE/FEATURE-REQUEST.yml new file mode 100644 index 00000000..e43a4659 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/FEATURE-REQUEST.yml @@ -0,0 +1,63 @@ +--- +name: "Feature Request" +description: Request a feature for the OpenTelemetry Rust implementation. +title: "[Feature]: " +labels: ["enhancement", "triage:todo"] +projects: ["open-telemetry/opentelemetry-rust-contrib"] +body: + - type: markdown + attributes: + value: | + Thanks for using our library and trying to make it better! + + Before opening a feature request against this repo, consider whether the feature + should/could be implemented in the [other OpenTelemetry client + libraries](https://github.com/open-telemetry/). If so, please [open an issue on + opentelemetry-specification](https://github.com/open-telemetry/opentelemetry-specification/issues/new) first. - type: textarea + id: related-problem + attributes: + label: Related Problems? + description: Is your feature request related to a problem? If so, provide a concise description of the problem. + placeholder: Include the Issue ID from this or other repos. + validations: + required: false + - type: dropdown + id: components + attributes: + label: What component are you working with? + multiple: true + options: + - opentelemetry-aws + - opentelemetry-contrib + - opentelemetry-datadog + - opentelemetry-dynatrace + - opentelemetry-stackdriver + - opentelemetry-user-events-logs + - opentelemetry-user-events-metrics + - opentelemetry-zpages + - N/A + - type: textarea + id: solution + attributes: + label: "Describe the solution you'd like:" + description: What do you want to happen instead? What is the expected behavior? + placeholder: I'd like the api to ... + validations: + required: true + - type: textarea + id: alternatives + attributes: + label: Considered Alternatives + description: Which alternative solutions or features have you considered? + placeholder: Some potential solutions + validations: + required: false + - type: textarea + id: additional-context + attributes: + label: Additional Context + description: Add any other context about the feature request here. + placeholder: Some related requests in other projects or upstream spec proposals. + validations: + required: false diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 00000000..d329fa54 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,11 @@ +contact_links: + - name: GitHub Discussions + url: https://github.com/open-telemetry/opentelemetry-rust-contrib/discussions/new/choose + about: Please ask questions here. + - name: Slack + url: https://cloud-native.slack.com/archives/C03GDP0H023 + about: Or the `#otel-rust` channel in the CNCF Slack instance. (Not terribly responsive.) + - name: "⚠️ Report a security vulnerability" + url: "https://github.com/open-telemetry/opentelemetry-rust-contrib/security/advisories/new" + about: "Report a security vulnerability." + diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000..a5b66d2f --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,13 @@ +Fixes # +Design discussion issue (if applicable) # + +## Changes + +Please provide a brief description of the changes here.
+ +## Merge requirement checklist + +* [ ] [CONTRIBUTING](https://github.com/open-telemetry/opentelemetry-rust/blob/main/CONTRIBUTING.md) guidelines followed +* [ ] Unit tests added/updated (if applicable) +* [ ] Appropriate `CHANGELOG.md` files updated for non-trivial, user-facing changes +* [ ] Changes in public API reviewed (if applicable) diff --git a/.github/codecov.yaml b/.github/codecov.yaml new file mode 100644 index 00000000..de1952b6 --- /dev/null +++ b/.github/codecov.yaml @@ -0,0 +1,30 @@ +codecov: + require_ci_to_pass: yes + +coverage: + precision: 1 + round: down + range: "50...100" + status: + project: + default: + target: auto + threshold: 0.5% + +ignore: + - "opentelemetry/src/testing" # test harnesses + - "opentelemetry-jaeger/src/testing" # test harness + - "opentelemetry-jaeger/src/exporter/thrift" # auto generated files + - "opentelemetry-otlp/src/proto" # auto generated files + - "opentelemetry-proto/src/proto" # auto generated files + # examples below + - "examples" + - "opentelemetry-jaeger/examples" + - "opentelemetry-zipkin/examples" + - "opentelemetry-otlp/examples" + - "opentelemetry-aws/examples" + - "opentelemetry-datadog/examples" + - "opentelemetry-dynatrace/examples" + - "opentelemetry-http/examples" + - "opentelemetry-prometheus/examples" + - "opentelemetry-zpages/examples" diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 00000000..9f805fe2 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,119 @@ +name: CI +env: + CI: true +on: + pull_request: + push: + branches: + - main + paths-ignore: + - '**.md' +jobs: + test: + strategy: + matrix: + rust: [stable, beta] + runs-on: ubuntu-latest + steps: + - name: Free disk space + run: | + df -h + sudo rm -rf /usr/local/lib/android + sudo rm -rf /usr/share/dotnet + df -h + - uses: actions/checkout@v1 + with: + submodules: true + - uses: actions-rs/toolchain@v1 + with: + toolchain: ${{ matrix.rust }} + components: rustfmt + profile: minimal + - uses: arduino/setup-protoc@v1 + - name: Test + run: ./scripts/test.sh + lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v1 + with: + submodules: true + - uses: actions-rs/toolchain@v1 + with: + toolchain: stable + components: rustfmt + profile: minimal + - uses: arduino/setup-protoc@v1 + - uses: actions-rs/cargo@v1 + with: + command: fmt + args: --all -- --check + - name: Lint + run: ./scripts/lint.sh +# TODO: re-add this test +# msrv: +# runs-on: ubuntu-latest +# steps: +# - uses: actions/checkout@v1 +# with: +# submodules: true +# - uses: actions-rs/toolchain@v1 +# with: +# profile: minimal +# toolchain: 1.65.0 +# override: true +# - name: Patch dependencies versions # some dependencies bump MSRV without major version bump +# run: ./scripts/patch_dependencies.sh +# - name: Run tests +# run: cargo --version && +# cargo test --manifest-path=opentelemetry/Cargo.toml --features trace,metrics,testing && +# cargo test --manifest-path=opentelemetry-jaeger/Cargo.toml --features rt-tokio && +# cargo test --manifest-path=opentelemetry-zipkin/Cargo.toml + cargo-deny: + runs-on: ubuntu-latest + continue-on-error: true # Prevent sudden announcement of a new advisory from failing ci + steps: + - uses: actions/checkout@v2 + - uses: EmbarkStudios/cargo-deny-action@v1 + with: + command: check advisories + docs: + continue-on-error: true + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions-rs/toolchain@v1 + with: + toolchain: nightly + components: rustfmt + override: true + - uses: 
arduino/setup-protoc@v1 + - name: doc + run: cargo doc --no-deps --all-features + env: + CARGO_INCREMENTAL: '0' + RUSTDOCFLAGS: -Dwarnings + coverage: + continue-on-error: true + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + submodules: true + - uses: actions-rs/toolchain@v1 + with: + toolchain: stable + components: rustfmt,llvm-tools-preview + override: true + - uses: arduino/setup-protoc@v1 + - name: cargo install cargo-llvm-cov + uses: taiki-e/install-action@cargo-llvm-cov + - name: cargo generate-lockfile + if: hashFiles('Cargo.lock') == '' + run: cargo generate-lockfile + - name: cargo llvm-cov + run: cargo llvm-cov --locked --all-features --workspace --lcov --output-path lcov.info + - name: Upload to codecov.io + uses: codecov/codecov-action@v3 + with: + fail_ci_if_error: true diff --git a/.gitignore b/.gitignore index 6985cf1b..51c76118 100644 --- a/.gitignore +++ b/.gitignore @@ -2,13 +2,12 @@ # will have compiled files and executables debug/ target/ - -# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries -# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html -Cargo.lock +*/target/ # These are backup files generated by rustfmt **/*.rs.bk # MSVC Windows builds of rustc generate these, which store debugging information *.pdb +/.vscode/ +Cargo.lock diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 00000000..e69de29b diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 00000000..7931a166 --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1,7 @@ +# Code owners file + +## This file controls who is tagged for review for any given pull request. + +## For anything not explicitly taken by someone else: + +* @open-telemetry/rust-approvers diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..e831d0df --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,195 @@ +# Contributing to opentelemetry-rust + +The Rust special interest group (SIG) meets weekly on Tuesdays at 8 AM Pacific +Time (16:00 UTC). The meeting is subject to change depending on contributors' +availability. Check the [OpenTelemetry community +calendar](https://calendar.google.com/calendar/embed?src=google.com_b79e3e90j7bbsa2n2p5an5lf60%40group.calendar.google.com) +for specific dates and for Zoom meeting links. "OTel Rust SIG" is the name of the +meeting for this group. + +Meeting notes are available as a public [Google +doc](https://docs.google.com/document/d/1tGKuCsSnyT2McDncVJrMgg74_z8V06riWZa0Sr79I_4/edit). +If you have trouble accessing the doc, please get in touch on +[Slack](https://cloud-native.slack.com/archives/C03GDP0H023). + +The meeting is open for all to join. We invite everyone to join our meeting, +regardless of your experience level. Whether you're a seasoned OpenTelemetry +developer, just starting your journey, or simply curious about the work we do, +you're more than welcome to participate! + +## Pull Requests + +### Prerequisites + +Crate `opentelemetry-otlp` uses gRPC + Protocol Buffers.
+You can provide the path to the protocol compiler `protoc` programmatically (this only works with `tonic`), or build it from source: + +```sh +export PROTOC=$(which protoc) +``` + +Prerequisites for building the protocol compiler `protoc` from source: + +- [protoc](https://github.com/protocolbuffers/protobuf) +- [cmake](https://cmake.org) +- [llvm](https://releases.llvm.org/download.html) (and the `LIBCLANG_PATH` environment variable pointing to the `bin` directory of the LLVM install) + +### How to Send Pull Requests + +Everyone is welcome to contribute code to `opentelemetry-rust` via +GitHub pull requests (PRs). + +```sh +git clone --recurse-submodules https://github.com/open-telemetry/opentelemetry-rust +``` + +Enter the newly created directory and add your fork as a new remote (`<YOUR_GITHUB_USERNAME>` is a placeholder): + +```sh +git remote add fork git@github.com:<YOUR_GITHUB_USERNAME>/opentelemetry-rust +``` + +Check out a new branch, make modifications, run linters and tests, and +push the branch to your fork: + +```sh +$ git checkout -b <YOUR_BRANCH_NAME> +# edit files +$ git add -p +$ git commit +$ git push fork <YOUR_BRANCH_NAME> +``` + +Open a pull request against the main +[opentelemetry-rust](https://github.com/open-telemetry/opentelemetry-rust) +repo. + +> **Note** +> It is recommended to run the [pre-commit script](precommit.sh) from the root of +the repo to catch any issues locally. + +### How to Receive Comments + +- If the PR is not ready for review, please put `[WIP]` in the title or mark it + as [`draft`](https://github.blog/2019-02-14-introducing-draft-pull-requests/). +- Make sure the CLA is signed and all required CI checks are clear. +- Submit small, focused PRs addressing a single concern/issue. +- Make sure the PR title reflects the contribution. +- Write a summary that helps readers understand the change. +- Include usage examples in the summary, where applicable. +- Include benchmarks (before/after) in the summary, for contributions that are + performance enhancements. + +### How to Get PRs Merged + +A PR is considered to be **ready to merge** when: + +- It has received approval from + [Approvers](https://github.com/open-telemetry/community/blob/main/community-membership.md#approver) + / + [Maintainers](https://github.com/open-telemetry/community/blob/main/community-membership.md#maintainer). +- Major feedback is resolved. + +Any Maintainer can merge the PR once it is **ready to merge**. Note that some +PRs may not be merged immediately if the repo is in the process of a release and +the maintainers decide to defer the PR to the next release train. Also, +maintainers may decide to wait for more than one approval for certain PRs, +particularly ones affecting multiple areas, or topics that may warrant +more discussion. + +## Design Choices + +As with other OpenTelemetry clients, opentelemetry-rust follows the +[opentelemetry-specification](https://github.com/open-telemetry/opentelemetry-specification). + +It's especially valuable to read through the [library +guidelines](https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/library-guidelines.md). + +### Focus on Capabilities, Not Structure Compliance + +OpenTelemetry is an evolving specification, one where the desires and +use cases are clear, but the methods to satisfy those use cases are +not. + +As such, contributions should provide functionality and behavior that +conform to the specification, but the interface and structure are +flexible. + +It is preferable to have contributions follow the idioms of the +language rather than conform to specific API names or argument +patterns in the spec.
+ +For a deeper discussion, see: + + +### Error Handling + +Currently, the OpenTelemetry Rust SDK has two ways to handle errors: in situations where a function is not allowed to return an error, call the global error handler; otherwise, return the error to the caller. + +The OpenTelemetry Rust SDK comes with an error type `opentelemetry::Error`, along with more specific error types per module. All errors returned by the trace module MUST be wrapped in `opentelemetry::trace::TraceError`, and all errors returned by the metrics module MUST be wrapped in `opentelemetry::metrics::MetricsError`. + +For users who want to implement their own exporters, it's RECOMMENDED to wrap all errors from the exporter into a crate-level error type that implements the `ExportError` trait (a minimal sketch appears in the appendix at the end of this guide). + +### Priority of configurations + +OpenTelemetry supports multiple ways to configure the API, SDK and other components. The priority of configurations is as follows: + +- Environment variables +- Compile-time configurations provided in the source code + +## Style Guide + +- Run `cargo clippy --all` - this will catch common mistakes and improve +your Rust code +- Run `cargo fmt` - this will find and fix code formatting +issues. + +## Testing and Benchmarking + +- Run `cargo test --all` - this will execute code and doc tests for all +projects in this workspace. +- Run `cargo bench` - this will run benchmarks to show performance +regressions + +## Approvers and Maintainers + +For GitHub groups see the [code owners](CODEOWNERS) file. + +### Maintainers + +* [Cijo Thomas](https://github.com/cijothomas) +* [Harold Dost](https://github.com/hdost) +* [Julian Tescher](https://github.com/jtescher) +* [Zhongyang Wu](https://github.com/TommyCpp) + +### Approvers + +* [Lalit Kumar Bhasin](https://github.com/lalitb) +* [Shaun Cox](https://github.com/shaun-cox) + +### Emeritus + +- [Dirkjan Ochtman](https://github.com/djc) +- [Jan Kühle](https://github.com/frigus02) +- [Isobel Redelmeier](https://github.com/iredelmeier) + +### Become an Approver or a Maintainer + +See the [community membership document in OpenTelemetry community +repo](https://github.com/open-telemetry/community/blob/master/community-membership.md). + +### Thanks to all the people who have contributed + +[![contributors](https://contributors-img.web.app/image?repo=open-telemetry/opentelemetry-rust)](https://github.com/open-telemetry/opentelemetry-rust/graphs/contributors) + +## FAQ + +### Where should I put third party propagators/exporters, contrib or standalone crates? + +As of now, the specification classifies propagators into three categories: fully open standards, platform-specific standards, and proprietary headers. The conclusion is that only fully open standards should live in SDK packages/repos. So here, only fully open standards should live as independent crates. For more detail and discussion, see [this +pr](https://github.com/open-telemetry/opentelemetry-specification/pull/1144).
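+
+## Appendix: Exporter Error Sketch
+
+As a companion to the error-handling guidance above, here is a minimal sketch of a crate-level exporter error type. It assumes the `ExportError` trait shape from the `opentelemetry` crate (a single `exporter_name` method); the `MyExporterError` type and its variants are hypothetical and shown only for illustration:
+
+```rust
+use std::{error::Error, fmt};
+
+// Hypothetical crate-level error type for a custom exporter.
+#[derive(Debug)]
+pub enum MyExporterError {
+    Connection(String),
+    Serialization(String),
+}
+
+impl fmt::Display for MyExporterError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            MyExporterError::Connection(msg) => write!(f, "connection error: {msg}"),
+            MyExporterError::Serialization(msg) => write!(f, "serialization error: {msg}"),
+        }
+    }
+}
+
+impl Error for MyExporterError {}
+
+// Implementing `ExportError` lets the SDK attribute failures to this exporter
+// and wrap them in signal-specific errors such as `TraceError`.
+impl opentelemetry::ExportError for MyExporterError {
+    fn exporter_name(&self) -> &'static str {
+        "my-exporter"
+    }
+}
+```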
diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 00000000..88ecb078 --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,21 @@ +[workspace] +members = [ + "opentelemetry-aws", + "opentelemetry-contrib", +# TODO: Add back in once this relies on a published version +# "opentelemetry-datadog", +# TODO: Add back in once this relies on a published version +# "opentelemetry-stackdriver", + "opentelemetry-user-events-logs", + "opentelemetry-user-events-metrics", +# TODO: Add back in once this relies on a published version +# "opentelemetry-zpages", + "examples/traceresponse", +] +resolver = "2" + +[profile.bench] +# https://doc.rust-lang.org/cargo/reference/profiles.html#bench +# See function names in profiling reports. +# 2/true is too much, 0 is not enough, 1 is just right for backtraces +debug = 1 diff --git a/LICENSE b/LICENSE index 261eeb9e..1ef7dad2 100644 --- a/LICENSE +++ b/LICENSE @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright The OpenTelemetry Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/README.md b/README.md index f3ff8cfa..ecbd3dec 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,138 @@ -# opentelemetry-rust-contrib -OpenTelemetry Contrib Packages for Rust +![OpenTelemetry — An observability framework for cloud-native software.][splash] + +[splash]: https://raw.githubusercontent.com/open-telemetry/opentelemetry-rust-contrib/main/assets/logo-text.png + +# OpenTelemetry Contrib Packages for Rust + +Contrib components for the Rust [OpenTelemetry](https://opentelemetry.io/) implementation. + +[![LICENSE](https://img.shields.io/crates/l/opentelemetry)](./LICENSE) +[![GitHub Actions CI](https://github.com/open-telemetry/opentelemetry-rust-contrib/workflows/CI/badge.svg)](https://github.com/open-telemetry/opentelemetry-rust-contrib/actions?query=workflow%3ACI+branch%3Amain) +[![codecov](https://codecov.io/gh/open-telemetry/opentelemetry-rust-contrib/branch/main/graph/badge.svg)](https://codecov.io/gh/open-telemetry/opentelemetry-rust-contrib) +[![Slack](https://img.shields.io/badge/slack-@cncf/otel/rust-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C03GDP0H023) + +[Website](https://opentelemetry.io/) | +[Slack](https://cloud-native.slack.com/archives/C03GDP0H023) | +[Documentation](https://docs.rs/opentelemetry) + +## Overview + +This is a collection of extra utilities which are outside of the core API, SDK, and +core exporters. + +*Compiler support: [requires `rustc` 1.65+][msrv]* + +[msrv]: #supported-rust-versions + +## Getting Started + +See the [examples](./examples) directory for different integration patterns. + +## Ecosystem + +### Related Crates + +In addition to `opentelemetry`, the [`open-telemetry/opentelemetry-rust`] +repository contains several additional crates designed to be used with the +`opentelemetry` ecosystem. This includes a collection of trace `SpanExporter` +and metrics pull and push controller implementations, as well as utility and +adapter crates to assist in propagating state and instrumenting applications. + +In particular, the following crates are likely to be of interest: + +* [`opentelemetry-aws`] provides unofficial propagators for AWS X-Ray. +* [`opentelemetry-datadog`] provides additional exporters to [`Datadog`]. +* [`opentelemetry-dynatrace`] provides additional exporters to Dynatrace.
+* [`opentelemetry-contrib`] provides additional exporters and propagators that + are experimental. +* [`opentelemetry-http`] provides an interface for injecting and extracting + trace information from [`http`] headers. +* [`opentelemetry-jaeger`] provides a pipeline and exporter for sending trace + information to [`Jaeger`]. +* [`opentelemetry-otlp`] provides an exporter for sending trace and metric data in the + OTLP format to the OpenTelemetry collector. +* [`opentelemetry-prometheus`] provides a pipeline and exporter for sending + metrics information to [`Prometheus`]. +* [`opentelemetry-semantic-conventions`] provides standard names and OpenTelemetry + semantic conventions. +* [`opentelemetry-stackdriver`] provides an exporter for Google's [Cloud Trace] + (which used to be called StackDriver). +* [`opentelemetry-zipkin`] provides a pipeline and exporter for sending trace + information to [`Zipkin`]. + +Additionally, there are also several third-party crates which are not +maintained by the `opentelemetry` project. These include: + +* [`tracing-opentelemetry`] provides integration for applications instrumented + using the [`tracing`] API and ecosystem. +* [`actix-web-opentelemetry`] provides integration for the [`actix-web`] web + server and ecosystem. +* [`opentelemetry-application-insights`] provides an unofficial [Azure + Application Insights] exporter. +* [`opentelemetry-tide`] provides integration for the [`Tide`] web server and + ecosystem. + +If you're the maintainer of an `opentelemetry` ecosystem crate not listed +above, please let us know! We'd love to add your project to the list! + +[`open-telemetry/opentelemetry-rust`]: https://github.com/open-telemetry/opentelemetry-rust +[`opentelemetry-jaeger`]: https://crates.io/crates/opentelemetry-jaeger +[`Jaeger`]: https://www.jaegertracing.io +[`opentelemetry-otlp`]: https://crates.io/crates/opentelemetry-otlp +[`opentelemetry-http`]: https://crates.io/crates/opentelemetry-http +[`opentelemetry-prometheus`]: https://crates.io/crates/opentelemetry-prometheus +[`opentelemetry-aws`]: https://crates.io/crates/opentelemetry-aws +[`Prometheus`]: https://prometheus.io +[`opentelemetry-zipkin`]: https://crates.io/crates/opentelemetry-zipkin +[`Zipkin`]: https://zipkin.io +[`opentelemetry-contrib`]: https://crates.io/crates/opentelemetry-contrib +[`Datadog`]: https://www.datadoghq.com +[`opentelemetry-datadog`]: https://crates.io/crates/opentelemetry-datadog +[`opentelemetry-dynatrace`]: https://crates.io/crates/opentelemetry-dynatrace +[`opentelemetry-semantic-conventions`]: https://crates.io/crates/opentelemetry-semantic-conventions +[`http`]: https://crates.io/crates/http + +[`tracing-opentelemetry`]: https://crates.io/crates/tracing-opentelemetry +[`tracing`]: https://crates.io/crates/tracing +[`actix-web-opentelemetry`]: https://crates.io/crates/actix-web-opentelemetry +[`actix-web`]: https://crates.io/crates/actix-web +[`opentelemetry-application-insights`]: https://crates.io/crates/opentelemetry-application-insights +[Azure Application Insights]: https://docs.microsoft.com/en-us/azure/azure-monitor/app/app-insights-overview +[`opentelemetry-tide`]: https://crates.io/crates/opentelemetry-tide +[`Tide`]: https://crates.io/crates/tide +[`opentelemetry-stackdriver`]: https://crates.io/crates/opentelemetry-stackdriver +[Cloud Trace]: https://cloud.google.com/trace/ + +## Supported Rust Versions + +OpenTelemetry is built against the latest stable release. The minimum supported +version is 1.65.
The current OpenTelemetry version is not guaranteed to build +on Rust versions earlier than the minimum supported version. + +The current stable Rust compiler and the three most recent minor versions +before it will always be supported. For example, if the current stable compiler +version is 1.49, the minimum supported version will not be increased past 1.46, +three minor versions prior. Increasing the minimum supported compiler version +is not considered a semver breaking change as long as doing so complies with +this policy. + +## Contributing + +See the [contributing file](CONTRIBUTING.md). + +The Rust special interest group (SIG) meets weekly on Tuesdays at 8 AM Pacific +Time (16:00 UTC). The meeting is subject to change depending on contributors' +availability. Check the [OpenTelemetry community +calendar](https://calendar.google.com/calendar/embed?src=google.com_b79e3e90j7bbsa2n2p5an5lf60%40group.calendar.google.com) +for specific dates and for Zoom meeting links. "OTel Rust SIG" is the name of the +meeting for this group. + +Meeting notes are available as a public [Google +doc](https://docs.google.com/document/d/1tGKuCsSnyT2McDncVJrMgg74_z8V06riWZa0Sr79I_4/edit). +If you have trouble accessing the doc, please get in touch on +[Slack](https://cloud-native.slack.com/archives/C03GDP0H023). + +The meeting is open for all to join. We invite everyone to join our meeting, +regardless of your experience level. Whether you're a seasoned OpenTelemetry +developer, just starting your journey, or simply curious about the work we do, +you're more than welcome to participate! diff --git a/VERSIONING.md b/VERSIONING.md new file mode 100644 index 00000000..6fa7fb4d --- /dev/null +++ b/VERSIONING.md @@ -0,0 +1,66 @@ +# Versioning + +This document describes the versioning policy for this repository. This policy +is designed so the following goals can be achieved. + +## Goals + +### API Stability + +Once the API for a given signal (spans, logs, metrics, baggage) has been +officially released, that API module will function with any SDK that has the +same major version, and equal or greater minor or patch version. + +For example, libraries that are instrumented with `opentelemetry 1.0.1` will +function in applications using `opentelemetry 1.11.33` or `opentelemetry +1.3.4`. + +### SDK Stability + +Public portions of the SDK (constructors, configuration, end-user interfaces) +must remain backwards compatible. Internal types are allowed to break. + +## Policy + +* Releases will follow [SemVer](https://semver.org/). +* New telemetry signals will be introduced behind experimental + [cargo features](https://doc.rust-lang.org/cargo/reference/features.html). + + * New signals will be stabilized via a **minor version bump**, and are not + allowed to break existing stable interfaces. + +* GitHub releases will be made for all released versions. +* Crates will be released on crates.io. + +## Example Versioning Lifecycle + +To better understand the implementation of the above policy, here is an example +of how the metrics and logging signals **could** stabilize.
+ +- v1.0.0 release: + - `opentelemetry 1.0.0` + - Contains stable impls of trace, baggage, resource, context modules + - experimental metrics impl behind feature flag + - `opentelemetry-semantic-conventions 1.0.0` + - Contains stable impls of trace, resource conventions + - experimental metrics conventions behind feature flag + - `opentelemetry-contrib 1.0.0` + - Contains stable impls of 3rd party trace exporters and propagators + - experimental metrics exporters and propagator impls behind feature flag +- v1.5.0 release (with metrics) + - `opentelemetry 1.5.0` + - Contains stable impls of metrics, trace, baggage, resource, context modules + - experimental logging impl still only behind feature flag + - `opentelemetry-semantic-conventions 1.2.0` + - Contains stable impls of metrics, trace, resource conventions + - experimental logging conventions still only behind feature flag + - `opentelemetry-contrib 1.6.0` + - Contains stable impls of 3rd party trace and metrics exporters and propagators + - experimental logging exporter and propagator impls still behind feature flag +- v1.10.0 release (with logging) + - `opentelemetry 1.10.0` + - Contains stable impls of logging, metrics, trace, baggage, resource, context modules + - `opentelemetry-semantic-conventions 1.4.0` + - Contains stable impls of logging, metrics, trace, resource conventions + - `opentelemetry-contrib 1.12.0` + - Contains stable impls of 3rd party trace, metrics, and logging exporters and propagators diff --git a/assets/logo-text.png b/assets/logo-text.png new file mode 100644 index 00000000..c2cab346 Binary files /dev/null and b/assets/logo-text.png differ diff --git a/assets/logo.svg b/assets/logo.svg new file mode 100644 index 00000000..e9c931ae --- /dev/null +++ b/assets/logo.svg @@ -0,0 +1 @@ + diff --git a/deny.toml b/deny.toml new file mode 100644 index 00000000..f1c55337 --- /dev/null +++ b/deny.toml @@ -0,0 +1,38 @@ +exclude=[ + "actix-http", + "actix-http-tracing", + "actix-udp", + "actix-udp-example", + "tracing-grpc", + "http" +] + +[licenses] +unlicensed = "deny" +allow = [ + "MIT", + "Apache-2.0", + "ISC", + "BSD-3-Clause", + "OpenSSL" +] + +[licenses.private] +ignore = true + +[[licenses.clarify]] +name = "ring" +version = "*" +expression = "MIT AND ISC AND OpenSSL" +license-files = [ + { path = "LICENSE", hash = 0xbd0eed23 } +] + +[advisories] +ignore = [ + # unsoundness in indirect dependencies without a safe upgrade below + "RUSTSEC-2021-0145", + "RUSTSEC-2019-0036" +] +unmaintained = "allow" +yanked = "allow" \ No newline at end of file diff --git a/examples/README.md b/examples/README.md new file mode 100644 index 00000000..dbdcd5f8 --- /dev/null +++ b/examples/README.md @@ -0,0 +1,41 @@ +# Examples +This folder contains some examples that should help you get started with crates from `opentelemetry-rust`. + +## log-basic +**Logs** + +This example uses the following crates from this repo: +- opentelemetry(log) +- opentelemetry-appender-log +- opentelemetry-stdout + +Check this example if you want to understand *how to instrument logs using opentelemetry*. + +## metrics-basic +**Metrics** + +This example uses the following crates from this repo: +- opentelemetry(metrics) +- opentelemetry-stdout + +Check this example if you want to understand *how to instrument metrics using opentelemetry*.
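+
+The instrumentation pattern these basic examples follow is small enough to sketch here. This is a sketch only, assuming the `opentelemetry` 0.21 metrics API used elsewhere in this workspace and a globally configured meter provider; the meter, counter, and attribute names are illustrative:
+
+```rust
+use opentelemetry::{global, KeyValue};
+
+fn main() {
+    // Obtain a meter from the globally configured provider and record a value.
+    let meter = global::meter("example");
+    let counter = meter.u64_counter("requests").init();
+    counter.add(1, &[KeyValue::new("route", "/")]);
+}
+```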
+ +## traceresponse +**Tracing** + +This example uses the following crates from this repo: +- opentelemetry(tracing) +- opentelemetry-http +- opentelemetry-contrib(TraceContextResponsePropagator) +- opentelemetry-stdout + +## tracing-grpc +**Tracing** + +This example uses the following crates from this repo: +- opentelemetry(tracing) +- opentelemetry-jaeger + +The application is built using `tokio`. + +Check this example if you want to understand *how to integrate tracing with opentelemetry*. \ No newline at end of file diff --git a/examples/traceresponse/Cargo.toml b/examples/traceresponse/Cargo.toml new file mode 100644 index 00000000..2ca44c88 --- /dev/null +++ b/examples/traceresponse/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "traceresponse" +version = "0.1.0" +edition = "2021" +license = "Apache-2.0" +publish = false + +[[bin]] # Bin to run the http server +name = "http-server" +path = "src/server.rs" +doc = false + +[[bin]] # Bin to run the client +name = "http-client" +path = "src/client.rs" +doc = false + +[dependencies] +hyper = { version = "0.14", features = ["full"] } +tokio = { version = "1.0", features = ["full"] } +opentelemetry = { version = "0.21.0" } +opentelemetry_sdk = { version = "0.21.0" } +opentelemetry-http = { version = "0.10.0" } +opentelemetry-contrib = { path = "../../opentelemetry-contrib" } +opentelemetry-stdout = { version = "0.2", features = ["trace"] } diff --git a/examples/traceresponse/README.md b/examples/traceresponse/README.md new file mode 100644 index 00000000..aeaa6bdb --- /dev/null +++ b/examples/traceresponse/README.md @@ -0,0 +1,28 @@ +# HTTP Example + +This is a simple example using [hyper] that demonstrates tracing an HTTP request +from client to server, and from the server back to the client, using the +[W3C Trace Context Response] header. The example shows key aspects of tracing +such as: + +- Root Span (on Client) +- Child Span from a Remote Parent (on Server) +- SpanContext Propagation (from Client to Server) +- SpanContext Propagation (from Server to Client) +- Span Events +- Span Attributes + +[hyper]: https://hyper.rs/ +[W3C Trace Context Response]: https://w3c.github.io/trace-context/#traceresponse-header + +## Usage + +```shell +# Run server +$ cargo run --bin http-server + +# In another tab, run client +$ cargo run --bin http-client + +# The spans should be visible in stdout in the order that they were exported. +``` diff --git a/examples/traceresponse/src/client.rs b/examples/traceresponse/src/client.rs new file mode 100644 index 00000000..d1f6c595 --- /dev/null +++ b/examples/traceresponse/src/client.rs @@ -0,0 +1,69 @@ +use hyper::http::HeaderValue; +use hyper::{body::Body, Client}; +use opentelemetry::{ + global, + propagation::TextMapPropagator, + trace::{SpanKind, TraceContextExt, Tracer}, + Context, KeyValue, +}; +use opentelemetry_contrib::trace::propagator::trace_context_response::TraceContextResponsePropagator; +use opentelemetry_http::{HeaderExtractor, HeaderInjector}; +use opentelemetry_sdk::{propagation::TraceContextPropagator, trace::TracerProvider}; +use opentelemetry_stdout::SpanExporter; + +fn init_tracer() { + global::set_text_map_propagator(TraceContextPropagator::new()); + // Install stdout exporter pipeline to be able to retrieve the collected spans. + // For the demonstration, use `Sampler::AlwaysOn` sampler to sample all traces. In a production + // application, use `Sampler::ParentBased` or `Sampler::TraceIdRatioBased` with a desired ratio.
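+    //
+    // A sketch of that production setup, kept commented out here; it assumes the
+    // `opentelemetry_sdk::trace::{Config, Sampler}` types from the 0.21 SDK:
+    //
+    // let provider = TracerProvider::builder()
+    //     .with_config(Config::default().with_sampler(
+    //         Sampler::ParentBased(Box::new(Sampler::TraceIdRatioBased(0.1))),
+    //     ))
+    //     .with_simple_exporter(SpanExporter::default())
+    //     .build();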
+ let provider = TracerProvider::builder() + .with_simple_exporter(SpanExporter::default()) + .build(); + + global::set_tracer_provider(provider); +} + +#[tokio::main] +async fn main() -> std::result::Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> { + init_tracer(); + + let client = Client::new(); + let tracer = global::tracer("example/client"); + let span = tracer + .span_builder("say hello") + .with_kind(SpanKind::Client) + .start(&tracer); + let cx = Context::current_with_span(span); + + let mut req = hyper::Request::builder().uri("http://127.0.0.1:3000"); + global::get_text_map_propagator(|propagator| { + propagator.inject_context(&cx, &mut HeaderInjector(req.headers_mut().unwrap())) + }); + let res = client.request(req.body(Body::from("Hello!"))?).await?; + + let response_propagator: &dyn TextMapPropagator = &TraceContextResponsePropagator::new(); + + let response_cx = + response_propagator.extract_with_context(&cx, &HeaderExtractor(res.headers())); + + let response_span = response_cx.span(); + + cx.span().add_event( + "Got response!".to_string(), + vec![ + KeyValue::new("status", res.status().to_string()), + KeyValue::new( + "traceresponse", + res.headers() + .get("traceresponse") + .unwrap_or(&HeaderValue::from_static("")) + .to_str() + .unwrap() + .to_string(), + ), + KeyValue::new("child_sampled", response_span.span_context().is_sampled()), + ], + ); + + Ok(()) +} diff --git a/examples/traceresponse/src/server.rs b/examples/traceresponse/src/server.rs new file mode 100644 index 00000000..415342c6 --- /dev/null +++ b/examples/traceresponse/src/server.rs @@ -0,0 +1,67 @@ +use hyper::{ + service::{make_service_fn, service_fn}, + Body, Request, Response, Server, +}; +use opentelemetry::{ + global, + propagation::TextMapPropagator, + trace::{SpanKind, TraceContextExt, Tracer}, + Context, +}; +use opentelemetry_contrib::trace::propagator::trace_context_response::TraceContextResponsePropagator; +use opentelemetry_http::{HeaderExtractor, HeaderInjector}; +use opentelemetry_sdk::{propagation::TraceContextPropagator, trace::TracerProvider}; +use opentelemetry_stdout::SpanExporter; +use std::{convert::Infallible, net::SocketAddr}; + +async fn handle(req: Request<Body>) -> Result<Response<Body>, Infallible> { + let parent_cx = global::get_text_map_propagator(|propagator| { + propagator.extract(&HeaderExtractor(req.headers())) + }); + let _cx_guard = parent_cx.attach(); + + let tracer = global::tracer("example/server"); + let span = tracer + .span_builder("say hello") + .with_kind(SpanKind::Server) + .start(&tracer); + + let cx = Context::current_with_span(span); + + cx.span().add_event("handling this...", Vec::new()); + + let mut res = Response::new("Hello, World!".into()); + + let response_propagator: &dyn TextMapPropagator = &TraceContextResponsePropagator::new(); + response_propagator.inject_context(&cx, &mut HeaderInjector(res.headers_mut())); + + Ok(res) +} + +fn init_tracer() { + global::set_text_map_propagator(TraceContextPropagator::new()); + + // Install stdout exporter pipeline to be able to retrieve the collected spans. + // For the demonstration, use `Sampler::AlwaysOn` sampler to sample all traces. In a production + // application, use `Sampler::ParentBased` or `Sampler::TraceIdRatioBased` with a desired ratio.
+ let provider = TracerProvider::builder() + .with_simple_exporter(SpanExporter::default()) + .build(); + + global::set_tracer_provider(provider); +} + +#[tokio::main] +async fn main() { + init_tracer(); + let addr = SocketAddr::from(([127, 0, 0, 1], 3000)); + + let make_svc = make_service_fn(|_conn| async { Ok::<_, Infallible>(service_fn(handle)) }); + + let server = Server::bind(&addr).serve(make_svc); + + println!("Listening on {addr}"); + if let Err(e) = server.await { + eprintln!("server error: {e}"); + } +} diff --git a/opentelemetry-aws/CHANGELOG.md b/opentelemetry-aws/CHANGELOG.md new file mode 100644 index 00000000..6b6327db --- /dev/null +++ b/opentelemetry-aws/CHANGELOG.md @@ -0,0 +1,65 @@ +# Changelog + +## vNext + +## v0.9.0 + +### Changed + +- Bump MSRV to 1.65 [#1318](https://github.com/open-telemetry/opentelemetry-rust/pull/1318) +- Bump MSRV to 1.64 [#1203](https://github.com/open-telemetry/opentelemetry-rust/pull/1203) + +## v0.8.0 + +### Changed + +- Update to opentelemetry-api v0.20.0 + +## v0.7.0 +### Added +- Add public functions for AWS trace header [#887](https://github.com/open-telemetry/opentelemetry-rust/pull/887). + +### Changed +- Bump MSRV to 1.57 [#953](https://github.com/open-telemetry/opentelemetry-rust/pull/953) +- Update dependencies and bump MSRV to 1.60 [#969](https://github.com/open-telemetry/opentelemetry-rust/pull/969). + +## v0.6.0 + +### Changed + +- reduce `tokio` feature requirements #750 +- Update to opentelemetry v0.18.0 + +### Fixed + +- Fix XrayPropagator when no header is present #867 + +## v0.5.0 + +### Changed + +- Update to opentelemetry v0.17.0 + +## v0.4.0 + +### Changed + +- Update to opentelemetry v0.16.0 + +## v0.3.0 + +### Changed + +- Update to opentelemetry v0.15.0 + +## v0.2.0 + +### Changed + +- Update to opentelemetry v0.14.0 + +## v0.1.0 + +### Added + +- AWS XRay propagator #446 diff --git a/opentelemetry-aws/CODEOWNERS b/opentelemetry-aws/CODEOWNERS new file mode 100644 index 00000000..d6962a90 --- /dev/null +++ b/opentelemetry-aws/CODEOWNERS @@ -0,0 +1,5 @@ +# Code owners file. +# This file controls who is tagged for review for any given pull request. 
+ +# For anything not explicitly taken by someone else: +* @open-telemetry/rust-approvers diff --git a/opentelemetry-aws/Cargo.toml b/opentelemetry-aws/Cargo.toml new file mode 100644 index 00000000..6fb9dfff --- /dev/null +++ b/opentelemetry-aws/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "opentelemetry-aws" +version = "0.9.0" +description = "AWS exporters and propagators for OpenTelemetry" +homepage = "https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/opentelemetry-aws" +repository = "https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/opentelemetry-aws" +readme = "README.md" +categories = [ + "development-tools::debugging", + "development-tools::profiling", +] +keywords = ["opentelemetry", "tracing"] +license = "Apache-2.0" +edition = "2021" +rust-version = "1.65" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[features] +default = ["trace"] +trace = ["opentelemetry/trace"] + +[dependencies] +once_cell = "1.12" +opentelemetry = { version = "0.21" } + +[dev-dependencies] +opentelemetry_sdk = { version = "0.21", features = ["trace", "testing"] } +opentelemetry-http = { version = "0.10" } +opentelemetry-stdout = { version = "0.2", features = ["trace"] } +hyper = { version = "0.14" } +tokio = { version = "1.0", features = ["macros", "rt"] } diff --git a/opentelemetry-aws/LICENSE b/opentelemetry-aws/LICENSE new file mode 100644 index 00000000..23a2acab --- /dev/null +++ b/opentelemetry-aws/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2023 The OpenTelemetry Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/opentelemetry-aws/README.md b/opentelemetry-aws/README.md new file mode 100644 index 00000000..7f6cefc7 --- /dev/null +++ b/opentelemetry-aws/README.md @@ -0,0 +1,27 @@ +![OpenTelemetry — An observability framework for cloud-native software.][splash] + +[splash]: https://raw.githubusercontent.com/open-telemetry/opentelemetry-rust/main/assets/logo-text.png + +# OpenTelemetry AWS + +Additional types for exporting [`OpenTelemetry`] data to AWS. 
+ +[![Crates.io: opentelemetry-aws](https://img.shields.io/crates/v/opentelemetry-aws.svg)](https://crates.io/crates/opentelemetry-aws) +[![Documentation](https://docs.rs/opentelemetry-aws/badge.svg)](https://docs.rs/opentelemetry-aws) +[![LICENSE](https://img.shields.io/crates/l/opentelemetry-aws)](./LICENSE) +[![GitHub Actions CI](https://github.com/open-telemetry/opentelemetry-rust/workflows/CI/badge.svg)](https://github.com/open-telemetry/opentelemetry-rust/actions?query=workflow%3ACI+branch%3Amain) +[![Slack](https://img.shields.io/badge/slack-@cncf/otel/rust-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C03GDP0H023) + +## Overview + +[`OpenTelemetry`] is a collection of tools, APIs, and SDKs used to instrument, +generate, collect, and export telemetry data (metrics, logs, and traces) for +analysis in order to understand your software's performance and behavior. This +crate provides additional propagators and exporters for sending telemetry data +to AWS's telemetry platform. + +## Supported component + +Currently, this crate only supports the `XRay` propagator. Contributions are welcome. + +[`OpenTelemetry`]: https://crates.io/crates/opentelemetry diff --git a/opentelemetry-aws/src/lib.rs b/opentelemetry-aws/src/lib.rs new file mode 100644 index 00000000..8a8d0e10 --- /dev/null +++ b/opentelemetry-aws/src/lib.rs @@ -0,0 +1,391 @@ +//! This crate provides unofficial integration with AWS services. +//! +//! # Components +//! As of now, the only component provided in this crate is the AWS X-Ray propagator. +//! +//! ### AWS X-Ray Propagator +//! This propagator helps propagate tracing information from upstream services to downstream services. +//! +//! ### Quick start +//! ```no_run +//! use opentelemetry::{global, trace::{Tracer, TracerProvider as _}}; +//! use opentelemetry_aws::trace::XrayPropagator; +//! use opentelemetry_sdk::trace::TracerProvider; +//! use opentelemetry_stdout::SpanExporter; +//! use opentelemetry_http::HeaderInjector; +//! +//! #[tokio::main] +//! async fn main() -> std::result::Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> { +//! // Set the global propagator to X-Ray propagator +//! global::set_text_map_propagator(XrayPropagator::default()); +//! let provider = TracerProvider::builder() +//! .with_simple_exporter(SpanExporter::default()) +//! .build(); +//! let tracer = provider.tracer("readme_example"); +//! +//! let mut req = hyper::Request::builder().uri("http://127.0.0.1:3000"); +//! tracer.in_span("doing_work", |cx| { +//! // Send request to downstream services. +//! // Build request +//! global::get_text_map_propagator(|propagator| { +//! // Set X-Ray tracing header in request object `req` +//! propagator.inject_context(&cx, &mut HeaderInjector(req.headers_mut().unwrap())); +//! println!("Headers: {:?}", req.headers_ref()); +//! }) +//! }); +//! +//! Ok(()) +//! } +//! ``` +//!
+//! A more detailed example can be found in the [opentelemetry-rust](https://github.com/open-telemetry/opentelemetry-rust/tree/main/examples/aws-xray) repo.
+
+#[cfg(feature = "trace")]
+pub use trace::XrayPropagator;
+
+#[cfg(feature = "trace")]
+pub mod trace {
+    use once_cell::sync::Lazy;
+    use opentelemetry::{
+        global::{self, Error},
+        propagation::{text_map_propagator::FieldIter, Extractor, Injector, TextMapPropagator},
+        trace::{
+            SpanContext, SpanId, TraceContextExt, TraceError, TraceFlags, TraceId, TraceState,
+        },
+        Context,
+    };
+    use std::borrow::Cow;
+    use std::convert::TryFrom;
+
+    const AWS_XRAY_TRACE_HEADER: &str = "x-amzn-trace-id";
+    const AWS_XRAY_VERSION_KEY: &str = "1";
+    const HEADER_PARENT_KEY: &str = "Parent";
+    const HEADER_ROOT_KEY: &str = "Root";
+    const HEADER_SAMPLED_KEY: &str = "Sampled";
+
+    const SAMPLED: &str = "1";
+    const NOT_SAMPLED: &str = "0";
+    const REQUESTED_SAMPLE_DECISION: &str = "?";
+
+    const TRACE_FLAG_DEFERRED: TraceFlags = TraceFlags::new(0x02);
+
+    static AWS_XRAY_HEADER_FIELD: Lazy<[String; 1]> =
+        Lazy::new(|| [AWS_XRAY_TRACE_HEADER.to_owned()]);
+
+    /// Extracts and injects `SpanContext`s into `Extractor`s or `Injector`s using the AWS X-Ray header format.
+    ///
+    /// Extracts and injects values to/from the `x-amzn-trace-id` header, converting between
+    /// the OpenTelemetry [SpanContext][otel-spec] and the [X-Ray Trace format][xray-trace-id].
+    ///
+    /// For details on the [`x-amzn-trace-id` header][xray-header], see the AWS X-Ray Docs.
+    ///
+    /// ## Example
+    ///
+    /// ```
+    /// use opentelemetry::global;
+    /// use opentelemetry_aws::trace::XrayPropagator;
+    ///
+    /// global::set_text_map_propagator(XrayPropagator::default());
+    /// ```
+    ///
+    /// [otel-spec]: https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/api.md#SpanContext
+    /// [xray-trace-id]: https://docs.aws.amazon.com/xray/latest/devguide/xray-api-sendingdata.html#xray-api-traceids
+    /// [xray-header]: https://docs.aws.amazon.com/xray/latest/devguide/xray-concepts.html#xray-concepts-tracingheader
+    #[derive(Clone, Debug, Default)]
+    pub struct XrayPropagator {
+        _private: (),
+    }
+
+    /// Extract `SpanContext` from an AWS X-Ray format string
+    ///
+    /// Extract an OpenTelemetry [SpanContext][otel-spec] from an [X-Ray Trace format][xray-trace-id] string.
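+    ///
+    /// ## Example
+    ///
+    /// A short sketch of parsing a sampled header (the header value is taken from the tests below):
+    ///
+    /// ```
+    /// use opentelemetry_aws::trace::span_context_from_str;
+    /// use opentelemetry::trace::TraceFlags;
+    ///
+    /// let cx = span_context_from_str(
+    ///     "Root=1-58406520-a006649127e371903a2de979;Parent=4c721bf33e3caf8f;Sampled=1",
+    /// )
+    /// .expect("well-formed X-Ray header");
+    /// assert!(cx.is_valid());
+    /// assert_eq!(cx.trace_flags(), TraceFlags::SAMPLED);
+    /// ```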
+    ///
+    /// [otel-spec]: https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/api.md#SpanContext
+    /// [xray-trace-id]: https://docs.aws.amazon.com/xray/latest/devguide/xray-api-sendingdata.html#xray-api-traceids
+    pub fn span_context_from_str(value: &str) -> Option<SpanContext> {
+        let parts: Vec<(&str, &str)> = value
+            .split_terminator(';')
+            .filter_map(from_key_value_pair)
+            .collect();
+
+        let mut trace_id = TraceId::INVALID;
+        let mut parent_segment_id = SpanId::INVALID;
+        let mut sampling_decision = TRACE_FLAG_DEFERRED;
+        let mut kv_vec = Vec::with_capacity(parts.len());
+
+        for (key, value) in parts {
+            match key {
+                HEADER_ROOT_KEY => match TraceId::try_from(XrayTraceId(Cow::from(value))) {
+                    Err(_) => return None,
+                    Ok(parsed) => trace_id = parsed,
+                },
+                HEADER_PARENT_KEY => {
+                    parent_segment_id = SpanId::from_hex(value).unwrap_or(SpanId::INVALID)
+                }
+                HEADER_SAMPLED_KEY => {
+                    sampling_decision = match value {
+                        NOT_SAMPLED => TraceFlags::default(),
+                        SAMPLED => TraceFlags::SAMPLED,
+                        REQUESTED_SAMPLE_DECISION => TRACE_FLAG_DEFERRED,
+                        _ => TRACE_FLAG_DEFERRED,
+                    }
+                }
+                _ => kv_vec.push((key.to_ascii_lowercase(), value.to_string())),
+            }
+        }
+
+        match TraceState::from_key_value(kv_vec) {
+            Ok(trace_state) => {
+                if trace_id == TraceId::INVALID {
+                    return None;
+                }
+
+                Some(SpanContext::new(
+                    trace_id,
+                    parent_segment_id,
+                    sampling_decision,
+                    true,
+                    trace_state,
+                ))
+            }
+            Err(trace_state_err) => {
+                global::handle_error(Error::Trace(TraceError::Other(Box::new(trace_state_err))));
+                None //todo: assign an error type instead of using None
+            }
+        }
+    }
+
+    /// Generate an AWS X-Ray format string from a `SpanContext`
+    ///
+    /// Generate an [X-Ray Trace format][xray-trace-id] string from an OpenTelemetry [SpanContext][otel-spec]
+    ///
+    /// [xray-trace-id]: https://docs.aws.amazon.com/xray/latest/devguide/xray-api-sendingdata.html#xray-api-traceids
+    /// [otel-spec]: https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/api.md#SpanContext
+    pub fn span_context_to_string(span_context: &SpanContext) -> Option<String> {
+        if !span_context.is_valid() {
+            return None;
+        }
+
+        let xray_trace_id = XrayTraceId::from(span_context.trace_id());
+
+        let sampling_decision =
+            if span_context.trace_flags() & TRACE_FLAG_DEFERRED == TRACE_FLAG_DEFERRED {
+                REQUESTED_SAMPLE_DECISION
+            } else if span_context.is_sampled() {
+                SAMPLED
+            } else {
+                NOT_SAMPLED
+            };
+
+        let trace_state_header = span_context
+            .trace_state()
+            .header_delimited("=", ";")
+            .split_terminator(';')
+            .map(title_case)
+            .collect::<Vec<String>>()
+            .join(";");
+        let trace_state_prefix = if trace_state_header.is_empty() {
+            ""
+        } else {
+            ";"
+        };
+
+        Some(format!(
+            "{}={};{}={:016x};{}={}{}{}",
+            HEADER_ROOT_KEY,
+            xray_trace_id.0,
+            HEADER_PARENT_KEY,
+            span_context.span_id(),
+            HEADER_SAMPLED_KEY,
+            sampling_decision,
+            trace_state_prefix,
+            trace_state_header
+        ))
+    }
+
+    impl XrayPropagator {
+        /// Creates a new `XrayPropagator`.
+        pub fn new() -> Self {
+            XrayPropagator::default()
+        }
+
+        fn extract_span_context(&self, extractor: &dyn Extractor) -> Option<SpanContext> {
+            span_context_from_str(extractor.get(AWS_XRAY_TRACE_HEADER)?.trim())
+        }
+    }
+
+    impl TextMapPropagator for XrayPropagator {
+        fn inject_context(&self, cx: &Context, injector: &mut dyn Injector) {
+            let span = cx.span();
+            let span_context = span.span_context();
+            if let Some(header_value) = span_context_to_string(span_context) {
+                injector.set(AWS_XRAY_TRACE_HEADER, header_value);
+            }
+        }
+
+        fn extract_with_context(&self, cx: &Context, extractor: &dyn Extractor) -> Context {
+            self.extract_span_context(extractor)
+                .map(|sc| cx.with_remote_span_context(sc))
+                .unwrap_or_else(|| cx.clone())
+        }
+
+        fn fields(&self) -> FieldIter<'_> {
+            FieldIter::new(AWS_XRAY_HEADER_FIELD.as_ref())
+        }
+    }
+
+    /// Holds an X-Ray formatted Trace ID
+    ///
+    /// A `trace_id` consists of three numbers separated by hyphens. For example, `1-58406520-a006649127e371903a2de979`.
+    /// This includes:
+    ///
+    /// * The version number, that is, 1.
+    /// * The time of the original request, in Unix epoch time, in 8 hexadecimal digits.
+    ///   * For example, 10:00AM December 1st, 2016 PST in epoch time is 1480615200 seconds, or 58406520 in hexadecimal digits.
+    /// * A 96-bit identifier for the trace, globally unique, in 24 hexadecimal digits.
+    ///
+    /// See the [AWS X-Ray Documentation][xray-trace-id] for more details.
+    ///
+    /// [xray-trace-id]: https://docs.aws.amazon.com/xray/latest/devguide/xray-api-sendingdata.html#xray-api-traceids
+    #[derive(Clone, Debug, PartialEq)]
+    struct XrayTraceId<'a>(Cow<'a, str>);
+
+    impl<'a> TryFrom<XrayTraceId<'a>> for TraceId {
+        type Error = ();
+
+        fn try_from(id: XrayTraceId<'a>) -> Result<Self, Self::Error> {
+            let parts: Vec<&str> = id.0.split_terminator('-').collect();
+
+            if parts.len() != 3 {
+                return Err(());
+            }
+
+            let trace_id: TraceId =
+                TraceId::from_hex(format!("{}{}", parts[1], parts[2]).as_str()).map_err(|_| ())?;
+
+            if trace_id == TraceId::INVALID {
+                Err(())
+            } else {
+                Ok(trace_id)
+            }
+        }
+    }
+
+    impl From<TraceId> for XrayTraceId<'static> {
+        fn from(trace_id: TraceId) -> Self {
+            let trace_id_as_hex = trace_id.to_string();
+            let (timestamp, xray_id) = trace_id_as_hex.split_at(8_usize);
+
+            XrayTraceId(Cow::from(format!(
+                "{}-{}-{}",
+                AWS_XRAY_VERSION_KEY, timestamp, xray_id
+            )))
+        }
+    }
+
+    fn from_key_value_pair(pair: &str) -> Option<(&str, &str)> {
+        let mut key_value_pair: Option<(&str, &str)> = None;
+
+        if let Some(index) = pair.find('=') {
+            let (key, value) = pair.split_at(index);
+            key_value_pair = Some((key, value.trim_start_matches('=')));
+        }
+        key_value_pair
+    }
+
+    fn title_case(s: &str) -> String {
+        let mut capitalized: String = String::with_capacity(s.len());
+
+        if !s.is_empty() {
+            let mut characters = s.chars();
+
+            if let Some(first) = characters.next() {
+                capitalized.push(first.to_ascii_uppercase())
+            }
+            capitalized.extend(characters);
+        }
+
+        capitalized
+    }
+
+    #[cfg(test)]
+    mod tests {
+        use super::*;
+        use opentelemetry::trace::TraceState;
+        use opentelemetry_sdk::testing::trace::TestSpan;
+        use std::collections::HashMap;
+        use std::str::FromStr;
+
+        #[rustfmt::skip]
+        fn extract_test_data() -> Vec<(&'static str, SpanContext)> {
+            vec![
+                ("", SpanContext::empty_context()),
+                ("Sampled=1;Self=foo", SpanContext::empty_context()),
+                ("Root=1-bogus-bad", SpanContext::empty_context()),
+                ("Root=1-too-many-parts", SpanContext::empty_context()),
+                ("Root=1-58406520-a006649127e371903a2de979;Parent=garbage",
SpanContext::new(TraceId::from_hex("58406520a006649127e371903a2de979").unwrap(), SpanId::INVALID, TRACE_FLAG_DEFERRED, true, TraceState::default())), + ("Root=1-58406520-a006649127e371903a2de979;Sampled=1", SpanContext::new(TraceId::from_hex("58406520a006649127e371903a2de979").unwrap(), SpanId::INVALID, TraceFlags::SAMPLED, true, TraceState::default())), + ("Root=1-58406520-a006649127e371903a2de979;Parent=4c721bf33e3caf8f;Sampled=0", SpanContext::new(TraceId::from_hex("58406520a006649127e371903a2de979").unwrap(), SpanId::from_hex("4c721bf33e3caf8f").unwrap(), TraceFlags::default(), true, TraceState::default())), + ("Root=1-58406520-a006649127e371903a2de979;Parent=4c721bf33e3caf8f;Sampled=1", SpanContext::new(TraceId::from_hex("58406520a006649127e371903a2de979").unwrap(), SpanId::from_hex("4c721bf33e3caf8f").unwrap(), TraceFlags::SAMPLED, true, TraceState::default())), + ("Root=1-58406520-a006649127e371903a2de979;Parent=4c721bf33e3caf8f", SpanContext::new(TraceId::from_hex("58406520a006649127e371903a2de979").unwrap(), SpanId::from_hex("4c721bf33e3caf8f").unwrap(), TRACE_FLAG_DEFERRED, true, TraceState::default())), + ("Root=1-58406520-a006649127e371903a2de979;Parent=4c721bf33e3caf8f;Sampled=?", SpanContext::new(TraceId::from_hex("58406520a006649127e371903a2de979").unwrap(), SpanId::from_hex("4c721bf33e3caf8f").unwrap(), TRACE_FLAG_DEFERRED, true, TraceState::default())), + ("Root=1-58406520-a006649127e371903a2de979;Self=1-58406520-bf42676c05e20ba4a90e448e;Parent=4c721bf33e3caf8f;Sampled=1", SpanContext::new(TraceId::from_hex("58406520a006649127e371903a2de979").unwrap(), SpanId::from_hex("4c721bf33e3caf8f").unwrap(), TraceFlags::SAMPLED, true, TraceState::from_str("self=1-58406520-bf42676c05e20ba4a90e448e").unwrap())), + ("Root=1-58406520-a006649127e371903a2de979;Self=1-58406520-bf42676c05e20ba4a90e448e;Parent=4c721bf33e3caf8f;Sampled=1;RandomKey=RandomValue", SpanContext::new(TraceId::from_hex("58406520a006649127e371903a2de979").unwrap(), SpanId::from_hex("4c721bf33e3caf8f").unwrap(), TraceFlags::SAMPLED, true, TraceState::from_str("self=1-58406520-bf42676c05e20ba4a90e448e,randomkey=RandomValue").unwrap())), + ] + } + + #[rustfmt::skip] + fn inject_test_data() -> Vec<(&'static str, SpanContext)> { + vec![ + ("", SpanContext::empty_context()), + ("", SpanContext::new(TraceId::INVALID, SpanId::INVALID, TRACE_FLAG_DEFERRED, true, TraceState::default())), + ("", SpanContext::new(TraceId::from_hex("58406520a006649127e371903a2de979").unwrap(), SpanId::INVALID, TRACE_FLAG_DEFERRED, true, TraceState::default())), + ("", SpanContext::new(TraceId::from_hex("58406520a006649127e371903a2de979").unwrap(), SpanId::INVALID, TraceFlags::SAMPLED, true, TraceState::default())), + ("Root=1-58406520-a006649127e371903a2de979;Parent=4c721bf33e3caf8f;Sampled=0", SpanContext::new(TraceId::from_hex("58406520a006649127e371903a2de979").unwrap(), SpanId::from_hex("4c721bf33e3caf8f").unwrap(), TraceFlags::default(), true, TraceState::default())), + ("Root=1-58406520-a006649127e371903a2de979;Parent=4c721bf33e3caf8f;Sampled=1", SpanContext::new(TraceId::from_hex("58406520a006649127e371903a2de979").unwrap(), SpanId::from_hex("4c721bf33e3caf8f").unwrap(), TraceFlags::SAMPLED, true, TraceState::default())), + ("Root=1-58406520-a006649127e371903a2de979;Parent=4c721bf33e3caf8f;Sampled=?;Self=1-58406520-bf42676c05e20ba4a90e448e;Randomkey=RandomValue", SpanContext::new(TraceId::from_hex("58406520a006649127e371903a2de979").unwrap(), SpanId::from_hex("4c721bf33e3caf8f").unwrap(), TRACE_FLAG_DEFERRED, true, 
TraceState::from_str("self=1-58406520-bf42676c05e20ba4a90e448e,randomkey=RandomValue").unwrap())),
+            ]
+        }
+
+        #[test]
+        fn test_extract() {
+            for (header, expected) in extract_test_data() {
+                let map: HashMap<String, String> =
+                    vec![(AWS_XRAY_TRACE_HEADER.to_string(), header.to_string())]
+                        .into_iter()
+                        .collect();
+
+                let propagator = XrayPropagator::default();
+                let context = propagator.extract(&map);
+                assert_eq!(context.span().span_context(), &expected);
+            }
+        }
+
+        #[test]
+        fn test_extract_empty() {
+            let map: HashMap<String, String> = HashMap::new();
+            let propagator = XrayPropagator::default();
+            let context = propagator.extract(&map);
+            assert_eq!(context.span().span_context(), &SpanContext::empty_context())
+        }
+
+        #[test]
+        fn test_inject() {
+            let propagator = XrayPropagator::default();
+            for (header_value, span_context) in inject_test_data() {
+                let mut injector: HashMap<String, String> = HashMap::new();
+                propagator.inject_context(
+                    &Context::current_with_span(TestSpan(span_context)),
+                    &mut injector,
+                );
+
+                let injected_value: Option<&String> = injector.get(AWS_XRAY_TRACE_HEADER);
+
+                if header_value.is_empty() {
+                    assert!(injected_value.is_none());
+                } else {
+                    assert_eq!(injected_value, Some(&header_value.to_string()));
+                }
+            }
+        }
+    }
+}
diff --git a/opentelemetry-contrib/CHANGELOG.md b/opentelemetry-contrib/CHANGELOG.md
new file mode 100644
index 00000000..d9425346
--- /dev/null
+++ b/opentelemetry-contrib/CHANGELOG.md
@@ -0,0 +1,100 @@
+# Changelog
+
+## vNext
+
+## v0.13.0
+
+### Changed
+
+- Bump MSRV to 1.65 [#1318](https://github.com/open-telemetry/opentelemetry-rust/pull/1318)
+- Bump MSRV to 1.64 [#1203](https://github.com/open-telemetry/opentelemetry-rust/pull/1203)
+
+## v0.12.0
+
+### Added
+
+- Implement w3c trace context response propagation #998
+
+### Changed
+
+- update to opentelemetry-api v0.20.0
+
+## v0.11.0
+
+### Changed
+- Handle `parent_span_id` in jaeger JSON exporter [#907](https://github.com/open-telemetry/opentelemetry-rust/pull/907).
+- Bump MSRV to 1.57 [#953](https://github.com/open-telemetry/opentelemetry-rust/pull/953).
+- Update dependencies and bump MSRV to 1.60 [#969](https://github.com/open-telemetry/opentelemetry-rust/pull/969).
+- Implement w3c trace context response propagation [#998](https://github.com/open-telemetry/opentelemetry-rust/pull/998).
+
+## v0.10.0
+
+### Added
+
+- Add jaeger JSON file exporter #814
+
+### Changed
+
+- Rename binary propagator's functions #776
+- Update to opentelemetry v0.18.0
+
+## v0.9.0
+
+### Changed
+
+- Update to opentelemetry v0.17.0
+
+## v0.8.0
+
+### Changed
+
+- Update to opentelemetry v0.16.0
+
+## v0.7.0
+
+### Changed
+
+- Update to opentelemetry v0.15.0
+
+## v0.6.0
+
+### Changed
+
+- Update to opentelemetry v0.14.0
+
+## v0.5.0
+
+### Removed
+- Moved aws related function to `opentelemetry-aws` crate. #446
+- Moved datadog related function to `opentelemetry-datadog` crate. #446
+
+### Changed
+
+- Update to opentelemetry v0.13.0
+
+## v0.4.0
+
+### Changed
+
+- Update to opentelemetry v0.12.0
+- Support tokio v1.0 #421
+- Use opentelemetry-http for http integration #415
+
+## v0.3.0
+
+### Changed
+
+- Update to opentelemetry v0.11.0
+
+## v0.2.0
+
+### Changed
+
+- Update to opentelemetry v0.10.0
+- Move binary propagator and base64 format to this crate #343
+
+## v0.1.0
+
+### Added
+
+- Datadog exporter
diff --git a/opentelemetry-contrib/CODEOWNERS b/opentelemetry-contrib/CODEOWNERS
new file mode 100644
index 00000000..d6962a90
--- /dev/null
+++ b/opentelemetry-contrib/CODEOWNERS
@@ -0,0 +1,5 @@
+# Code owners file.
+# This file controls who is tagged for review for any given pull request. + +# For anything not explicitly taken by someone else: +* @open-telemetry/rust-approvers diff --git a/opentelemetry-contrib/Cargo.toml b/opentelemetry-contrib/Cargo.toml new file mode 100644 index 00000000..b860bc9c --- /dev/null +++ b/opentelemetry-contrib/Cargo.toml @@ -0,0 +1,55 @@ +[package] +name = "opentelemetry-contrib" +version = "0.13.0" +description = "Rust contrib repo for OpenTelemetry" +homepage = "https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/opentelemetry-contrib" +repository = "https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/opentelemetry-contrib" +readme = "README.md" +categories = [ + "development-tools::debugging", + "development-tools::profiling", +] +keywords = ["opentelemetry", "tracing"] +license = "Apache-2.0" +edition = "2021" +rust-version = "1.65" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[features] +api = [] +default = [] +base64_format = ["base64", "binary_propagator"] +binary_propagator = [] +jaeger_json_exporter = ["serde_json", "futures-core", "futures-util", "async-trait", "opentelemetry-semantic-conventions"] +rt-tokio = ["tokio", "opentelemetry_sdk/rt-tokio"] +rt-tokio-current-thread = ["tokio", "opentelemetry_sdk/rt-tokio-current-thread"] +rt-async-std = ["async-std", "opentelemetry_sdk/rt-async-std"] + +[dependencies] +async-std = { version = "1.10", optional = true } +async-trait = { version = "0.1", optional = true } +base64 = { version = "0.13", optional = true } +futures-core = { version = "0.3", optional = true } +futures-util = { version = "0.3", optional = true, default-features = false } +once_cell = "1.17.1" +opentelemetry = { version = "0.21" } +opentelemetry_sdk = { version = "0.21", optional = true } +opentelemetry-semantic-conventions = { version = "0.13", optional = true } +serde_json = { version = "1", optional = true } +tokio = { version = "1.0", features = ["fs", "io-util"], optional = true } + +[dev-dependencies] +base64 = "0.13" +criterion = { version = "0.5", features = ["html_reports"] } +futures-util = { version = "0.3", default-features = false, features = ["std"] } +opentelemetry_sdk = { version = "0.21", features = ["trace", "testing"] } +[target.'cfg(not(target_os = "windows"))'.dev-dependencies] +pprof = { version = "0.13", features = ["flamegraph", "criterion"] } + +[[bench]] +name = "new_span" +harness = false +required-features = ["api"] diff --git a/opentelemetry-contrib/LICENSE b/opentelemetry-contrib/LICENSE new file mode 100644 index 00000000..23a2acab --- /dev/null +++ b/opentelemetry-contrib/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2023 The OpenTelemetry Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/opentelemetry-contrib/README.md b/opentelemetry-contrib/README.md new file mode 100644 index 00000000..5c8a00b4 --- /dev/null +++ b/opentelemetry-contrib/README.md @@ -0,0 +1,23 @@ +![OpenTelemetry — An observability framework for cloud-native software.][splash] + +[splash]: https://raw.githubusercontent.com/open-telemetry/opentelemetry-rust/main/assets/logo-text.png + +# OpenTelemetry Contrib + +Community supported vendor integrations for applications instrumented with [`OpenTelemetry`]. + +[![Crates.io: opentelemetry-contrib](https://img.shields.io/crates/v/opentelemetry-contrib.svg)](https://crates.io/crates/opentelemetry-contrib) +[![Documentation](https://docs.rs/opentelemetry-contrib/badge.svg)](https://docs.rs/opentelemetry-contrib) +[![LICENSE](https://img.shields.io/crates/l/opentelemetry-contrib)](./LICENSE) +[![GitHub Actions CI](https://github.com/open-telemetry/opentelemetry-rust/workflows/CI/badge.svg)](https://github.com/open-telemetry/opentelemetry-rust/actions?query=workflow%3ACI+branch%3Amain) +[![Slack](https://img.shields.io/badge/slack-@cncf/otel/rust-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C03GDP0H023) + +## Overview + +[`OpenTelemetry`] is a collection of tools, APIs, and SDKs used to instrument, +generate, collect, and export telemetry data (metrics, logs, and traces) for +analysis in order to understand your software's performance and behavior. This +crate provides additional propagators and exporters for sending telemetry data +to vendors or using experimental propagators like `base64`. + +[`OpenTelemetry`]: https://crates.io/crates/opentelemetry diff --git a/opentelemetry-contrib/benches/new_span.rs b/opentelemetry-contrib/benches/new_span.rs new file mode 100644 index 00000000..35fc0cc9 --- /dev/null +++ b/opentelemetry-contrib/benches/new_span.rs @@ -0,0 +1,183 @@ +use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; +use futures_util::future::BoxFuture; +use opentelemetry::{ + global::BoxedTracer, + trace::{ + mark_span_as_active, noop::NoopTracer, SpanBuilder, SpanContext, SpanId, + TraceContextExt as _, TraceFlags, TraceId, TraceState, Tracer as _, TracerProvider as _, + }, + Context, ContextGuard, +}; +use opentelemetry_contrib::trace::{ + new_span_if_parent_sampled, new_span_if_recording, TracerSource, +}; +use opentelemetry_sdk::{ + export::trace::{ExportResult, SpanData, SpanExporter}, + trace::{config, Sampler, TracerProvider}, +}; +#[cfg(not(target_os = "windows"))] +use pprof::criterion::{Output, PProfProfiler}; +use std::fmt::Display; + +fn criterion_benchmark(c: &mut Criterion) { + let mut group = c.benchmark_group("new_span"); + group.throughput(Throughput::Elements(1)); + for env in [ + Environment::InContext, + Environment::NoContext, + Environment::NoSdk, + ] { + let (_provider, tracer, _guard) = env.setup(); + + for api in [Api::Alt, Api::Spec] { + let param = format!("{env}/{api}"); + group.bench_function( + BenchmarkId::new("if_parent_sampled", param.clone()), + // m2max, in-cx/alt: 530ns + // m2max, no-cx/alt: 5.9ns + // m2max, no-sdk/alt: 5.9ns + // m2max, in-cx/spec: 505ns + // m2max, no-cx/spec: 255ns + // m2max, no-sdk/spec: 170ns + |b| match api { + Api::Alt => b.iter(|| { + new_span_if_parent_sampled( + || SpanBuilder::from_name("new_span"), + TracerSource::borrowed(&tracer), + ) + .map(|cx| cx.attach()) + }), + Api::Spec => b.iter(|| mark_span_as_active(tracer.start("new_span"))), + }, + ); + group.bench_function( + BenchmarkId::new("if_recording", 
param.clone()),
+                // m2max, in-cx/alt: 8ns
+                // m2max, no-cx/alt: 5.9ns
+                // m2max, no-sdk/alt: 5.9ns
+                // m2max, in-cx/spec: 31ns
+                // m2max, no-cx/spec: 5.8ns
+                // m2max, no-sdk/spec: 5.7ns
+                |b| match api {
+                    Api::Alt => b.iter(|| {
+                        new_span_if_recording(
+                            || SpanBuilder::from_name("new_span"),
+                            TracerSource::borrowed(&tracer),
+                        )
+                        .map(|cx| cx.attach())
+                    }),
+                    Api::Spec => b.iter(|| {
+                        Context::current()
+                            .span()
+                            .is_recording()
+                            .then(|| mark_span_as_active(tracer.start("new_span")))
+                    }),
+                },
+            );
+        }
+    }
+}
+
+#[derive(Copy, Clone)]
+enum Api {
+    /// An alternative way which may be faster than what the spec recommends.
+    Alt,
+    /// The recommended way as proposed by the current opentelemetry specification.
+    Spec,
+}
+
+impl Api {
+    const fn as_str(self) -> &'static str {
+        match self {
+            Api::Alt => "alt",
+            Api::Spec => "spec",
+        }
+    }
+}
+
+impl Display for Api {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "{}", self.as_str())
+    }
+}
+
+#[derive(Copy, Clone)]
+enum Environment {
+    /// There is an active span being sampled in the current context.
+    InContext,
+    /// There is no span in the current context (or there is no context).
+    NoContext,
+    /// An SDK has not been configured, so instrumentation should be noop.
+    NoSdk,
+}
+
+impl Environment {
+    const fn as_str(self) -> &'static str {
+        match self {
+            Environment::InContext => "in-cx",
+            Environment::NoContext => "no-cx",
+            Environment::NoSdk => "no-sdk",
+        }
+    }
+
+    fn setup(&self) -> (Option<TracerProvider>, BoxedTracer, Option<ContextGuard>) {
+        match self {
+            Environment::InContext => {
+                let guard = Context::current()
+                    .with_remote_span_context(SpanContext::new(
+                        TraceId::from(0x09251969),
+                        SpanId::from(0x08171969),
+                        TraceFlags::SAMPLED,
+                        true,
+                        TraceState::default(),
+                    ))
+                    .attach();
+                let (provider, tracer) = parent_sampled_tracer(Sampler::AlwaysOff);
+                (Some(provider), tracer, Some(guard))
+            }
+            Environment::NoContext => {
+                let (provider, tracer) = parent_sampled_tracer(Sampler::AlwaysOff);
+                (Some(provider), tracer, None)
+            }
+            Environment::NoSdk => (None, BoxedTracer::new(Box::new(NoopTracer::new())), None),
+        }
+    }
+}
+
+impl Display for Environment {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "{}", self.as_str())
+    }
+}
+
+fn parent_sampled_tracer(inner_sampler: Sampler) -> (TracerProvider, BoxedTracer) {
+    let provider = TracerProvider::builder()
+        .with_config(config().with_sampler(Sampler::ParentBased(Box::new(inner_sampler))))
+        .with_simple_exporter(NoopExporter)
+        .build();
+    let tracer = provider.tracer(module_path!());
+    (provider, BoxedTracer::new(Box::new(tracer)))
+}
+
+#[derive(Debug)]
+struct NoopExporter;
+
+impl SpanExporter for NoopExporter {
+    fn export(&mut self, _spans: Vec<SpanData>) -> BoxFuture<'static, ExportResult> {
+        Box::pin(futures_util::future::ready(Ok(())))
+    }
+}
+
+#[cfg(not(target_os = "windows"))]
+criterion_group! {
+    name = benches;
+    config = Criterion::default().with_profiler(PProfProfiler::new(100, Output::Flamegraph(None)));
+    targets = criterion_benchmark
+}
+#[cfg(target_os = "windows")]
+criterion_group! {
+    name = benches;
+    config = Criterion::default();
+    targets = criterion_benchmark
+}
+criterion_main!(benches);
diff --git a/opentelemetry-contrib/src/lib.rs b/opentelemetry-contrib/src/lib.rs
new file mode 100644
index 00000000..7c54ebcf
--- /dev/null
+++ b/opentelemetry-contrib/src/lib.rs
@@ -0,0 +1,33 @@
+//! # OpenTelemetry Contrib
+//!
+//! This is a library for extensions that are not part of the core API, but which may still be
+//! useful for some users.
+//!
+//! Typically, these include vendor-specific propagators.
+//!
+//! ## Crate Feature Flags
+//!
+//! The following crate feature flags are available:
+//!
+//! * `binary_propagator`: Adds the experimental binary propagator, which propagates trace context using a binary format.
+//! * `base64_format`: Enables base64 format support for binary propagators.
+#![warn(
+    future_incompatible,
+    missing_debug_implementations,
+    missing_docs,
+    nonstandard_style,
+    rust_2018_idioms,
+    unreachable_pub,
+    unused
+)]
+#![cfg_attr(
+    docsrs,
+    feature(doc_cfg, doc_auto_cfg),
+    deny(rustdoc::broken_intra_doc_links)
+)]
+#![doc(
+    html_logo_url = "https://raw.githubusercontent.com/open-telemetry/opentelemetry-rust/main/assets/logo.svg"
+)]
+#![cfg_attr(test, deny(warnings))]
+
+pub mod trace;
diff --git a/opentelemetry-contrib/src/trace/context.rs b/opentelemetry-contrib/src/trace/context.rs
new file mode 100644
index 00000000..f196f559
--- /dev/null
+++ b/opentelemetry-contrib/src/trace/context.rs
@@ -0,0 +1,184 @@
+use super::TracerSource;
+use opentelemetry::{
+    trace::{SpanBuilder, TraceContextExt as _, Tracer as _},
+    Context, ContextGuard,
+};
+use std::{
+    fmt::{Debug, Formatter},
+    ops::{Deref, DerefMut},
+};
+
+/// Lazily creates a new span only if the current context has an active span,
+/// which will be used as the new span's parent.
+///
+/// This is useful for instrumenting library crates whose activities would be
+/// undesirable to see as root spans, by themselves, outside of any application
+/// context.
+///
+/// # Examples
+///
+/// ```
+/// use opentelemetry::trace::{SpanBuilder};
+/// use opentelemetry_contrib::trace::{new_span_if_parent_sampled, TracerSource};
+///
+/// fn my_lib_fn() {
+///     let _guard = new_span_if_parent_sampled(
+///         || SpanBuilder::from_name("my span"),
+///         TracerSource::lazy(&|| opentelemetry::global::tracer(module_path!())),
+///     )
+///     .map(|cx| cx.attach());
+/// }
+/// ```
+pub fn new_span_if_parent_sampled(
+    builder_fn: impl FnOnce() -> SpanBuilder,
+    tracer: TracerSource<'_>,
+) -> Option<Context> {
+    Context::map_current(|current| {
+        current.span().span_context().is_sampled().then(|| {
+            let builder = builder_fn();
+            let span = tracer.get().build_with_context(builder, current);
+            current.with_span(span)
+        })
+    })
+}
+
+/// Lazily creates a new span only if the current context has a recording span,
+/// which will be used as the new span's parent.
+///
+/// This is useful for instrumenting library crates whose activities would be
+/// undesirable to see as root spans, by themselves, outside of any application
+/// context.
+///
+/// # Examples
+///
+/// ```
+/// use opentelemetry::trace::{SpanBuilder};
+/// use opentelemetry_contrib::trace::{new_span_if_recording, TracerSource};
+///
+/// fn my_lib_fn() {
+///     let _guard = new_span_if_recording(
+///         || SpanBuilder::from_name("my span"),
+///         TracerSource::lazy(&|| opentelemetry::global::tracer(module_path!())),
+///     )
+///     .map(|cx| cx.attach());
+/// }
+/// ```
+pub fn new_span_if_recording(
+    builder_fn: impl FnOnce() -> SpanBuilder,
+    tracer: TracerSource<'_>,
+) -> Option<Context> {
+    Context::map_current(|current| {
+        current.span().is_recording().then(|| {
+            let builder = builder_fn();
+            let span = tracer.get().build_with_context(builder, current);
+            current.with_span(span)
+        })
+    })
+}
+
+/// Carries anything with an optional `opentelemetry::Context`.
+///
+/// A `Contextualized` is a smart pointer which owns an instance of `T` and
+/// dereferences to it automatically. The instance of `T` and its associated
+/// optional `Context` can be reacquired using [`Contextualized::into_inner`],
+/// which returns the associated tuple type.
+///
+/// This type is mostly useful when sending `T`'s through channels with logical
+/// context propagation.
+///
+/// # Examples
+///
+/// ```
+/// use opentelemetry::trace::{SpanBuilder, TraceContextExt as _};
+/// use opentelemetry_contrib::trace::{new_span_if_parent_sampled, Contextualized, TracerSource};
+///
+/// enum Message { Command }
+/// let (tx, rx) = std::sync::mpsc::channel();
+///
+/// let cx = new_span_if_parent_sampled(
+///     || SpanBuilder::from_name("my command"),
+///     TracerSource::lazy(&|| opentelemetry::global::tracer(module_path!())),
+/// );
+/// tx.send(Contextualized::new(Message::Command, cx));
+///
+/// let msg = rx.recv().unwrap();
+/// let (msg, cx) = msg.into_inner();
+/// let _guard = cx.filter(|cx| cx.has_active_span()).map(|cx| {
+///     cx.span().add_event("command received", vec![]);
+///     cx.attach()
+/// });
+/// ```
+pub struct Contextualized<T>(T, Option<Context>);
+
+impl<T> Contextualized<T> {
+    /// Creates a new instance using the specified value and optional context.
+    pub fn new(value: T, cx: Option<Context>) -> Self {
+        Self(value, cx)
+    }
+
+    /// Creates a new instance using the specified value and the current context if
+    /// it has an active span.
+    pub fn pass_thru(value: T) -> Self {
+        Self::new(
+            value,
+            Context::map_current(|current| current.has_active_span().then(|| current.clone())),
+        )
+    }
+
+    /// Convert self into its constituent parts, returning a tuple.
+    pub fn into_inner(self) -> (T, Option<Context>) {
+        (self.0, self.1)
+    }
+
+    /// Attach the contained context if it exists and return both the
+    /// associated value and an optional guard for the attached context.
+    pub fn attach(self) -> (T, Option<ContextGuard>) {
+        (self.0, self.1.map(|cx| cx.attach()))
+    }
+}
+
+impl<T: Clone> Clone for Contextualized<T> {
+    fn clone(&self) -> Self {
+        Self(self.0.clone(), self.1.clone())
+    }
+}
+
+impl<T: Debug> Debug for Contextualized<T> {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        f.debug_tuple("Contextualized")
+            .field(&self.0)
+            .field(&self.1)
+            .finish()
+    }
+}
+
+impl<T> Deref for Contextualized<T> {
+    type Target = T;
+
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
+impl<T> DerefMut for Contextualized<T> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.0
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn cover_contextualized() {
+        let cx = Contextualized::new(17, None);
+        let (i, cx) = cx.into_inner();
+        assert_eq!(i, 17);
+        assert!(cx.is_none());
+
+        let cx = Contextualized::pass_thru(17);
+        let (i, _guard) = cx.attach();
+        assert_eq!(i, 17);
+    }
+}
diff --git a/opentelemetry-contrib/src/trace/exporter/jaeger_json.rs b/opentelemetry-contrib/src/trace/exporter/jaeger_json.rs
new file mode 100644
index 00000000..38b25aa0
--- /dev/null
+++ b/opentelemetry-contrib/src/trace/exporter/jaeger_json.rs
@@ -0,0 +1,310 @@
+//! # Jaeger JSON file Exporter
+//!
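+//! A minimal setup sketch (illustrative; it assumes the `jaeger_json_exporter` and `rt-tokio`
+//! features and an already-running Tokio runtime; the path and names are made up):
+//!
+//! ```ignore
+//! use opentelemetry_contrib::trace::exporter::jaeger_json::JaegerJsonExporter;
+//! use opentelemetry_sdk::runtime::Tokio;
+//!
+//! // Writes one `spans-<unix-seconds>.json` file per export batch into `./traces`.
+//! let tracer = JaegerJsonExporter::new(
+//!     "./traces".into(),
+//!     "spans".into(),
+//!     "my-service".into(),
+//!     Tokio,
+//! )
+//! .install_batch();
+//! ```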
+
+use async_trait::async_trait;
+use futures_core::future::BoxFuture;
+use futures_util::FutureExt;
+use opentelemetry::trace::{SpanId, TraceError};
+use opentelemetry_sdk::{
+    export::trace::{ExportResult, SpanData, SpanExporter},
+    runtime::RuntimeChannel,
+    trace::{Tracer, TracerProvider},
+};
+use opentelemetry_semantic_conventions::SCHEMA_URL;
+use std::collections::HashMap;
+use std::path::{Path, PathBuf};
+use std::time::SystemTime;
+
+/// An exporter for Jaeger-compatible JSON files containing trace data
+#[derive(Debug)]
+pub struct JaegerJsonExporter<R> {
+    out_path: PathBuf,
+    file_prefix: String,
+    service_name: String,
+    runtime: R,
+}
+
+impl<R: JaegerJsonRuntime> JaegerJsonExporter<R> {
+    /// Configure a new jaeger-json exporter
+    ///
+    /// * `out_path` refers to a directory where span data is written. If it does not exist, it is created by the exporter
+    /// * `file_prefix` refers to a prefix prepended to each span file
+    /// * `service_name` is used to identify the corresponding service in Jaeger
+    /// * `runtime` specifies the async runtime used to write the trace data
+    pub fn new(out_path: PathBuf, file_prefix: String, service_name: String, runtime: R) -> Self {
+        Self {
+            out_path,
+            file_prefix,
+            service_name,
+            runtime,
+        }
+    }
+
+    /// Install the exporter using the internally provided runtime
+    pub fn install_batch(self) -> Tracer {
+        let runtime = self.runtime.clone();
+        let provider_builder = TracerProvider::builder().with_batch_exporter(self, runtime);
+        let provider = provider_builder.build();
+        let tracer = opentelemetry::trace::TracerProvider::versioned_tracer(
+            &provider,
+            "opentelemetry",
+            Some(env!("CARGO_PKG_VERSION")),
+            Some(SCHEMA_URL),
+            None,
+        );
+        let _ = opentelemetry::global::set_tracer_provider(provider);
+
+        tracer
+    }
+}
+
+impl<R: JaegerJsonRuntime> SpanExporter for JaegerJsonExporter<R> {
+    fn export(&mut self, batch: Vec<SpanData>) -> BoxFuture<'static, ExportResult> {
+        let mut trace_map = HashMap::new();
+
+        for span in batch {
+            let ctx = &span.span_context;
+            trace_map
+                .entry(ctx.trace_id())
+                .or_insert_with(Vec::new)
+                .push(span_data_to_jaeger_json(span));
+        }
+
+        let data = trace_map
+            .into_iter()
+            .map(|(trace_id, spans)| {
+                serde_json::json!({
+                    "traceID": trace_id.to_string(),
+                    "spans": spans,
+                    "processes": {
+                        "p1": {
+                            "serviceName": self.service_name,
+                            "tags": []
+                        }
+                    }
+                })
+            })
+            .collect::<Vec<_>>();
+
+        let json = serde_json::json!({
+            "data": data,
+        });
+
+        let runtime = self.runtime.clone();
+        let out_path = self.out_path.clone();
+        let file_prefix = self.file_prefix.clone();
+
+        async move {
+            runtime.create_dir(&out_path).await?;
+
+            let file_name = out_path.join(format!(
+                "{}-{}.json",
+                file_prefix,
+                SystemTime::now()
+                    .duration_since(SystemTime::UNIX_EPOCH)
+                    .expect("This does not fail")
+                    .as_secs()
+            ));
+            runtime
+                .write_to_file(
+                    &file_name,
+                    &serde_json::to_vec(&json).expect("This is a valid json value"),
+                )
+                .await?;
+
+            Ok(())
+        }
+        .boxed()
+    }
+}
+
+fn span_data_to_jaeger_json(span: SpanData) -> serde_json::Value {
+    let events = span
+        .events
+        .iter()
+        .map(|e| {
+            let mut fields = e
+                .attributes
+                .iter()
+                .map(|a| {
+                    let (tpe, value) = opentelemetry_value_to_json(&a.value);
+                    serde_json::json!({
+                        "key": a.key.as_str(),
+                        "type": tpe,
+                        "value": value,
+                    })
+                })
+                .collect::<Vec<_>>();
+            fields.push(serde_json::json!({
+                "key": "event",
+                "type": "string",
+                "value": e.name,
+            }));
+
+            serde_json::json!({
+                "timestamp": e.timestamp.duration_since(SystemTime::UNIX_EPOCH).expect("This does not fail").as_micros() as i64,
+                "fields": fields,
+            })
+        })
+        .collect::<Vec<_>>();
+    let tags = span
+        .attributes
+        .iter()
+        .map(|kv| {
+            let (tpe, value) = opentelemetry_value_to_json(&kv.value);
+            serde_json::json!({
+                "key": kv.key.as_str(),
+                "type": tpe,
+                "value": value,
+            })
+        })
+        .collect::<Vec<_>>();
+    let mut references = if span.links.is_empty() {
+        None
+    } else {
+        Some(
+            span.links
+                .iter()
+                .map(|link| {
+                    let span_context = &link.span_context;
+                    serde_json::json!({
+                        "refType": "FOLLOWS_FROM",
+                        "traceID": span_context.trace_id().to_string(),
+                        "spanID": span_context.span_id().to_string(),
+                    })
+                })
+                .collect::<Vec<_>>(),
+        )
+    };
+    if span.parent_span_id != SpanId::INVALID {
+        let val = serde_json::json!({
+            "refType": "CHILD_OF",
+            "traceID": span.span_context.trace_id().to_string(),
+            "spanID": span.parent_span_id.to_string(),
+        });
+        references.get_or_insert_with(Vec::new).push(val);
+    }
+    serde_json::json!({
+        "traceID": span.span_context.trace_id().to_string(),
+        "spanID": span.span_context.span_id().to_string(),
+        "startTime": span.start_time.duration_since(SystemTime::UNIX_EPOCH).expect("This does not fail").as_micros() as i64,
+        "duration": span.end_time.duration_since(span.start_time).expect("This does not fail").as_micros() as i64,
+        "operationName": span.name,
+        "tags": tags,
+        "logs": events,
+        "flags": span.span_context.trace_flags().to_u8(),
+        "processID": "p1",
+        "warnings": None::<String>,
+        "references": references,
+    })
+}
+
+fn opentelemetry_value_to_json(value: &opentelemetry::Value) -> (&str, serde_json::Value) {
+    match value {
+        opentelemetry::Value::Bool(b) => ("bool", serde_json::json!(b)),
+        opentelemetry::Value::I64(i) => ("int64", serde_json::json!(i)),
+        opentelemetry::Value::F64(f) => ("float64", serde_json::json!(f)),
+        opentelemetry::Value::String(s) => ("string", serde_json::json!(s.as_str())),
+        v @ opentelemetry::Value::Array(_) => ("string", serde_json::json!(v.to_string())),
+    }
+}
+
+/// Jaeger Json Runtime is an extension to [`RuntimeChannel`].
+///
+/// [`RuntimeChannel`]: opentelemetry_sdk::runtime::RuntimeChannel
+#[async_trait]
+pub trait JaegerJsonRuntime: RuntimeChannel + std::fmt::Debug {
+    /// Create a new directory if the given path does not exist yet
+    async fn create_dir(&self, path: &Path) -> ExportResult;
+    /// Write the provided content to a new file at the given path
+    async fn write_to_file(&self, path: &Path, content: &[u8]) -> ExportResult;
+}
+
+#[cfg(feature = "rt-tokio")]
+#[async_trait]
+impl JaegerJsonRuntime for opentelemetry_sdk::runtime::Tokio {
+    async fn create_dir(&self, path: &Path) -> ExportResult {
+        if tokio::fs::metadata(path).await.is_err() {
+            tokio::fs::create_dir_all(path)
+                .await
+                .map_err(|e| TraceError::Other(Box::new(e)))?
+        }
+
+        Ok(())
+    }
+
+    async fn write_to_file(&self, path: &Path, content: &[u8]) -> ExportResult {
+        use tokio::io::AsyncWriteExt;
+
+        let mut file = tokio::fs::File::create(path)
+            .await
+            .map_err(|e| TraceError::Other(Box::new(e)))?;
+        file.write_all(content)
+            .await
+            .map_err(|e| TraceError::Other(Box::new(e)))?;
+        file.sync_data()
+            .await
+            .map_err(|e| TraceError::Other(Box::new(e)))?;
+
+        Ok(())
+    }
+}
+
+#[cfg(feature = "rt-tokio-current-thread")]
+#[async_trait]
+impl JaegerJsonRuntime for opentelemetry_sdk::runtime::TokioCurrentThread {
+    async fn create_dir(&self, path: &Path) -> ExportResult {
+        if tokio::fs::metadata(path).await.is_err() {
+            tokio::fs::create_dir_all(path)
+                .await
+                .map_err(|e| TraceError::Other(Box::new(e)))?
+        }
+
+        Ok(())
+    }
+
+    async fn write_to_file(&self, path: &Path, content: &[u8]) -> ExportResult {
+        use tokio::io::AsyncWriteExt;
+
+        let mut file = tokio::fs::File::create(path)
+            .await
+            .map_err(|e| TraceError::Other(Box::new(e)))?;
+        file.write_all(content)
+            .await
+            .map_err(|e| TraceError::Other(Box::new(e)))?;
+        file.sync_data()
+            .await
+            .map_err(|e| TraceError::Other(Box::new(e)))?;
+
+        Ok(())
+    }
+}
+
+#[cfg(feature = "rt-async-std")]
+#[async_trait]
+impl JaegerJsonRuntime for opentelemetry_sdk::runtime::AsyncStd {
+    async fn create_dir(&self, path: &Path) -> ExportResult {
+        if async_std::fs::metadata(path).await.is_err() {
+            async_std::fs::create_dir_all(path)
+                .await
+                .map_err(|e| TraceError::Other(Box::new(e)))?;
+        }
+        Ok(())
+    }
+
+    async fn write_to_file(&self, path: &Path, content: &[u8]) -> ExportResult {
+        use async_std::io::WriteExt;
+
+        let mut file = async_std::fs::File::create(path)
+            .await
+            .map_err(|e| TraceError::Other(Box::new(e)))?;
+        file.write_all(content)
+            .await
+            .map_err(|e| TraceError::Other(Box::new(e)))?;
+        file.sync_data()
+            .await
+            .map_err(|e| TraceError::Other(Box::new(e)))?;
+
+        Ok(())
+    }
+}
diff --git a/opentelemetry-contrib/src/trace/exporter/mod.rs b/opentelemetry-contrib/src/trace/exporter/mod.rs
new file mode 100644
index 00000000..6bef8e55
--- /dev/null
+++ b/opentelemetry-contrib/src/trace/exporter/mod.rs
@@ -0,0 +1,13 @@
+//! # Opentelemetry exporter contrib
+//!
+//! This module provides exporters for third-party vendor formats or experimental exporters that
+//! aren't part of OpenTelemetry.
+//!
+//! Currently, the following exporters are supported:
+//!
+//! * `jaeger_json`, which allows exporting traces into files using Jaeger's JSON format
+//!
+//! This module also provides related types for those exporters.
+
+#[cfg(feature = "jaeger_json_exporter")]
+pub mod jaeger_json;
diff --git a/opentelemetry-contrib/src/trace/mod.rs b/opentelemetry-contrib/src/trace/mod.rs
new file mode 100644
index 00000000..97ee0db7
--- /dev/null
+++ b/opentelemetry-contrib/src/trace/mod.rs
@@ -0,0 +1,15 @@
+//! # Opentelemetry trace contrib
+//!
+
+#[cfg(feature = "api")]
+mod context;
+#[cfg(feature = "api")]
+pub use context::{new_span_if_parent_sampled, new_span_if_recording, Contextualized};
+
+pub mod exporter;
+pub mod propagator;
+
+#[cfg(feature = "api")]
+mod tracer_source;
+#[cfg(feature = "api")]
+pub use tracer_source::TracerSource;
diff --git a/opentelemetry-contrib/src/trace/propagator/binary/base64_format.rs b/opentelemetry-contrib/src/trace/propagator/binary/base64_format.rs
new file mode 100644
index 00000000..45712b5e
--- /dev/null
+++ b/opentelemetry-contrib/src/trace/propagator/binary/base64_format.rs
@@ -0,0 +1,91 @@
+//! # Base64 Format
+//!
+//! `Base64Format` is a formatter to serialize and deserialize a
+//! value into a base64 format.
+//!
+//! `Base64Format` MUST expose APIs that serialize values into base64 strings
+//! and deserialize values from base64 strings. There is a blanket implementation
+//! for any implementor of `BinaryFormat`.
+#[cfg(feature = "binary_propagator")]
+use crate::trace::propagator::binary::binary_propagator::BinaryFormat;
+
+use base64::{decode, encode};
+use opentelemetry::trace::SpanContext;
+
+/// Used to serialize and deserialize `SpanContext`s to and from a base64
+/// representation.
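+///
+/// # Examples
+///
+/// A round-trip sketch using the blanket implementation for `BinaryPropagator`
+/// (an invalid context serializes to all-zero bytes and decodes back to the empty context):
+///
+/// ```
+/// use opentelemetry_contrib::trace::propagator::binary::{Base64Format, BinaryPropagator};
+/// use opentelemetry::trace::SpanContext;
+///
+/// let propagator = BinaryPropagator::new();
+/// let encoded = propagator.serialize_into_base64(&SpanContext::empty_context());
+/// assert_eq!(
+///     propagator.deserialize_from_base64(&encoded),
+///     SpanContext::empty_context()
+/// );
+/// ```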
+pub trait Base64Format {
+    /// Serializes span context into a base64 encoded string
+    fn serialize_into_base64(&self, context: &SpanContext) -> String;
+
+    /// Deserialize a span context from a base64 encoded string
+    fn deserialize_from_base64(&self, base64: &str) -> SpanContext;
+}
+
+impl<Format> Base64Format for Format
+where
+    Format: BinaryFormat,
+{
+    fn serialize_into_base64(&self, context: &SpanContext) -> String {
+        encode(self.serialize_into_bytes(context))
+    }
+
+    fn deserialize_from_base64(&self, base64: &str) -> SpanContext {
+        if let Ok(bytes) = decode(base64.as_bytes()) {
+            self.deserialize_from_bytes(bytes)
+        } else {
+            SpanContext::empty_context()
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::trace::propagator::binary::binary_propagator::BinaryPropagator;
+    use opentelemetry::trace::{SpanId, TraceFlags, TraceId, TraceState};
+
+    #[rustfmt::skip]
+    fn to_base64_data() -> Vec<(SpanContext, String)> {
+        vec![
+            (SpanContext::new(
+                TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736),
+                SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default()),
+                "AABL+S81d7NNpqPOkp0ODkc2AQDwZ6oLqQK3AgE=".to_string()
+            ),
+            (SpanContext::new(
+                TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736),
+                SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::default(), true, TraceState::default()),
+                "AABL+S81d7NNpqPOkp0ODkc2AQDwZ6oLqQK3AgA=".to_string()
+            ),
+        ]
+    }
+
+    #[rustfmt::skip]
+    fn from_base64_data() -> Vec<(SpanContext, String)> {
+        vec![
+            (SpanContext::empty_context(), "invalid base64 string".to_string())
+        ]
+    }
+
+    #[test]
+    fn serialize_into_base64_conversion() {
+        let propagator = BinaryPropagator::new();
+
+        for (context, data) in to_base64_data() {
+            assert_eq!(propagator.serialize_into_base64(&context), data)
+        }
+    }
+
+    #[test]
+    fn deserialize_from_base64_conversion() {
+        let propagator = BinaryPropagator::new();
+
+        for (context, data) in from_base64_data() {
+            assert_eq!(propagator.deserialize_from_base64(&data), context)
+        }
+        for (context, data) in to_base64_data() {
+            assert_eq!(propagator.deserialize_from_base64(&data), context)
+        }
+    }
+}
diff --git a/opentelemetry-contrib/src/trace/propagator/binary/binary_propagator.rs b/opentelemetry-contrib/src/trace/propagator/binary/binary_propagator.rs
new file mode 100644
index 00000000..2f8eba73
--- /dev/null
+++ b/opentelemetry-contrib/src/trace/propagator/binary/binary_propagator.rs
@@ -0,0 +1,178 @@
+//! # Binary Propagator
+//!
+//! `BinaryFormat` is a formatter to serialize and deserialize a
+//! value into a binary format.
+//!
+//! `BinaryFormat` MUST expose APIs that serialize values into bytes
+//! and deserialize values from bytes.
+use opentelemetry::trace::{SpanContext, SpanId, TraceFlags, TraceId, TraceState};
+use std::convert::TryInto;
+
+/// Used to serialize and deserialize `SpanContext`s to and from a binary
+/// representation.
+pub trait BinaryFormat {
+    /// Serializes span context into a byte array and returns the array.
+    fn serialize_into_bytes(&self, context: &SpanContext) -> [u8; 29];
+
+    /// Deserializes a span context from a byte array.
+    fn deserialize_from_bytes(&self, bytes: Vec<u8>) -> SpanContext;
+}
+
+/// Extracts and injects `SpanContext`s from byte arrays.
+#[derive(Debug, Default)]
+pub struct BinaryPropagator {}
+
+impl BinaryPropagator {
+    /// Create a new binary propagator.
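+    ///
+    /// # Examples
+    ///
+    /// A small round-trip sketch (the propagator serializes to a fixed 29-byte array):
+    ///
+    /// ```
+    /// use opentelemetry_contrib::trace::propagator::binary::{BinaryFormat, BinaryPropagator};
+    /// use opentelemetry::trace::{SpanContext, SpanId, TraceFlags, TraceId, TraceState};
+    ///
+    /// let propagator = BinaryPropagator::new();
+    /// let cx = SpanContext::new(
+    ///     TraceId::from_u128(1),
+    ///     SpanId::from_u64(1),
+    ///     TraceFlags::SAMPLED,
+    ///     true,
+    ///     TraceState::default(),
+    /// );
+    /// let bytes = propagator.serialize_into_bytes(&cx);
+    /// assert_eq!(propagator.deserialize_from_bytes(bytes.to_vec()), cx);
+    /// ```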
+    pub fn new() -> Self {
+        BinaryPropagator {}
+    }
+}
+
+impl BinaryFormat for BinaryPropagator {
+    /// Serializes span context into a byte array and returns the array.
+    fn serialize_into_bytes(&self, context: &SpanContext) -> [u8; 29] {
+        let mut res = [0u8; 29];
+        if !context.is_valid() {
+            return res;
+        }
+        res[2..18].copy_from_slice(&context.trace_id().to_bytes());
+        res[18] = 1;
+        res[19..27].copy_from_slice(&context.span_id().to_bytes());
+        res[27] = 2;
+        res[28] = context.trace_flags().to_u8();
+
+        res
+    }
+
+    /// Deserializes a span context from a byte array.
+    fn deserialize_from_bytes(&self, bytes: Vec<u8>) -> SpanContext {
+        if bytes.is_empty() {
+            return SpanContext::empty_context();
+        }
+        let trace_id: [u8; 16];
+        let mut span_id = [0; 8];
+        let mut trace_flags = 0;
+        let mut b = &bytes[1..];
+        if b.len() >= 17 && b[0] == 0 {
+            trace_id = b[1..17].try_into().unwrap();
+            b = &b[17..];
+        } else {
+            return SpanContext::empty_context();
+        }
+        if b.len() >= 9 && b[0] == 1 {
+            span_id = b[1..9].try_into().unwrap();
+            b = &b[9..];
+        }
+        if b.len() >= 2 && b[0] == 2 {
+            trace_flags = b[1]
+        }
+
+        let span_context = SpanContext::new(
+            TraceId::from_bytes(trace_id),
+            SpanId::from_bytes(span_id),
+            TraceFlags::new(trace_flags),
+            true,
+            // TODO traceparent and tracestate should both begin with a 0 byte, figure out how to differentiate
+            TraceState::default(),
+        );
+
+        if span_context.is_valid() {
+            span_context
+        } else {
+            SpanContext::empty_context()
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use opentelemetry::trace::TraceState;
+
+    #[rustfmt::skip]
+    fn to_bytes_data() -> Vec<(SpanContext, [u8; 29])> {
+        vec![
+            // Context with sampled
+            (SpanContext::new(
+                TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736),
+                SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default()), [
+                0x00, 0x00, 0x4b, 0xf9, 0x2f, 0x35, 0x77, 0xb3, 0x4d, 0xa6, 0xa3, 0xce, 0x92, 0x9d, 0x0e, 0x0e, 0x47, 0x36,
+                0x01, 0x00, 0xf0, 0x67, 0xaa, 0x0b, 0xa9, 0x02, 0xb7,
+                0x02, 0x01,
+            ]),
+            // Context without sampled
+            (SpanContext::new(
+                TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736),
+                SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::default(), true, TraceState::default()), [
+                0x00, 0x00, 0x4b, 0xf9, 0x2f, 0x35, 0x77, 0xb3, 0x4d, 0xa6, 0xa3, 0xce, 0x92, 0x9d, 0x0e, 0x0e, 0x47, 0x36,
+                0x01, 0x00, 0xf0, 0x67, 0xaa, 0x0b, 0xa9, 0x02, 0xb7,
+                0x02, 0x00,
+            ]),
+            // Invalid context
+            (SpanContext::empty_context(), [0u8; 29]),
+        ]
+    }
+
+    #[rustfmt::skip]
+    fn from_bytes_data() -> Vec<(SpanContext, Vec<u8>)> {
+        vec![
+            // Future version of the proto
+            (SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736),SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default()), vec![
+                0x02, 0x00, 0x4b, 0xf9, 0x2f, 0x35, 0x77, 0xb3, 0x4d, 0xa6, 0xa3, 0xce, 0x92, 0x9d, 0x0e, 0x0e, 0x47, 0x36,
+                0x01, 0x00, 0xf0, 0x67, 0xaa, 0x0b, 0xa9, 0x02, 0xb7,
+                0x02, 0x01,
+            ]),
+            // current version with sampled
+            (SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736),SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default()), vec![
+                0x02, 0x00, 0x4b, 0xf9, 0x2f, 0x35, 0x77, 0xb3, 0x4d, 0xa6, 0xa3, 0xce, 0x92, 0x9d, 0x0e, 0x0e, 0x47, 0x36,
+                0x01, 0x00, 0xf0, 0x67, 0xaa, 0x0b, 0xa9, 0x02, 0xb7,
+                0x02, 0x01,
+            ]),
+            // valid context without option
(SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736),SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::default(), true, TraceState::default()), vec![
+                0x00, 0x00, 0x4b, 0xf9, 0x2f, 0x35, 0x77, 0xb3, 0x4d, 0xa6, 0xa3, 0xce, 0x92, 0x9d, 0x0e, 0x0e, 0x47, 0x36,
+                0x01, 0x00, 0xf0, 0x67, 0xaa, 0x0b, 0xa9, 0x02, 0xb7,
+            ]),
+            // zero trace id
+            (SpanContext::empty_context(), vec![
+                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+                0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+                0x02, 0x01,
+            ]),
+            // zero span id
+            (SpanContext::empty_context(), vec![
+                0x00, 0x00, 0x4b, 0xf9, 0x2f, 0x35, 0x77, 0xb3, 0x4d, 0xa6, 0xa3, 0xce, 0x92, 0x9d, 0x0e, 0x0e, 0x47, 0x36,
+                0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+                0x02, 0x01,
+            ]),
+            // wrong trace id field number
+            (SpanContext::empty_context(), vec![
+                0x00, 0x01, 0x4b, 0xf9, 0x2f, 0x35, 0x77, 0xb3, 0x4d, 0xa6, 0xa3, 0xce, 0x92, 0x9d, 0x0e, 0x0e, 0x47, 0x36,
+                0x01, 0x00, 0xf0, 0x67, 0xaa, 0x0b, 0xa9, 0x02, 0xb7,
+            ]),
+            // short byte array
+            (SpanContext::empty_context(), vec![
+                0x00, 0x00, 0x4b, 0xf9, 0x2f, 0x35, 0x77, 0xb3, 0x4d,
+            ]),
+        ]
+    }
+
+    #[test]
+    fn serialize_into_bytes_conversion() {
+        let propagator = BinaryPropagator::new();
+
+        for (context, data) in to_bytes_data() {
+            assert_eq!(propagator.serialize_into_bytes(&context), data)
+        }
+    }
+
+    #[test]
+    fn deserialize_from_bytes_conversion() {
+        let propagator = BinaryPropagator::new();
+
+        for (context, data) in from_bytes_data() {
+            assert_eq!(propagator.deserialize_from_bytes(data), context)
+        }
+    }
+}
diff --git a/opentelemetry-contrib/src/trace/propagator/binary/mod.rs b/opentelemetry-contrib/src/trace/propagator/binary/mod.rs
new file mode 100644
index 00000000..6b16bd4b
--- /dev/null
+++ b/opentelemetry-contrib/src/trace/propagator/binary/mod.rs
@@ -0,0 +1,42 @@
+//! # OpenTelemetry Experimental Propagator interface
+//!
+//! ## Binary Format
+//!
+//! `BinaryFormat` is a formatter to serialize and deserialize a value
+//! into a binary format.
+//!
+//! `BinaryFormat` MUST expose the APIs that serialize values into bytes,
+//! and deserialize values from bytes.
+//!
+//! ### ToBytes
+//!
+//! Serializes the given value into the on-the-wire representation.
+//!
+//! Required arguments:
+//!
+//! - the value to serialize, can be `SpanContext` or `DistributedContext`.
+//!
+//! Returns the on-the-wire byte representation of the value.
+//!
+//! ### FromBytes
+//!
+//! Creates a value from the given on-the-wire encoded representation.
+//!
+//! If the value could not be parsed, the underlying implementation
+//! SHOULD decide to return either an empty value, an invalid value, or
+//! a valid value.
+//!
+//! Required arguments:
+//!
+//! - on-the-wire byte representation of the value.
+//!
+//! Returns a value deserialized from bytes.
+//!
+
+#[cfg(feature = "base64")]
+mod base64_format;
+mod binary_propagator;
+
+#[cfg(feature = "base64")]
+pub use base64_format::Base64Format;
+pub use binary_propagator::{BinaryFormat, BinaryPropagator};
diff --git a/opentelemetry-contrib/src/trace/propagator/mod.rs b/opentelemetry-contrib/src/trace/propagator/mod.rs
new file mode 100644
index 00000000..eb9c61b6
--- /dev/null
+++ b/opentelemetry-contrib/src/trace/propagator/mod.rs
@@ -0,0 +1,12 @@
+//! # OpenTelemetry propagator contrib
+//!
+//! This module provides propagators for third-party vendor formats, as well as experimental propagators that
+//! aren't part of OpenTelemetry.
+//!
+//! 
Currently, the following propagators are supported:
+//!
+//! * `binary_propagator`, propagating trace context in the binary format.
+//!
+//! This module also provides related types for those propagators.
+pub mod binary;
+pub mod trace_context_response;
diff --git a/opentelemetry-contrib/src/trace/propagator/trace_context_response.rs b/opentelemetry-contrib/src/trace/propagator/trace_context_response.rs
new file mode 100644
index 00000000..45098ba8
--- /dev/null
+++ b/opentelemetry-contrib/src/trace/propagator/trace_context_response.rs
@@ -0,0 +1,236 @@
+//! # W3C Trace Context HTTP Response Propagator
+//!
+//! The traceresponse HTTP response header field identifies a completed request
+//! in a tracing system. It has four fields:
+//!
+//! - version
+//! - trace-id
+//! - parent-id
+//! - trace-flags
+//!
+//! See the [w3c trace-context docs] for more details.
+//!
+//! [w3c trace-context docs]: https://w3c.github.io/trace-context/#traceresponse-header
+use once_cell::sync::Lazy;
+use opentelemetry::{
+    propagation::{text_map_propagator::FieldIter, Extractor, Injector, TextMapPropagator},
+    trace::{SpanContext, SpanId, TraceContextExt, TraceFlags, TraceId, TraceState},
+    Context,
+};
+
+const SUPPORTED_VERSION: u8 = 0;
+const MAX_VERSION: u8 = 254;
+const TRACERESPONSE_HEADER: &str = "traceresponse";
+
+static TRACE_CONTEXT_HEADER_FIELDS: Lazy<[String; 1]> =
+    Lazy::new(|| [TRACERESPONSE_HEADER.to_owned()]);
+
+/// Propagates trace response using the [W3C TraceContext] format
+///
+/// [W3C TraceContext]: https://w3c.github.io/trace-context/#traceresponse-header
+#[derive(Clone, Debug, Default)]
+pub struct TraceContextResponsePropagator {
+    _private: (),
+}
+
+impl TraceContextResponsePropagator {
+    /// Create a new `TraceContextResponsePropagator`.
+    pub fn new() -> Self {
+        TraceContextResponsePropagator { _private: () }
+    }
+
+    /// Extract span context from the w3c trace-context header.
+    fn extract_span_context(&self, extractor: &dyn Extractor) -> Result<SpanContext, ()> {
+        let header_value = extractor.get(TRACERESPONSE_HEADER).unwrap_or("").trim();
+        let parts = header_value.split_terminator('-').collect::<Vec<&str>>();
+        // Ensure parts are not out of range.
+        if parts.len() < 4 {
+            return Err(());
+        }
+
+        // Ensure version is within range; for version 0 there must be exactly 4 parts.
+        let version = u8::from_str_radix(parts[0], 16).map_err(|_| ())?;
+        if version > MAX_VERSION || version == 0 && parts.len() != 4 {
+            return Err(());
+        }
+
+        // Ensure trace id is lowercase
+        if parts[1].chars().any(|c| c.is_ascii_uppercase()) {
+            return Err(());
+        }
+
+        // Parse trace id section
+        let trace_id = TraceId::from_hex(parts[1]).map_err(|_| ())?;
+
+        // Ensure span id is lowercase
+        if parts[2].chars().any(|c| c.is_ascii_uppercase()) {
+            return Err(());
+        }
+
+        // Parse span id section
+        let span_id = SpanId::from_hex(parts[2]).map_err(|_| ())?;
+
+        // Parse trace flags section
+        let opts = u8::from_str_radix(parts[3], 16).map_err(|_| ())?;
+
+        // Ensure opts are valid for version 0
+        if version == 0 && opts > 2 {
+            return Err(());
+        }
+
+        // Build trace flags, clearing all flags other than the trace-context
+        // supported sampling bit.
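+        // For example, opts parsed from a "…-09" header is masked down to 0x01
+        // here, so unused or future flag bits never reach the returned context.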
+ let trace_flags = TraceFlags::new(opts) & TraceFlags::SAMPLED; + + // create context + let span_context = + SpanContext::new(trace_id, span_id, trace_flags, true, TraceState::default()); + + // Ensure span is valid + if !span_context.is_valid() { + return Err(()); + } + + Ok(span_context) + } +} + +impl TextMapPropagator for TraceContextResponsePropagator { + /// Properly encodes the values of the `SpanContext` and injects them + /// into the `Injector`. + fn inject_context(&self, cx: &Context, injector: &mut dyn Injector) { + let span = cx.span(); + let span_context = span.span_context(); + if span_context.is_valid() { + let header_value = format!( + "{:02x}-{}-{}-{:02x}", + SUPPORTED_VERSION, + span_context.trace_id(), + span_context.span_id(), + span_context.trace_flags() & TraceFlags::SAMPLED + ); + injector.set(TRACERESPONSE_HEADER, header_value); + } + } + + /// Retrieves encoded `SpanContext`s using the `Extractor`. It decodes + /// the `SpanContext` and returns it. If no `SpanContext` was retrieved + /// OR if the retrieved SpanContext is invalid then an empty `SpanContext` + /// is returned. + fn extract_with_context(&self, cx: &Context, extractor: &dyn Extractor) -> Context { + self.extract_span_context(extractor) + .map(|sc| cx.with_remote_span_context(sc)) + .unwrap_or_else(|_| cx.clone()) + } + + fn fields(&self) -> FieldIter<'_> { + FieldIter::new(TRACE_CONTEXT_HEADER_FIELDS.as_ref()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use opentelemetry::{ + propagation::{Extractor, TextMapPropagator}, + testing::trace::TestSpan, + trace::{SpanContext, SpanId, TraceId, TraceState}, + }; + use std::{collections::HashMap, str::FromStr}; + + #[rustfmt::skip] + fn extract_data() -> Vec<(&'static str, SpanContext)> { + vec![ + ("00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-00", SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::default(), true, TraceState::default())), + ("00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01", SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default())), + ("02-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01", SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default())), + ("02-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-09", SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default())), + ("02-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-08", SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::default(), true, TraceState::default())), + ("02-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-09-XYZxsf09", SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default())), + ("00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01-", SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default())), + ("01-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-09-", 
SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::default())), + ] + } + + #[rustfmt::skip] + fn extract_data_invalid() -> Vec<(&'static str, &'static str)> { + vec![ + ("0000-00000000000000000000000000000000-0000000000000000-01", "wrong version length"), + ("00-ab00000000000000000000000000000000-cd00000000000000-01", "wrong trace ID length"), + ("00-ab000000000000000000000000000000-cd0000000000000000-01", "wrong span ID length"), + ("00-ab000000000000000000000000000000-cd00000000000000-0100", "wrong trace flag length"), + ("qw-00000000000000000000000000000000-0000000000000000-01", "bogus version"), + ("00-qw000000000000000000000000000000-cd00000000000000-01", "bogus trace ID"), + ("00-ab000000000000000000000000000000-qw00000000000000-01", "bogus span ID"), + ("00-ab000000000000000000000000000000-cd00000000000000-qw", "bogus trace flag"), + ("A0-00000000000000000000000000000000-0000000000000000-01", "upper case version"), + ("00-AB000000000000000000000000000000-cd00000000000000-01", "upper case trace ID"), + ("00-ab000000000000000000000000000000-CD00000000000000-01", "upper case span ID"), + ("00-ab000000000000000000000000000000-cd00000000000000-A1", "upper case trace flag"), + ("00-00000000000000000000000000000000-0000000000000000-01", "zero trace ID and span ID"), + ("00-ab000000000000000000000000000000-cd00000000000000-09", "trace-flag unused bits set"), + ("00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7", "missing options"), + ("00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-", "empty options"), + ] + } + + #[rustfmt::skip] + fn inject_data() -> Vec<(&'static str, SpanContext)> { + vec![ + ("00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01", SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::SAMPLED, true, TraceState::from_str("foo=bar").unwrap())), + ("00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-00", SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::default(), true, TraceState::from_str("foo=bar").unwrap())), + ("00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01", SpanContext::new(TraceId::from_u128(0x4bf9_2f35_77b3_4da6_a3ce_929d_0e0e_4736), SpanId::from_u64(0x00f0_67aa_0ba9_02b7), TraceFlags::new(0xff), true, TraceState::from_str("foo=bar").unwrap())), + ("", SpanContext::empty_context()), + ] + } + + #[test] + fn extract_w3c_traceresponse() { + let propagator = TraceContextResponsePropagator::new(); + + for (traceresponse, expected_context) in extract_data() { + let mut extractor = HashMap::new(); + extractor.insert(TRACERESPONSE_HEADER.to_string(), traceresponse.to_string()); + + assert_eq!( + propagator.extract(&extractor).span().span_context(), + &expected_context + ) + } + } + + #[test] + fn extract_w3c_traceresponse_reject_invalid() { + let propagator = TraceContextResponsePropagator::new(); + + for (invalid_header, reason) in extract_data_invalid() { + let mut extractor = HashMap::new(); + extractor.insert(TRACERESPONSE_HEADER.to_string(), invalid_header.to_string()); + + assert_eq!( + propagator.extract(&extractor).span().span_context(), + &SpanContext::empty_context(), + "{}", + reason + ) + } + } + + #[test] + fn inject_w3c_traceresponse() { + let propagator = TraceContextResponsePropagator::new(); + + for (expected_trace_response, context) in inject_data() { + let mut 
injector = HashMap::new();
+            propagator.inject_context(
+                &Context::current_with_span(TestSpan(context)),
+                &mut injector,
+            );
+
+            assert_eq!(
+                Extractor::get(&injector, TRACERESPONSE_HEADER).unwrap_or(""),
+                expected_trace_response
+            );
+        }
+    }
+}
diff --git a/opentelemetry-contrib/src/trace/tracer_source.rs b/opentelemetry-contrib/src/trace/tracer_source.rs
new file mode 100644
index 00000000..fdef67a1
--- /dev/null
+++ b/opentelemetry-contrib/src/trace/tracer_source.rs
@@ -0,0 +1,58 @@
+//! Abstracts away details for acquiring a `Tracer` by instrumented libraries.
+use once_cell::sync::OnceCell;
+use opentelemetry::global::BoxedTracer;
+use std::fmt::Debug;
+
+/// Holds either a borrowed `BoxedTracer` or a factory that can produce one when
+/// and if needed.
+///
+/// This unifies how library code obtains a `Tracer`, optimizing for the common
+/// case where one is never actually needed.
+#[derive(Debug)]
+pub struct TracerSource<'a> {
+    variant: Variant<'a>,
+    tracer: OnceCell<BoxedTracer>,
+}
+
+enum Variant<'a> {
+    Borrowed(&'a BoxedTracer),
+    Lazy(&'a dyn Fn() -> BoxedTracer),
+}
+
+impl<'a> TracerSource<'a> {
+    /// Construct an instance by borrowing the specified `BoxedTracer`.
+    pub fn borrowed(tracer: &'a BoxedTracer) -> Self {
+        Self {
+            variant: Variant::Borrowed(tracer),
+            tracer: OnceCell::new(),
+        }
+    }
+
+    /// Construct an instance which may lazily produce a `BoxedTracer` using
+    /// the specified factory function.
+    pub fn lazy(factory: &'a dyn Fn() -> BoxedTracer) -> Self {
+        Self {
+            variant: Variant::Lazy(factory),
+            tracer: OnceCell::new(),
+        }
+    }
+
+    /// Get the associated `BoxedTracer`, producing it if necessary.
+    pub fn get(&self) -> &BoxedTracer {
+        use Variant::*;
+        match self.variant {
+            Borrowed(tracer) => tracer,
+            Lazy(factory) => self.tracer.get_or_init(factory),
+        }
+    }
+}
+
+impl<'a> Debug for Variant<'a> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        use Variant::*;
+        match self {
+            Borrowed(arg0) => f.debug_tuple("Borrowed").field(arg0).finish(),
+            Lazy(_arg0) => f.debug_tuple("Lazy").finish(),
+        }
+    }
+}
diff --git a/opentelemetry-datadog/CHANGELOG.md b/opentelemetry-datadog/CHANGELOG.md
new file mode 100644
index 00000000..1064e5a5
--- /dev/null
+++ b/opentelemetry-datadog/CHANGELOG.md
@@ -0,0 +1,96 @@
+# Changelog
+
+## vNext
+
+WARNING: The current version relies on features that are only available in the upstream git version of opentelemetry. This should be modified before releasing.
+
+## v0.9.0
+
+### Changed
+
+- Bump MSRV to 1.65 [#1318](https://github.com/open-telemetry/opentelemetry-rust/pull/1318)
+- Bump MSRV to 1.64 [#1203](https://github.com/open-telemetry/opentelemetry-rust/pull/1203)
+
+### Fixed
+
+- Do not set an empty span as the active span when the propagator does not find a remote span.
+- Change type signature of `with_http_client()` to use the provided generic as argument.
+
+## v0.8.0
+
+### Changed
+
+- Update to opentelemetry-api v0.20.0
+
+### Fixed
+
+- Fix the array encoding length of datadog version 05 exporter #1002
+
+## v0.7.0
+
+### Added
+- [Breaking] Add support for unified tagging [#931](https://github.com/open-telemetry/opentelemetry-rust/pull/931).
+
+### Changed
+- Update `opentelemetry` to 0.19
+- Update `opentelemetry-http` to 0.8
+- Update `opentelemetry-semantic-conventions` to 0.11.
+- Bump MSRV to 1.57 [#953](https://github.com/open-telemetry/opentelemetry-rust/pull/953)
+- Send resource with attributes [#880](https://github.com/open-telemetry/opentelemetry-rust/pull/880).
+- Update msgpack accounting for sampling_priority [#903](https://github.com/open-telemetry/opentelemetry-rust/pull/903).
+- Update dependencies and bump MSRV to 1.60 [#969](https://github.com/open-telemetry/opentelemetry-rust/pull/969).
+
+## v0.6.0
+
+### Changed
+
+- Allow custom mapping #770
+- Update to opentelemetry v0.18.0
+- Update to opentelemetry-http v0.7.0
+- Update to opentelemetry-semantic-conventions v0.10.0
+- Parse config endpoint to remove trailing slash #787
+- Add sampling priority tag in spans #792
+
+## v0.5.0
+
+### Changed
+
+- Update to opentelemetry v0.17.0
+- Update to opentelemetry-http v0.6.0
+- Update to opentelemetry-semantic-conventions v0.9.0
+
+## v0.4.0
+
+### Changed
+
+- Update to opentelemetry v0.16.0
+
+## v0.3.1
+
+### Fixed
+
+- `status_code` must be 0 or 1 #580
+
+## v0.3.0
+
+### Changed
+
+- Update to opentelemetry v0.15.0
+
+## v0.2.0
+
+### Changed
+
+- Disable optional features for reqwest
+- Remove default surf features #546
+- Update to opentelemetry v0.14.0
+
+## v0.1.0
+
+### Added
+
+- Datadog exporter #446
+- Datadog propagator #440
+
+### Changed
+- Rename trace config with_default_sampler to with_sampler #482
diff --git a/opentelemetry-datadog/CODEOWNERS b/opentelemetry-datadog/CODEOWNERS
new file mode 100644
index 00000000..d6962a90
--- /dev/null
+++ b/opentelemetry-datadog/CODEOWNERS
@@ -0,0 +1,5 @@
+# Code owners file.
+# This file controls who is tagged for review for any given pull request.
+
+# For anything not explicitly taken by someone else:
+* @open-telemetry/rust-approvers
diff --git a/opentelemetry-datadog/Cargo.toml b/opentelemetry-datadog/Cargo.toml
new file mode 100644
index 00000000..abdd5204
--- /dev/null
+++ b/opentelemetry-datadog/Cargo.toml
@@ -0,0 +1,53 @@
+[package]
+name = "opentelemetry-datadog"
+version = "0.9.0"
+description = "Datadog exporters and propagators for OpenTelemetry"
+homepage = "https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/opentelemetry-datadog"
+repository = "https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/opentelemetry-datadog"
+readme = "README.md"
+categories = [
+    "development-tools::debugging",
+    "development-tools::profiling",
+]
+keywords = ["opentelemetry", "tracing"]
+license = "Apache-2.0"
+edition = "2021"
+rust-version = "1.65"
+
+[package.metadata.docs.rs]
+all-features = true
+rustdoc-args = ["--cfg", "docsrs"]
+
+[features]
+reqwest-blocking-client = ["reqwest/blocking", "opentelemetry-http/reqwest"]
+reqwest-client = ["reqwest", "opentelemetry-http/reqwest"]
+surf-client = ["surf", "opentelemetry-http/surf"]
+
+[dependencies]
+indexmap = "2.0"
+once_cell = "1.12"
+# TODO: Replace with opentelemetry version before release
+opentelemetry = { git = "https://github.com/open-telemetry/opentelemetry-rust.git", branch = "main" , features = ["trace"] }
+opentelemetry_sdk = { git = "https://github.com/open-telemetry/opentelemetry-rust.git", branch = "main" , features = ["trace"] }
+opentelemetry-http = { version = "0.10" }
+opentelemetry-semantic-conventions = { version = "0.13" }
+rmp = "0.8"
+url = "2.2"
+reqwest = { version = "0.11", default-features = false, optional = true }
+surf = { version = "2.0", default-features = false, optional = true }
+thiserror = "1.0"
+itertools = "0.11"
+http = "0.2"
+futures-core = "0.3"
+
+[dev-dependencies]
+async-trait = "0.1"
+base64 = "0.13"
+bytes = "1"
+futures-util = { version = "0.3", default-features = false, features = ["io"] }
+isahc = "1.4"
+opentelemetry_sdk = { version = "0.21", features = 
["trace", "testing"] } + +[[example]] +name = "datadog" +path = "examples/datadog.rs" diff --git a/opentelemetry-datadog/LICENSE b/opentelemetry-datadog/LICENSE new file mode 100644 index 00000000..23a2acab --- /dev/null +++ b/opentelemetry-datadog/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2023 The OpenTelemetry Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/opentelemetry-datadog/README.md b/opentelemetry-datadog/README.md
new file mode 100644
index 00000000..4a80077a
--- /dev/null
+++ b/opentelemetry-datadog/README.md
@@ -0,0 +1,40 @@
+![OpenTelemetry — An observability framework for cloud-native software.][splash]
+
+[splash]: https://raw.githubusercontent.com/open-telemetry/opentelemetry-rust/main/assets/logo-text.png
+
+# OpenTelemetry Datadog
+
+Community-supported vendor integrations for applications instrumented with [`OpenTelemetry`].
+
+[![Crates.io: opentelemetry-datadog](https://img.shields.io/crates/v/opentelemetry-datadog.svg)](https://crates.io/crates/opentelemetry-datadog)
+[![Documentation](https://docs.rs/opentelemetry-datadog/badge.svg)](https://docs.rs/opentelemetry-datadog)
+[![LICENSE](https://img.shields.io/crates/l/opentelemetry-datadog)](./LICENSE)
+[![GitHub Actions CI](https://github.com/open-telemetry/opentelemetry-rust/workflows/CI/badge.svg)](https://github.com/open-telemetry/opentelemetry-rust/actions?query=workflow%3ACI+branch%3Amain)
+[![Slack](https://img.shields.io/badge/slack-@cncf/otel/rust-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C03GDP0H023)
+
+## Overview
+
+[`OpenTelemetry`] is a collection of tools, APIs, and SDKs used to instrument,
+generate, collect, and export telemetry data (metrics, logs, and traces) for
+analysis in order to understand your software's performance and behavior. This
+crate provides additional propagators and exporters for sending telemetry data
+to [`Datadog`].
+
+## Features
+
+`opentelemetry-datadog` supports the following features:
+
+- `reqwest-blocking-client`: use the `reqwest` blocking http client to send spans.
+- `reqwest-client`: use the `reqwest` http client to send spans.
+- `surf-client`: use the `surf` http client to send spans.
+
+## Kitchen Sink Full Configuration
+
+[Example](https://docs.rs/opentelemetry-datadog/latest/opentelemetry_datadog/#kitchen-sink-full-configuration) showing how to override all configuration options. See the
+[`DatadogPipelineBuilder`] docs for details of each option.
+
+[`DatadogPipelineBuilder`]: https://docs.rs/opentelemetry-datadog/latest/opentelemetry_datadog/struct.DatadogPipelineBuilder.html
+
+[`Datadog`]: https://www.datadoghq.com/
+[`OpenTelemetry`]: https://crates.io/crates/opentelemetry
diff --git a/opentelemetry-datadog/examples/README.md b/opentelemetry-datadog/examples/README.md
new file mode 100644
index 00000000..c88351d6
--- /dev/null
+++ b/opentelemetry-datadog/examples/README.md
@@ -0,0 +1,16 @@
+# Datadog Exporter Example
+
+Sends spans to a datadog-agent collector.
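+
+The full example source lives in [`datadog.rs`](./datadog.rs) in this directory.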
+
+## Usage
+
+First, run version 7.22.0 or above of the datadog-agent locally, as described [here](https://docs.datadoghq.com/agent/).
+
+Then run the example to report spans:
+
+```shell
+# cd opentelemetry-datadog
+$ cargo run --example datadog
+```
+
+Traces should appear in the Datadog APM dashboard.
diff --git a/opentelemetry-datadog/examples/datadog.rs b/opentelemetry-datadog/examples/datadog.rs
new file mode 100644
index 00000000..1bf01f90
--- /dev/null
+++ b/opentelemetry-datadog/examples/datadog.rs
@@ -0,0 +1,40 @@
+use opentelemetry::{
+    global::{self, shutdown_tracer_provider},
+    trace::{Span, TraceContextExt, Tracer},
+    Key,
+};
+use opentelemetry_datadog::{new_pipeline, ApiVersion};
+use std::thread;
+use std::time::Duration;
+
+fn bar() {
+    let tracer = global::tracer("component-bar");
+    let mut span = tracer.start("bar");
+    span.set_attribute(Key::new("span.type").string("sql"));
+    span.set_attribute(Key::new("sql.query").string("SELECT * FROM table"));
+    thread::sleep(Duration::from_millis(6));
+    span.end()
+}
+
+fn main() -> Result<(), Box<dyn std::error::Error>> {
+    let tracer = new_pipeline()
+        .with_service_name("trace-demo")
+        .with_api_version(ApiVersion::Version05)
+        .install_simple()?;
+
+    tracer.in_span("foo", |cx| {
+        let span = cx.span();
+        span.set_attribute(Key::new("span.type").string("web"));
+        span.set_attribute(Key::new("http.url").string("http://localhost:8080/foo"));
+        span.set_attribute(Key::new("http.method").string("GET"));
+        span.set_attribute(Key::new("http.status_code").i64(200));
+
+        thread::sleep(Duration::from_millis(6));
+        bar();
+        thread::sleep(Duration::from_millis(6));
+    });
+
+    shutdown_tracer_provider();
+
+    Ok(())
+}
diff --git a/opentelemetry-datadog/src/exporter/intern.rs b/opentelemetry-datadog/src/exporter/intern.rs
new file mode 100644
index 00000000..4a483c8d
--- /dev/null
+++ b/opentelemetry-datadog/src/exporter/intern.rs
@@ -0,0 +1,53 @@
+use indexmap::set::IndexSet;
+
+pub(crate) struct StringInterner {
+    data: IndexSet<String>,
+}
+
+impl StringInterner {
+    pub(crate) fn new() -> StringInterner {
+        StringInterner {
+            data: Default::default(),
+        }
+    }
+
+    // Returns the index of `data` in the set, inserting it first if it is not
+    // already present.
+    pub(crate) fn intern(&mut self, data: &str) -> u32 {
+        if let Some(idx) = self.data.get_index_of(data) {
+            return idx as u32;
+        }
+        self.data.insert_full(data.to_string()).0 as u32
+    }
+
+    pub(crate) fn iter(&self) -> impl Iterator<Item = &String> {
+        self.data.iter()
+    }
+
+    pub(crate) fn len(&self) -> u32 {
+        self.data.len() as u32
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_intern() {
+        let a = "a".to_string();
+        let b = "b";
+        let c = "c";
+
+        let mut intern = StringInterner::new();
+        let a_idx = intern.intern(a.as_str());
+        let b_idx = intern.intern(b);
+        let c_idx = intern.intern(c);
+        let d_idx = intern.intern(a.as_str());
+        let e_idx = intern.intern(c);
+
+        assert_eq!(a_idx, 0);
+        assert_eq!(b_idx, 1);
+        assert_eq!(c_idx, 2);
+        assert_eq!(d_idx, a_idx);
+        assert_eq!(e_idx, c_idx);
+    }
+}
diff --git a/opentelemetry-datadog/src/exporter/mod.rs b/opentelemetry-datadog/src/exporter/mod.rs
new file mode 100644
index 00000000..9b071833
--- /dev/null
+++ b/opentelemetry-datadog/src/exporter/mod.rs
@@ -0,0 +1,514 @@
+mod intern;
+mod model;
+
+pub use model::ApiVersion;
+pub use model::Error;
+pub use model::FieldMappingFn;
+
+use crate::exporter::model::FieldMapping;
+use futures_core::future::BoxFuture;
+use http::{Method, Request, Uri};
+use itertools::Itertools;
+use opentelemetry::{global, trace::TraceError, KeyValue};
+use opentelemetry_http::{HttpClient, ResponseExt};
+use 
opentelemetry_sdk::{
+    export::trace::{ExportResult, SpanData, SpanExporter},
+    resource::{ResourceDetector, SdkProvidedResourceDetector},
+    runtime::RuntimeChannel,
+    trace::{Config, Tracer, TracerProvider},
+    Resource,
+};
+use opentelemetry_semantic_conventions as semcov;
+use std::borrow::Cow;
+use std::fmt::{Debug, Formatter};
+use std::sync::Arc;
+use std::time::Duration;
+use url::Url;
+
+use self::model::unified_tags::UnifiedTags;
+
+/// Default Datadog collector endpoint
+const DEFAULT_AGENT_ENDPOINT: &str = "http://127.0.0.1:8126";
+
+/// Header name used to inform the Datadog agent of the number of traces in the payload
+const DATADOG_TRACE_COUNT_HEADER: &str = "X-Datadog-Trace-Count";
+
+/// Header names used to inform the Datadog agent of the tracer's language and version
+const DATADOG_META_LANG_HEADER: &str = "Datadog-Meta-Lang";
+const DATADOG_META_TRACER_VERSION_HEADER: &str = "Datadog-Meta-Tracer-Version";
+
+// Struct to hold the mapping between OpenTelemetry spans and Datadog spans.
+pub struct Mapping {
+    resource: Option<FieldMapping>,
+    name: Option<FieldMapping>,
+    service_name: Option<FieldMapping>,
+}
+
+impl Mapping {
+    pub fn new(
+        resource: Option<FieldMapping>,
+        name: Option<FieldMapping>,
+        service_name: Option<FieldMapping>,
+    ) -> Self {
+        Mapping {
+            resource,
+            name,
+            service_name,
+        }
+    }
+    pub fn empty() -> Self {
+        Self::new(None, None, None)
+    }
+}
+
+/// Datadog span exporter
+pub struct DatadogExporter {
+    client: Arc<dyn HttpClient>,
+    request_url: Uri,
+    model_config: ModelConfig,
+    api_version: ApiVersion,
+    mapping: Mapping,
+    unified_tags: UnifiedTags,
+}
+
+impl DatadogExporter {
+    fn new(
+        model_config: ModelConfig,
+        request_url: Uri,
+        api_version: ApiVersion,
+        client: Arc<dyn HttpClient>,
+        mapping: Mapping,
+        unified_tags: UnifiedTags,
+    ) -> Self {
+        DatadogExporter {
+            client,
+            request_url,
+            model_config,
+            api_version,
+            mapping,
+            unified_tags,
+        }
+    }
+
+    fn build_request(&self, batch: Vec<SpanData>) -> Result<Request<Vec<u8>>, TraceError> {
+        let traces: Vec<Vec<SpanData>> = group_into_traces(batch);
+        let trace_count = traces.len();
+        let data = self.api_version.encode(
+            &self.model_config,
+            traces,
+            &self.mapping,
+            &self.unified_tags,
+        )?;
+        let req = Request::builder()
+            .method(Method::POST)
+            .uri(self.request_url.clone())
+            .header(http::header::CONTENT_TYPE, self.api_version.content_type())
+            .header(DATADOG_TRACE_COUNT_HEADER, trace_count)
+            .header(DATADOG_META_LANG_HEADER, "rust")
+            .header(
+                DATADOG_META_TRACER_VERSION_HEADER,
+                env!("CARGO_PKG_VERSION"),
+            )
+            .body(data)
+            .map_err::<Error, _>(Into::into)?;
+
+        Ok(req)
+    }
+}
+
+impl Debug for DatadogExporter {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("DatadogExporter")
+            .field("model_config", &self.model_config)
+            .field("request_url", &self.request_url)
+            .field("api_version", &self.api_version)
+            .field("client", &self.client)
+            .field("resource_mapping", &mapping_debug(&self.mapping.resource))
+            .field("name_mapping", &mapping_debug(&self.mapping.name))
+            .field(
+                "service_name_mapping",
+                &mapping_debug(&self.mapping.service_name),
+            )
+            .finish()
+    }
+}
+
+/// Create a new Datadog exporter pipeline builder.
+pub fn new_pipeline() -> DatadogPipelineBuilder {
+    DatadogPipelineBuilder::default()
+}
+
+/// Builder for the Datadog exporter pipeline.
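+///
+/// Obtain one via [`new_pipeline`], tune it with the `with_*` methods, and
+/// finish with `install_simple`, `install_batch`, or `build_exporter`.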
+pub struct DatadogPipelineBuilder {
+    agent_endpoint: String,
+    trace_config: Option<Config>,
+    api_version: ApiVersion,
+    client: Option<Arc<dyn HttpClient>>,
+    mapping: Mapping,
+    unified_tags: UnifiedTags,
+}
+
+impl Default for DatadogPipelineBuilder {
+    fn default() -> Self {
+        DatadogPipelineBuilder {
+            agent_endpoint: DEFAULT_AGENT_ENDPOINT.to_string(),
+            trace_config: None,
+            mapping: Mapping::empty(),
+            api_version: ApiVersion::Version05,
+            unified_tags: UnifiedTags::new(),
+            #[cfg(all(
+                not(feature = "reqwest-client"),
+                not(feature = "reqwest-blocking-client"),
+                not(feature = "surf-client"),
+            ))]
+            client: None,
+            #[cfg(all(
+                not(feature = "reqwest-client"),
+                not(feature = "reqwest-blocking-client"),
+                feature = "surf-client"
+            ))]
+            client: Some(Arc::new(surf::Client::new())),
+            #[cfg(all(
+                not(feature = "surf-client"),
+                not(feature = "reqwest-blocking-client"),
+                feature = "reqwest-client"
+            ))]
+            client: Some(Arc::new(reqwest::Client::new())),
+            #[cfg(feature = "reqwest-blocking-client")]
+            client: Some(Arc::new(reqwest::blocking::Client::new())),
+        }
+    }
+}
+
+impl Debug for DatadogPipelineBuilder {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("DatadogPipelineBuilder")
+            .field("agent_endpoint", &self.agent_endpoint)
+            .field("trace_config", &self.trace_config)
+            .field("client", &self.client)
+            .field("resource_mapping", &mapping_debug(&self.mapping.resource))
+            .field("name_mapping", &mapping_debug(&self.mapping.name))
+            .field(
+                "service_name_mapping",
+                &mapping_debug(&self.mapping.service_name),
+            )
+            .finish()
+    }
+}
+
+impl DatadogPipelineBuilder {
+    /// Build a new exporter.
+    ///
+    /// This is useful if you are manually constructing a pipeline.
+    pub fn build_exporter(mut self) -> Result<DatadogExporter, TraceError> {
+        let (_, service_name) = self.build_config_and_service_name();
+        self.build_exporter_with_service_name(service_name)
+    }
+
+    fn build_config_and_service_name(&mut self) -> (Config, String) {
+        let service_name = self.unified_tags.service();
+        if let Some(service_name) = service_name {
+            let config = if let Some(mut cfg) = self.trace_config.take() {
+                cfg.resource = Cow::Owned(Resource::new(
+                    cfg.resource
+                        .iter()
+                        .filter(|(k, _v)| **k != semcov::resource::SERVICE_NAME)
+                        .map(|(k, v)| KeyValue::new(k.clone(), v.clone())),
+                ));
+                cfg
+            } else {
+                Config {
+                    resource: Cow::Owned(Resource::empty()),
+                    ..Default::default()
+                }
+            };
+            (config, service_name)
+        } else {
+            let service_name = SdkProvidedResourceDetector
+                .detect(Duration::from_secs(0))
+                .get(semcov::resource::SERVICE_NAME)
+                .unwrap()
+                .to_string();
+            (
+                Config {
+                    // Use an empty resource to prevent the TracerProvider from
+                    // assigning a service name of its own.
+                    resource: Cow::Owned(Resource::empty()),
+                    ..Default::default()
+                },
+                service_name,
+            )
+        }
+    }
+
+    // Parse the endpoint and append the path based on the version.
+    // Keep the query and host the same.
+    fn build_endpoint(agent_endpoint: &str, version: &str) -> Result<Uri, TraceError> {
+        // build agent endpoint based on version
+        let mut endpoint = agent_endpoint
+            .parse::<Url>()
+            .map_err::<Error, _>(Into::into)?;
+        let mut paths = endpoint
+            .path_segments()
+            .map(|c| c.filter(|s| !s.is_empty()).collect::<Vec<&str>>())
+            .unwrap_or_default();
+        paths.push(version);
+
+        let path_str = paths.join("/");
+        endpoint.set_path(path_str.as_str());
+
+        Ok(endpoint.as_str().parse().map_err::<Error, _>(Into::into)?)
+    }
+
+    fn build_exporter_with_service_name(
+        self,
+        service_name: String,
+    ) -> Result<DatadogExporter, TraceError> {
+        if let Some(client) = self.client {
+            let model_config = ModelConfig { service_name };
+
+            let exporter = DatadogExporter::new(
+                model_config,
+                Self::build_endpoint(&self.agent_endpoint, self.api_version.path())?,
+                self.api_version,
+                client,
+                self.mapping,
+                self.unified_tags,
+            );
+            Ok(exporter)
+        } else {
+            Err(Error::NoHttpClient.into())
+        }
+    }
+
+    /// Install the Datadog trace exporter pipeline using a simple span processor.
+    pub fn install_simple(mut self) -> Result<Tracer, TraceError> {
+        let (config, service_name) = self.build_config_and_service_name();
+        let exporter = self.build_exporter_with_service_name(service_name)?;
+        let mut provider_builder = TracerProvider::builder().with_simple_exporter(exporter);
+        provider_builder = provider_builder.with_config(config);
+        let provider = provider_builder.build();
+        let tracer = opentelemetry::trace::TracerProvider::versioned_tracer(
+            &provider,
+            "opentelemetry-datadog",
+            Some(env!("CARGO_PKG_VERSION")),
+            Some(semcov::SCHEMA_URL),
+            None,
+        );
+        let _ = global::set_tracer_provider(provider);
+        Ok(tracer)
+    }
+
+    /// Install the Datadog trace exporter pipeline using a batch span processor with the specified
+    /// runtime.
+    pub fn install_batch<R: RuntimeChannel>(mut self, runtime: R) -> Result<Tracer, TraceError> {
+        let (config, service_name) = self.build_config_and_service_name();
+        let exporter = self.build_exporter_with_service_name(service_name)?;
+        let mut provider_builder = TracerProvider::builder().with_batch_exporter(exporter, runtime);
+        provider_builder = provider_builder.with_config(config);
+        let provider = provider_builder.build();
+        let tracer = opentelemetry::trace::TracerProvider::versioned_tracer(
+            &provider,
+            "opentelemetry-datadog",
+            Some(env!("CARGO_PKG_VERSION")),
+            Some(semcov::SCHEMA_URL),
+            None,
+        );
+        let _ = global::set_tracer_provider(provider);
+        Ok(tracer)
+    }
+
+    /// Assign the service name under which to group traces
+    pub fn with_service_name<T: Into<String>>(mut self, service_name: T) -> Self {
+        self.unified_tags.set_service(Some(service_name.into()));
+        self
+    }
+
+    /// Assign the version under which to group traces
+    pub fn with_version<T: Into<String>>(mut self, version: T) -> Self {
+        self.unified_tags.set_version(Some(version.into()));
+        self
+    }
+
+    /// Assign the env under which to group traces
+    pub fn with_env<T: Into<String>>(mut self, env: T) -> Self {
+        self.unified_tags.set_env(Some(env.into()));
+        self
+    }
+
+    /// Assign the Datadog collector endpoint.
+    ///
+    /// The endpoint of the datadog agent, by default it is `http://127.0.0.1:8126`.
+    pub fn with_agent_endpoint<T: Into<String>>(mut self, endpoint: T) -> Self {
+        self.agent_endpoint = endpoint.into();
+        self
+    }
+
+    /// Choose the http client used by the uploader
+    pub fn with_http_client<T: HttpClient + 'static>(mut self, client: T) -> Self {
+        self.client = Some(Arc::new(client));
+        self
+    }
+
+    /// Assign the SDK trace configuration
+    pub fn with_trace_config(mut self, config: Config) -> Self {
+        self.trace_config = Some(config);
+        self
+    }
+
+    /// Set the version of the Datadog trace ingestion API
+    pub fn with_api_version(mut self, api_version: ApiVersion) -> Self {
+        self.api_version = api_version;
+        self
+    }
+
+    /// Customize the value used for the `resource` field in Datadog spans.
+    /// See [`FieldMappingFn`] for details.
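+    ///
+    /// A minimal usage sketch (the closure here returns a fixed, illustrative
+    /// string; any `&str` borrowed from the span or config also works):
+    ///
+    /// ```no_run
+    /// # use opentelemetry_datadog::new_pipeline;
+    /// let builder = new_pipeline()
+    ///     .with_resource_mapping(|_span, _config| "custom resource");
+    /// ```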
+    pub fn with_resource_mapping<F>(mut self, f: F) -> Self
+    where
+        F: for<'a> Fn(&'a SpanData, &'a ModelConfig) -> &'a str + Send + Sync + 'static,
+    {
+        self.mapping.resource = Some(Arc::new(f));
+        self
+    }
+
+    /// Customize the value used for the `name` field in Datadog spans.
+    /// See [`FieldMappingFn`] for details.
+    pub fn with_name_mapping<F>(mut self, f: F) -> Self
+    where
+        F: for<'a> Fn(&'a SpanData, &'a ModelConfig) -> &'a str + Send + Sync + 'static,
+    {
+        self.mapping.name = Some(Arc::new(f));
+        self
+    }
+
+    /// Customize the value used for the `service_name` field in Datadog spans.
+    /// See [`FieldMappingFn`] for details.
+    pub fn with_service_name_mapping<F>(mut self, f: F) -> Self
+    where
+        F: for<'a> Fn(&'a SpanData, &'a ModelConfig) -> &'a str + Send + Sync + 'static,
+    {
+        self.mapping.service_name = Some(Arc::new(f));
+        self
+    }
+}
+
+fn group_into_traces(spans: Vec<SpanData>) -> Vec<Vec<SpanData>> {
+    spans
+        .into_iter()
+        .into_group_map_by(|span_data| span_data.span_context.trace_id())
+        .into_values()
+        .collect()
+}
+
+async fn send_request(
+    client: Arc<dyn HttpClient>,
+    request: http::Request<Vec<u8>>,
+) -> ExportResult {
+    let _ = client.send(request).await?.error_for_status()?;
+    Ok(())
+}
+
+impl SpanExporter for DatadogExporter {
+    /// Export spans to datadog-agent
+    fn export(&mut self, batch: Vec<SpanData>) -> BoxFuture<'static, ExportResult> {
+        let request = match self.build_request(batch) {
+            Ok(req) => req,
+            Err(err) => return Box::pin(std::future::ready(Err(err))),
+        };
+
+        let client = self.client.clone();
+        Box::pin(send_request(client, request))
+    }
+}
+
+/// Helper struct used to customize the mapping between OpenTelemetry spans and Datadog spans.
+///
+/// This struct will be passed to [`FieldMappingFn`]
+#[derive(Default, Debug)]
+#[non_exhaustive]
+pub struct ModelConfig {
+    pub service_name: String,
+}
+
+fn mapping_debug(f: &Option<FieldMapping>) -> String {
+    if f.is_some() {
+        "custom mapping"
+    } else {
+        "default mapping"
+    }
+    .to_string()
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::ApiVersion::Version05;
+
+    use crate::exporter::model::tests::get_span;
+
+    #[test]
+    fn test_out_of_order_group() {
+        let batch = vec![get_span(1, 1, 1), get_span(2, 2, 2), get_span(1, 1, 3)];
+        let expected = vec![
+            vec![get_span(1, 1, 1), get_span(1, 1, 3)],
+            vec![get_span(2, 2, 2)],
+        ];
+
+        let mut traces = group_into_traces(batch);
+        // We need to sort the output in order to compare, but this is not required by the Datadog agent
+        traces.sort_by_key(|t| u128::from_be_bytes(t[0].span_context.trace_id().to_bytes()));
+
+        assert_eq!(traces, expected);
+    }
+
+    #[test]
+    fn test_agent_endpoint_with_api_version() {
+        let with_tail_slash =
+            DatadogPipelineBuilder::build_endpoint("http://localhost:8126/", Version05.path());
+        let without_tail_slash =
+            DatadogPipelineBuilder::build_endpoint("http://localhost:8126", Version05.path());
+        let with_query = DatadogPipelineBuilder::build_endpoint(
+            "http://localhost:8126?api_key=123",
+            Version05.path(),
+        );
+        let invalid = DatadogPipelineBuilder::build_endpoint(
+            "http://localhost:klsajfjksfh",
+            Version05.path(),
+        );
+
+        assert_eq!(
+            with_tail_slash.unwrap().to_string(),
+            "http://localhost:8126/v0.5/traces"
+        );
+        assert_eq!(
+            without_tail_slash.unwrap().to_string(),
+            "http://localhost:8126/v0.5/traces"
+        );
+        assert_eq!(
+            with_query.unwrap().to_string(),
+            "http://localhost:8126/v0.5/traces?api_key=123"
+        );
+        assert!(invalid.is_err())
+    }
+
+    #[derive(Debug)]
+    struct DummyClient;
+
+    #[async_trait::async_trait]
+    impl HttpClient for DummyClient {
+        async fn send(
+            &self,
+            
_request: Request<Vec<u8>>,
+        ) -> Result<http::Response<bytes::Bytes>, opentelemetry_http::HttpError> {
+            Ok(http::Response::new("dummy response".into()))
+        }
+    }
+
+    #[test]
+    fn test_custom_http_client() {
+        new_pipeline()
+            .with_http_client(DummyClient)
+            .build_exporter()
+            .unwrap();
+    }
+}
diff --git a/opentelemetry-datadog/src/exporter/model/mod.rs b/opentelemetry-datadog/src/exporter/model/mod.rs
new file mode 100644
index 00000000..f0b626a3
--- /dev/null
+++ b/opentelemetry-datadog/src/exporter/model/mod.rs
@@ -0,0 +1,298 @@
+use crate::exporter::ModelConfig;
+use http::uri;
+use opentelemetry_sdk::export::{
+    trace::{self, SpanData},
+    ExportError,
+};
+use std::fmt::Debug;
+use url::ParseError;
+
+use self::unified_tags::UnifiedTags;
+
+use super::Mapping;
+
+pub mod unified_tags;
+mod v03;
+mod v05;
+
+// todo: we should follow the same mapping defined in https://github.com/DataDog/datadog-agent/blob/main/pkg/trace/api/otlp.go
+
+// https://github.com/DataDog/dd-trace-js/blob/c89a35f7d27beb4a60165409376e170eacb194c5/packages/dd-trace/src/constants.js#L4
+static SAMPLING_PRIORITY_KEY: &str = "_sampling_priority_v1";
+
+/// Custom mapping between OpenTelemetry spans and Datadog spans.
+///
+/// Users can provide a custom function to change the mapping. It currently supports customizing the following
+/// fields in the Datadog span protocol.
+///
+/// |field name|default value|
+/// |---------------|-------------|
+/// |service name| service name configuration from [`ModelConfig`]|
+/// |name | opentelemetry instrumentation library name |
+/// |resource| opentelemetry name|
+///
+/// The function takes a reference to [`SpanData`] and a reference to [`ModelConfig`] as parameters.
+/// It should return a `&str` which will be used as the value for the field.
+///
+/// If no custom mapping is provided, the default mapping detailed above will be used.
+///
+/// For example,
+/// ```no_run
+/// use opentelemetry_datadog::{ApiVersion, new_pipeline};
+/// fn main() -> Result<(), opentelemetry::trace::TraceError> {
+///     let tracer = new_pipeline()
+///         .with_service_name("my_app")
+///         .with_api_version(ApiVersion::Version05)
+///         // the custom mapping below will change every span's name to "datadog spans"
+///         .with_name_mapping(|span, model_config| {
+///             "datadog spans"
+///         })
+///         .with_agent_endpoint("http://localhost:8126")
+///         .install_batch(opentelemetry_sdk::runtime::Tokio)?;
+///
+///     Ok(())
+/// }
+/// ```
+pub type FieldMappingFn = dyn for<'a> Fn(&'a SpanData, &'a ModelConfig) -> &'a str + Send + Sync;
+
+pub(crate) type FieldMapping = std::sync::Arc<FieldMappingFn>;
+
+// Datadog uses some magic tags in their models. There is no recommended mapping defined in
+// the opentelemetry spec. Below is the default mapping we use. Users can override it by providing
+// their own implementations.
+fn default_service_name_mapping<'a>(_span: &'a SpanData, config: &'a ModelConfig) -> &'a str {
+    config.service_name.as_str()
+}
+
+fn default_name_mapping<'a>(span: &'a SpanData, _config: &'a ModelConfig) -> &'a str {
+    span.instrumentation_lib.name.as_ref()
+}
+
+fn default_resource_mapping<'a>(span: &'a SpanData, _config: &'a ModelConfig) -> &'a str {
+    span.name.as_ref()
+}
+
+/// Wrapper type for errors from the opentelemetry datadog exporter
+#[derive(Debug, thiserror::Error)]
+pub enum Error {
+    /// Message pack error
+    #[error("message pack error")]
+    MessagePackError,
+    /// No http client found. 
Users should provide one or enable one of the http client features
+    #[error("http client must be set, users can enable the reqwest or surf feature to use the http client implementation within the crate")]
+    NoHttpClient,
+    /// Http requests failed with following errors
+    #[error(transparent)]
+    RequestError(#[from] http::Error),
+    /// The Uri was invalid
+    #[error("invalid url {0}")]
+    InvalidUri(String),
+    /// Other errors
+    #[error("{0}")]
+    Other(String),
+}
+
+impl ExportError for Error {
+    fn exporter_name(&self) -> &'static str {
+        "datadog"
+    }
+}
+
+impl From<rmp::encode::ValueWriteError> for Error {
+    fn from(_: rmp::encode::ValueWriteError) -> Self {
+        Self::MessagePackError
+    }
+}
+
+impl From<ParseError> for Error {
+    fn from(err: ParseError) -> Self {
+        Self::InvalidUri(err.to_string())
+    }
+}
+
+impl From<uri::InvalidUri> for Error {
+    fn from(err: uri::InvalidUri) -> Self {
+        Self::InvalidUri(err.to_string())
+    }
+}
+
+/// Version of the Datadog trace ingestion API
+#[derive(Debug, Copy, Clone)]
+#[non_exhaustive]
+pub enum ApiVersion {
+    /// Version 0.3
+    Version03,
+    /// Version 0.5 - requires datadog-agent v7.22.0 or above
+    Version05,
+}
+
+impl ApiVersion {
+    pub(crate) fn path(self) -> &'static str {
+        match self {
+            ApiVersion::Version03 => "/v0.3/traces",
+            ApiVersion::Version05 => "/v0.5/traces",
+        }
+    }
+
+    pub(crate) fn content_type(self) -> &'static str {
+        match self {
+            ApiVersion::Version03 => "application/msgpack",
+            ApiVersion::Version05 => "application/msgpack",
+        }
+    }
+
+    pub(crate) fn encode(
+        self,
+        model_config: &ModelConfig,
+        traces: Vec<Vec<trace::SpanData>>,
+        mapping: &Mapping,
+        unified_tags: &UnifiedTags,
+    ) -> Result<Vec<u8>, Error> {
+        match self {
+            Self::Version03 => v03::encode(
+                model_config,
+                traces,
+                |span, config| match &mapping.service_name {
+                    Some(f) => f(span, config),
+                    None => default_service_name_mapping(span, config),
+                },
+                |span, config| match &mapping.name {
+                    Some(f) => f(span, config),
+                    None => default_name_mapping(span, config),
+                },
+                |span, config| match &mapping.resource {
+                    Some(f) => f(span, config),
+                    None => default_resource_mapping(span, config),
+                },
+            ),
+            Self::Version05 => v05::encode(
+                model_config,
+                traces,
+                |span, config| match &mapping.service_name {
+                    Some(f) => f(span, config),
+                    None => default_service_name_mapping(span, config),
+                },
+                |span, config| match &mapping.name {
+                    Some(f) => f(span, config),
+                    None => default_name_mapping(span, config),
+                },
+                |span, config| match &mapping.resource {
+                    Some(f) => f(span, config),
+                    None => default_resource_mapping(span, config),
+                },
+                unified_tags,
+            ),
+        }
+    }
+}
+
+#[cfg(test)]
+pub(crate) mod tests {
+    use super::*;
+    use opentelemetry::{
+        trace::{SpanContext, SpanId, SpanKind, Status, TraceFlags, TraceId, TraceState},
+        KeyValue,
+    };
+    use opentelemetry_sdk::{
+        self,
+        trace::{SpanEvents, SpanLinks},
+        InstrumentationLibrary, Resource,
+    };
+    use std::borrow::Cow;
+    use std::time::{Duration, SystemTime};
+
+    fn get_traces() -> Vec<Vec<trace::SpanData>> {
+        vec![vec![get_span(7, 1, 99)]]
+    }
+
+    pub(crate) fn get_span(trace_id: u128, parent_span_id: u64, span_id: u64) -> trace::SpanData {
+        let span_context = SpanContext::new(
+            TraceId::from_u128(trace_id),
+            SpanId::from_u64(span_id),
+            TraceFlags::default(),
+            false,
+            TraceState::default(),
+        );
+
+        let start_time = SystemTime::UNIX_EPOCH;
+        let end_time = start_time.checked_add(Duration::from_secs(1)).unwrap();
+
+        let attributes = vec![KeyValue::new("span.type", "web")];
+        let events = SpanEvents::default();
+        let links = SpanLinks::default();
+        let resource = Resource::new(vec![KeyValue::new("host.name", "test")]);
+
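+        // Fixed timestamps and attributes keep the encoded payload
+        // deterministic, so the base64 snapshot below can be compared
+        // byte-for-byte.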
trace::SpanData { + span_context, + parent_span_id: SpanId::from_u64(parent_span_id), + span_kind: SpanKind::Client, + name: "resource".into(), + start_time, + end_time, + attributes, + dropped_attributes_count: 0, + events, + links, + status: Status::Ok, + resource: Cow::Owned(resource), + instrumentation_lib: InstrumentationLibrary::new( + "component", + None::<&'static str>, + None::<&'static str>, + None, + ), + } + } + + #[test] + fn test_encode_v03() -> Result<(), Box<dyn std::error::Error>> { + let traces = get_traces(); + let model_config = ModelConfig { + service_name: "service_name".to_string(), + ..Default::default() + }; + let encoded = base64::encode(ApiVersion::Version03.encode( + &model_config, + traces, + &Mapping::empty(), + &UnifiedTags::new(), + )?); + + assert_eq!(encoded.as_str(), "kZGMpHR5cGWjd2Vip3NlcnZpY2Wsc2VydmljZV9uYW1lpG5hbWWpY29tcG9uZW\ + 50qHJlc291cmNlqHJlc291cmNlqHRyYWNlX2lkzwAAAAAAAAAHp3NwYW5faWTPAAAAAAAAAGOpcGFyZW50X2lkzwAAAA\ + AAAAABpXN0YXJ00wAAAAAAAAAAqGR1cmF0aW9u0wAAAAA7msoApWVycm9y0gAAAACkbWV0YYKpaG9zdC5uYW1lpHRlc3\ + Spc3Bhbi50eXBlo3dlYqdtZXRyaWNzgbVfc2FtcGxpbmdfcHJpb3JpdHlfdjHLAAAAAAAAAAA="); + + Ok(()) + } + + #[test] + fn test_encode_v05() -> Result<(), Box<dyn std::error::Error>> { + let traces = get_traces(); + let model_config = ModelConfig { + service_name: "service_name".to_string(), + ..Default::default() + }; + + let mut unified_tags = UnifiedTags::new(); + unified_tags.set_env(Some(String::from("test-env"))); + unified_tags.set_version(Some(String::from("test-version"))); + unified_tags.set_service(Some(String::from("test-service"))); + + let _encoded = base64::encode(ApiVersion::Version05.encode( + &model_config, + traces, + &Mapping::empty(), + &unified_tags, + )?); + + // TODO: Need someone to generate the expected result or instructions to do so.
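+ // A sketch of one way to produce it: temporarily print the encoded payload from + // this test, e.g. `println!("{}", _encoded);`, run `cargo test -- --nocapture`, + // and paste the output into the commented assertion below.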
+ // assert_eq!(encoded.as_str(), "kp6jd2VirHNlcnZpY2VfbmFtZaljb21wb25lbnSocmVzb3VyY2WpaG9zdC5uYW\ + // 1lpHRlc3Snc2VydmljZax0ZXN0LXNlcnZpY2WjZW52qHRlc3QtZW52p3ZlcnNpb26sdGVzdC12ZXJzaW9uqXNwYW4udH\ + // lwZbVfc2FtcGxpbmdfcHJpb3JpdHlfdjGRkZzOAAAAAc4AAAACzgAAAAPPAAAAAAAAAAfPAAAAAAAAAGPPAAAAAAAAAA\ + // HTAAAAAAAAAADTAAAAADuaygDSAAAAAIXOAAAABM4AAAAFzgAAAAbOAAAAB84AAAAIzgAAAAnOAAAACs4AAAALzgAAAA\ + // zOAAAAAIHOAAAADcsAAAAAAAAAAM4AAAAA"); + + Ok(()) + } +} diff --git a/opentelemetry-datadog/src/exporter/model/unified_tags.rs b/opentelemetry-datadog/src/exporter/model/unified_tags.rs new file mode 100644 index 00000000..e4e835c5 --- /dev/null +++ b/opentelemetry-datadog/src/exporter/model/unified_tags.rs @@ -0,0 +1,123 @@ +/// Unified tags - See: https://docs.datadoghq.com/getting_started/tagging/unified_service_tagging + +pub struct UnifiedTags { + pub service: UnifiedTagField, + pub env: UnifiedTagField, + pub version: UnifiedTagField, +} + +impl UnifiedTags { + pub fn new() -> Self { + UnifiedTags { + service: UnifiedTagField::new(UnifiedTagEnum::Service), + env: UnifiedTagField::new(UnifiedTagEnum::Env), + version: UnifiedTagField::new(UnifiedTagEnum::Version), + } + } + pub fn set_service(&mut self, service: Option<String>) { + self.service.value = service; + } + pub fn set_version(&mut self, version: Option<String>) { + self.version.value = version; + } + pub fn set_env(&mut self, env: Option<String>) { + self.env.value = env; + } + pub fn service(&self) -> Option<String> { + self.service.value.clone() + } + pub fn compute_attribute_size(&self) -> u32 { + self.service.len() + self.env.len() + self.version.len() + } +} + +pub struct UnifiedTagField { + pub value: Option<String>, + pub kind: UnifiedTagEnum, +} + +impl UnifiedTagField { + pub fn new(kind: UnifiedTagEnum) -> Self { + UnifiedTagField { + value: kind.find_unified_tag_value(), + kind, + } + } + pub fn len(&self) -> u32 { + if self.value.is_some() { + return 1; + } + 0 + } + pub fn get_tag_name(&self) -> &'static str { + self.kind.get_tag_name() + } +} + +pub enum UnifiedTagEnum { + Service, + Version, + Env, +} + +impl UnifiedTagEnum { + fn get_env_variable_name(&self) -> &'static str { + match self { + UnifiedTagEnum::Service => "DD_SERVICE", + UnifiedTagEnum::Version => "DD_VERSION", + UnifiedTagEnum::Env => "DD_ENV", + } + } + fn get_tag_name(&self) -> &'static str { + match self { + UnifiedTagEnum::Service => "service", + UnifiedTagEnum::Version => "version", + UnifiedTagEnum::Env => "env", + } + } + fn find_unified_tag_value(&self) -> Option<String> { + let env_name_to_check = self.get_env_variable_name(); + match std::env::var(env_name_to_check) { + Ok(tag_value) => Some(tag_value.to_lowercase()), + _ => None, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_service() { + std::env::set_var("DD_SERVICE", "test-SERVICE"); + let mut unified_tags = UnifiedTags::new(); + assert_eq!("test-service", unified_tags.service.value.clone().unwrap()); + unified_tags.set_service(Some(String::from("new_service"))); + assert_eq!("new_service", unified_tags.service().unwrap()); + std::env::remove_var("DD_SERVICE"); + } + + #[test] + fn test_env() { + std::env::set_var("DD_ENV", "test-env"); + let mut unified_tags = UnifiedTags::new(); + assert_eq!("test-env", unified_tags.env.value.clone().unwrap()); + unified_tags.set_env(Some(String::from("new_env"))); + assert_eq!("new_env", unified_tags.env.value.unwrap()); + std::env::remove_var("DD_ENV"); + } + + #[test] + fn test_version() { + std::env::set_var("DD_VERSION", "test-version-1.2.3"); + let mut
unified_tags = UnifiedTags::new(); + assert_eq!( + "test-version-1.2.3", + unified_tags.version.value.clone().unwrap() + ); + unified_tags.set_version(Some(String::from("new_version"))); + assert_eq!("new_version", unified_tags.version.value.unwrap()); + std::env::remove_var("DD_VERSION"); + } +} diff --git a/opentelemetry-datadog/src/exporter/model/v03.rs b/opentelemetry-datadog/src/exporter/model/v03.rs new file mode 100644 index 00000000..8f27dce7 --- /dev/null +++ b/opentelemetry-datadog/src/exporter/model/v03.rs @@ -0,0 +1,126 @@ +use crate::exporter::model::{Error, SAMPLING_PRIORITY_KEY}; +use crate::exporter::ModelConfig; +use opentelemetry::trace::Status; +use opentelemetry_sdk::export::trace::SpanData; +use std::time::SystemTime; + +pub(crate) fn encode<S, N, R>( + model_config: &ModelConfig, + traces: Vec<Vec<SpanData>>, + get_service_name: S, + get_name: N, + get_resource: R, +) -> Result<Vec<u8>, Error> +where + for<'a> S: Fn(&'a SpanData, &'a ModelConfig) -> &'a str, + for<'a> N: Fn(&'a SpanData, &'a ModelConfig) -> &'a str, + for<'a> R: Fn(&'a SpanData, &'a ModelConfig) -> &'a str, +{ + let mut encoded = Vec::new(); + rmp::encode::write_array_len(&mut encoded, traces.len() as u32)?; + + for trace in traces.into_iter() { + rmp::encode::write_array_len(&mut encoded, trace.len() as u32)?; + + for span in trace.into_iter() { + // Safe until the year 2262 when Datadog will need to change their API + let start = span + .start_time + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap() + .as_nanos() as i64; + + let duration = span + .end_time + .duration_since(span.start_time) + .map(|x| x.as_nanos() as i64) + .unwrap_or(0); + + let mut span_type_found = false; + for kv in &span.attributes { + if kv.key.as_str() == "span.type" { + span_type_found = true; + rmp::encode::write_map_len(&mut encoded, 12)?; + rmp::encode::write_str(&mut encoded, "type")?; + rmp::encode::write_str(&mut encoded, kv.value.as_str().as_ref())?; + break; + } + } + + if !span_type_found { + rmp::encode::write_map_len(&mut encoded, 11)?; + } + + // Datadog span name is OpenTelemetry component name - see module docs for more information + rmp::encode::write_str(&mut encoded, "service")?; + rmp::encode::write_str(&mut encoded, get_service_name(&span, model_config))?; + + rmp::encode::write_str(&mut encoded, "name")?; + rmp::encode::write_str(&mut encoded, get_name(&span, model_config))?; + + rmp::encode::write_str(&mut encoded, "resource")?; + rmp::encode::write_str(&mut encoded, get_resource(&span, model_config))?; + + rmp::encode::write_str(&mut encoded, "trace_id")?; + rmp::encode::write_u64( + &mut encoded, + u128::from_be_bytes(span.span_context.trace_id().to_bytes()) as u64, + )?; + + rmp::encode::write_str(&mut encoded, "span_id")?; + rmp::encode::write_u64( + &mut encoded, + u64::from_be_bytes(span.span_context.span_id().to_bytes()), + )?; + + rmp::encode::write_str(&mut encoded, "parent_id")?; + rmp::encode::write_u64( + &mut encoded, + u64::from_be_bytes(span.parent_span_id.to_bytes()), + )?; + + rmp::encode::write_str(&mut encoded, "start")?; + rmp::encode::write_i64(&mut encoded, start)?; + + rmp::encode::write_str(&mut encoded, "duration")?; + rmp::encode::write_i64(&mut encoded, duration)?; + + rmp::encode::write_str(&mut encoded, "error")?; + rmp::encode::write_i32( + &mut encoded, + match span.status { + Status::Error { ..
} => 1, + _ => 0, + }, + )?; + + rmp::encode::write_str(&mut encoded, "meta")?; + rmp::encode::write_map_len( + &mut encoded, + (span.attributes.len() + span.resource.len()) as u32, + )?; + for (key, value) in span.resource.iter() { + rmp::encode::write_str(&mut encoded, key.as_str())?; + rmp::encode::write_str(&mut encoded, value.as_str().as_ref())?; + } + for kv in span.attributes.iter() { + rmp::encode::write_str(&mut encoded, kv.key.as_str())?; + rmp::encode::write_str(&mut encoded, kv.value.as_str().as_ref())?; + } + + rmp::encode::write_str(&mut encoded, "metrics")?; + rmp::encode::write_map_len(&mut encoded, 1)?; + rmp::encode::write_str(&mut encoded, SAMPLING_PRIORITY_KEY)?; + rmp::encode::write_f64( + &mut encoded, + if span.span_context.is_sampled() { + 1.0 + } else { + 0.0 + }, + )?; + } + } + + Ok(encoded) +} diff --git a/opentelemetry-datadog/src/exporter/model/v05.rs b/opentelemetry-datadog/src/exporter/model/v05.rs new file mode 100644 index 00000000..f64de885 --- /dev/null +++ b/opentelemetry-datadog/src/exporter/model/v05.rs @@ -0,0 +1,222 @@ +use crate::exporter::intern::StringInterner; +use crate::exporter::model::SAMPLING_PRIORITY_KEY; +use crate::exporter::{Error, ModelConfig}; +use opentelemetry::trace::Status; +use opentelemetry_sdk::export::trace::SpanData; +use std::time::SystemTime; + +use super::unified_tags::{UnifiedTagField, UnifiedTags}; + +const SPAN_NUM_ELEMENTS: u32 = 12; + +// Protocol documentation sourced from https://github.com/DataDog/datadog-agent/blob/c076ea9a1ffbde4c76d35343dbc32aecbbf99cb9/pkg/trace/api/version.go +// +// The payload is an array containing exactly 2 elements: +// +// 1. An array of all unique strings present in the payload (a dictionary referred to by index). +// 2. An array of traces, where each trace is an array of spans. A span is encoded as an array having +// exactly 12 elements, representing all span properties, in this exact order: +// +// 0: Service (uint32) +// 1: Name (uint32) +// 2: Resource (uint32) +// 3: TraceID (uint64) +// 4: SpanID (uint64) +// 5: ParentID (uint64) +// 6: Start (int64) +// 7: Duration (int64) +// 8: Error (int32) +// 9: Meta (map[uint32]uint32) +// 10: Metrics (map[uint32]float64) +// 11: Type (uint32) +// +// Considerations: +// +// - The "uint32" typed values in "Service", "Name", "Resource", "Type", "Meta" and "Metrics" represent +// the index at which the corresponding string is found in the dictionary. If any of the values are the +// empty string, then the empty string must be added into the dictionary. +// +// - None of the elements can be nil. If any of them are unset, they should be given their "zero-value". Here +// is an example of a span with all unset values: +// +// 0: 0 // Service is "" (index 0 in dictionary) +// 1: 0 // Name is "" +// 2: 0 // Resource is "" +// 3: 0 // TraceID +// 4: 0 // SpanID +// 5: 0 // ParentID +// 6: 0 // Start +// 7: 0 // Duration +// 8: 0 // Error +// 9: map[uint32]uint32{} // Meta (empty map) +// 10: map[uint32]float64{} // Metrics (empty map) +// 11: 0 // Type is "" +// +// The dictionary in this case would be []string{""}, having only the empty string at index 0.
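+// +// As a hypothetical worked example (not from the upstream protocol docs): a single +// span whose service is "svc", name is "op" and resource is "res", with every other +// field zero-valued, would be encoded roughly as (pseudo-msgpack, made-up dictionary order): +// +// [ +// ["", "svc", "op", "res"], // 1. the string dictionary +// [[[1, 2, 3, 0, 0, 0, 0, 0, 0, {}, {}, 0]]] // 2. one trace holding one span +// ] +// +// where 1, 2 and 3 are the dictionary indices of "svc", "op" and "res", and index 0 +// (the empty string) fills the zero-valued "Type" field.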
+// +pub(crate) fn encode<S, N, R>( + model_config: &ModelConfig, + traces: Vec<Vec<SpanData>>, + get_service_name: S, + get_name: N, + get_resource: R, + unified_tags: &UnifiedTags, +) -> Result<Vec<u8>, Error> +where + for<'a> S: Fn(&'a SpanData, &'a ModelConfig) -> &'a str, + for<'a> N: Fn(&'a SpanData, &'a ModelConfig) -> &'a str, + for<'a> R: Fn(&'a SpanData, &'a ModelConfig) -> &'a str, +{ + let mut interner = StringInterner::new(); + let mut encoded_traces = encode_traces( + &mut interner, + model_config, + get_service_name, + get_name, + get_resource, + traces, + unified_tags, + )?; + + let mut payload = Vec::new(); + rmp::encode::write_array_len(&mut payload, 2)?; + + rmp::encode::write_array_len(&mut payload, interner.len())?; + for data in interner.iter() { + rmp::encode::write_str(&mut payload, data)?; + } + + payload.append(&mut encoded_traces); + + Ok(payload) +} + +fn write_unified_tags( + encoded: &mut Vec<u8>, + interner: &mut StringInterner, + unified_tags: &UnifiedTags, +) -> Result<(), Error> { + write_unified_tag(encoded, interner, &unified_tags.service)?; + write_unified_tag(encoded, interner, &unified_tags.env)?; + write_unified_tag(encoded, interner, &unified_tags.version)?; + Ok(()) +} + +fn write_unified_tag( + encoded: &mut Vec<u8>, + interner: &mut StringInterner, + tag: &UnifiedTagField, +) -> Result<(), Error> { + if let Some(tag_value) = &tag.value { + rmp::encode::write_u32(encoded, interner.intern(tag.get_tag_name()))?; + rmp::encode::write_u32(encoded, interner.intern(tag_value.as_str().as_ref()))?; + } + Ok(()) +} + +fn encode_traces<S, N, R>( + interner: &mut StringInterner, + model_config: &ModelConfig, + get_service_name: S, + get_name: N, + get_resource: R, + traces: Vec<Vec<SpanData>>, + unified_tags: &UnifiedTags, +) -> Result<Vec<u8>, Error> +where + for<'a> S: Fn(&'a SpanData, &'a ModelConfig) -> &'a str, + for<'a> N: Fn(&'a SpanData, &'a ModelConfig) -> &'a str, + for<'a> R: Fn(&'a SpanData, &'a ModelConfig) -> &'a str, +{ + let mut encoded = Vec::new(); + rmp::encode::write_array_len(&mut encoded, traces.len() as u32)?; + + for trace in traces.into_iter() { + rmp::encode::write_array_len(&mut encoded, trace.len() as u32)?; + + for span in trace.into_iter() { + // Safe until the year 2262 when Datadog will need to change their API + let start = span + .start_time + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap() + .as_nanos() as i64; + + let duration = span + .end_time + .duration_since(span.start_time) + .map(|x| x.as_nanos() as i64) + .unwrap_or(0); + + let mut span_type = interner.intern(""); + for kv in &span.attributes { + if kv.key.as_str() == "span.type" { + span_type = interner.intern(kv.value.as_str().as_ref()); + break; + } + } + + // Datadog span name is OpenTelemetry component name - see module docs for more information + rmp::encode::write_array_len(&mut encoded, SPAN_NUM_ELEMENTS)?; + rmp::encode::write_u32( + &mut encoded, + interner.intern(get_service_name(&span, model_config)), + )?; + rmp::encode::write_u32(&mut encoded, interner.intern(get_name(&span, model_config)))?; + rmp::encode::write_u32( + &mut encoded, + interner.intern(get_resource(&span, model_config)), + )?; + rmp::encode::write_u64( + &mut encoded, + u128::from_be_bytes(span.span_context.trace_id().to_bytes()) as u64, + )?; + rmp::encode::write_u64( + &mut encoded, + u64::from_be_bytes(span.span_context.span_id().to_bytes()), + )?; + rmp::encode::write_u64( + &mut encoded, + u64::from_be_bytes(span.parent_span_id.to_bytes()), + )?; + rmp::encode::write_i64(&mut encoded, start)?; + rmp::encode::write_i64(&mut encoded, duration)?;
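+ // Error flag: 1 if the span status is Error, 0 otherwise. It is followed by the + // meta map (resource attributes, unified tags and span attributes as pairs of + // interned-string indices), the metrics map carrying the sampling priority, and + // finally the span type.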
rmp::encode::write_i32( + &mut encoded, + match span.status { + Status::Error { .. } => 1, + _ => 0, + }, + )?; + rmp::encode::write_map_len( + &mut encoded, + (span.attributes.len() + span.resource.len()) as u32 + + unified_tags.compute_attribute_size(), + )?; + for (key, value) in span.resource.iter() { + rmp::encode::write_u32(&mut encoded, interner.intern(key.as_str()))?; + rmp::encode::write_u32(&mut encoded, interner.intern(value.as_str().as_ref()))?; + } + + write_unified_tags(&mut encoded, interner, unified_tags)?; + + for kv in span.attributes.iter() { + rmp::encode::write_u32(&mut encoded, interner.intern(kv.key.as_str()))?; + rmp::encode::write_u32(&mut encoded, interner.intern(kv.value.as_str().as_ref()))?; + } + rmp::encode::write_map_len(&mut encoded, 1)?; + rmp::encode::write_u32(&mut encoded, interner.intern(SAMPLING_PRIORITY_KEY))?; + rmp::encode::write_f64( + &mut encoded, + if span.span_context.is_sampled() { + 1.0 + } else { + 0.0 + }, + )?; + rmp::encode::write_u32(&mut encoded, span_type)?; + } + } + + Ok(encoded) +} diff --git a/opentelemetry-datadog/src/lib.rs b/opentelemetry-datadog/src/lib.rs new file mode 100644 index 00000000..273b9bc8 --- /dev/null +++ b/opentelemetry-datadog/src/lib.rs @@ -0,0 +1,401 @@ +//! # OpenTelemetry Datadog Exporter +//! +//! An OpenTelemetry datadog exporter implementation +//! +//! See the [Datadog Docs](https://docs.datadoghq.com/agent/) for information on how to run the datadog-agent +//! +//! ## Quirks +//! +//! There are currently some incompatibilities between Datadog and OpenTelemetry, and this manifests +//! as minor quirks to this exporter. +//! +//! Firstly, Datadog uses operation_name to describe what OpenTracing would call a component. +//! Or to put it another way, in OpenTracing the operation / span names are relatively +//! granular and might be used to identify a specific endpoint. In Datadog, however, they +//! are less granular - it is expected in Datadog that a service will have a single +//! primary span name that is the root of all traces within that service, with an additional piece of +//! metadata called resource_name providing granularity. See [here](https://docs.datadoghq.com/tracing/guide/configuring-primary-operation/) +//! +//! The Datadog Golang API takes the approach of using a `resource.name` OpenTelemetry attribute to set the +//! resource_name. See [here](https://github.com/DataDog/dd-trace-go/blob/ecb0b805ef25b00888a2fb62d465a5aa95e7301e/ddtrace/opentracer/tracer.go#L10) +//! +//! Unfortunately, this breaks compatibility with other OpenTelemetry exporters which expect +//! a more granular operation name - as per the OpenTracing specification. +//! +//! This exporter therefore takes a different approach of naming the span with the name of the +//! tracing provider, and using the span name to set the resource_name. This should in most cases +//! lead to the behaviour that users expect. +//! +//! Datadog additionally has a span_type string that alters the rendering of the spans in the web UI. +//! This can be set as the `span.type` OpenTelemetry span attribute. +//! +//! For standard values see [here](https://github.com/DataDog/dd-trace-go/blob/ecb0b805ef25b00888a2fb62d465a5aa95e7301e/ddtrace/ext/app_types.go#L31). +//! +//! If the default mapping does not fit your use case, you may change parts of it by providing [`FieldMappingFn`]s in the pipeline. +//! +//! ## Performance +//! +//! For optimal performance, a batch exporter is recommended as the simple exporter will export
each span synchronously on drop. You can enable the [`rt-tokio`], [`rt-tokio-current-thread`] +//! or [`rt-async-std`] features and specify a runtime on the pipeline to have a batch exporter +//! configured for you automatically. +//! +//! ```toml +//! [dependencies] +//! opentelemetry = { version = "*", features = ["rt-tokio"] } +//! opentelemetry-datadog = "*" +//! ``` +//! +//! ```no_run +//! # fn main() -> Result<(), opentelemetry::trace::TraceError> { +//! let tracer = opentelemetry_datadog::new_pipeline() +//! .install_batch(opentelemetry_sdk::runtime::Tokio)?; +//! # Ok(()) +//! # } +//! ``` +//! +//! [`rt-tokio`]: https://tokio.rs +//! [`rt-tokio-current-thread`]: https://tokio.rs +//! [`rt-async-std`]: https://async.rs +//! +//! ## Bring your own http client +//! +//! Users can choose appropriate http clients to align with their runtime. +//! +//! The default http client depends on the features enabled. If no client feature is specified, or the +//! `reqwest-blocking-client` feature is enabled, the blocking reqwest http client is used as the +//! default. If the `reqwest-client` feature is enabled, the async reqwest http client is used. If the +//! `surf-client` feature is enabled, the surf http client is used. +//! +//! Note that async http clients may require a specific runtime and will otherwise panic. Users should +//! make sure the http client is running in an appropriate runtime. +//! +//! Users can always use their own http clients by implementing the `HttpClient` trait. +//! +//! ## Kitchen Sink Full Configuration +//! +//! Example showing how to override all configuration options. See the +//! [`DatadogPipelineBuilder`] docs for details of each option. +//! +//! [`DatadogPipelineBuilder`]: struct.DatadogPipelineBuilder.html +//! +//! ```no_run +//! use opentelemetry::{KeyValue, trace::Tracer}; +//! use opentelemetry_sdk::{trace::{self, RandomIdGenerator, Sampler}, Resource}; +//! use opentelemetry_sdk::export::trace::ExportResult; +//! use opentelemetry::global::shutdown_tracer_provider; +//! use opentelemetry_datadog::{new_pipeline, ApiVersion, Error}; +//! use opentelemetry_http::{HttpClient, HttpError}; +//! use async_trait::async_trait; +//! use bytes::Bytes; +//! use futures_util::io::AsyncReadExt as _; +//! use http::{Request, Response}; +//! use std::convert::TryInto as _; +//! +//! // `reqwest` and `surf` are supported through features, if you prefer an +//! // alternate http client you can add support by implementing `HttpClient` as +//! // shown here. +//! #[derive(Debug)] +//! struct IsahcClient(isahc::HttpClient); +//! +//! #[async_trait] +//! impl HttpClient for IsahcClient { +//! async fn send(&self, request: Request<Vec<u8>>) -> Result<Response<Bytes>, HttpError> { +//! let mut response = self.0.send_async(request).await?; +//! let status = response.status(); +//! let mut bytes = Vec::with_capacity(response.body().len().unwrap_or(0).try_into()?); +//! isahc::AsyncReadResponseExt::copy_to(&mut response, &mut bytes).await?; +//! +//! Ok(Response::builder() +//! .status(response.status()) +//! .body(bytes.into())?) +//! } +//! } +//! +//! fn main() -> Result<(), opentelemetry::trace::TraceError> { +//! let tracer = new_pipeline() +//! .with_service_name("my_app") +//! .with_api_version(ApiVersion::Version05) +//! .with_agent_endpoint("http://localhost:8126") +//! .with_trace_config( +//! trace::config() +//! .with_sampler(Sampler::AlwaysOn) +//! .with_id_generator(RandomIdGenerator::default()) +//! ) +//! .install_batch(opentelemetry_sdk::runtime::Tokio)?; +//! +//!
tracer.in_span("doing_work", |cx| { +//! // Traced app logic here... +//! }); +//! +//! shutdown_tracer_provider(); // sending remaining spans before exit +//! +//! Ok(()) +//! } +//! ``` + +mod exporter; + +pub use exporter::{ + new_pipeline, ApiVersion, DatadogExporter, DatadogPipelineBuilder, Error, FieldMappingFn, + ModelConfig, +}; +pub use propagator::DatadogPropagator; + +mod propagator { + use once_cell::sync::Lazy; + use opentelemetry::{ + propagation::{text_map_propagator::FieldIter, Extractor, Injector, TextMapPropagator}, + trace::{SpanContext, SpanId, TraceContextExt, TraceFlags, TraceId, TraceState}, + Context, + }; + + const DATADOG_TRACE_ID_HEADER: &str = "x-datadog-trace-id"; + const DATADOG_PARENT_ID_HEADER: &str = "x-datadog-parent-id"; + const DATADOG_SAMPLING_PRIORITY_HEADER: &str = "x-datadog-sampling-priority"; + + const TRACE_FLAG_DEFERRED: TraceFlags = TraceFlags::new(0x02); + + static DATADOG_HEADER_FIELDS: Lazy<[String; 3]> = Lazy::new(|| { + [ + DATADOG_TRACE_ID_HEADER.to_string(), + DATADOG_PARENT_ID_HEADER.to_string(), + DATADOG_SAMPLING_PRIORITY_HEADER.to_string(), + ] + }); + + enum SamplingPriority { + UserReject = -1, + AutoReject = 0, + AutoKeep = 1, + UserKeep = 2, + } + + #[derive(Debug)] + enum ExtractError { + TraceId, + SpanId, + SamplingPriority, + } + + /// Extracts and injects `SpanContext`s into `Extractor`s or `Injector`s using Datadog's header format. + /// + /// The Datadog header format does not have an explicit spec, but can be divined from the client libraries, + /// such as [dd-trace-go] + /// + /// ## Example + /// + /// ``` + /// use opentelemetry::global; + /// use opentelemetry_datadog::DatadogPropagator; + /// + /// global::set_text_map_propagator(DatadogPropagator::default()); + /// ``` + /// + /// [dd-trace-go]: https://github.com/DataDog/dd-trace-go/blob/v1.28.0/ddtrace/tracer/textmap.go#L293 + #[derive(Clone, Debug, Default)] + pub struct DatadogPropagator { + _private: (), + } + + impl DatadogPropagator { + /// Creates a new `DatadogPropagator`. 
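+ /// + /// A minimal usage sketch (extracting a remote span context from a header map): + /// + /// ``` + /// use std::collections::HashMap; + /// use opentelemetry::propagation::TextMapPropagator; + /// use opentelemetry_datadog::DatadogPropagator; + /// + /// let mut headers = HashMap::new(); + /// headers.insert("x-datadog-trace-id".to_string(), "1234".to_string()); + /// headers.insert("x-datadog-parent-id".to_string(), "12".to_string()); + /// + /// let cx = DatadogPropagator::new().extract(&headers); + /// ```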
+ pub fn new() -> Self { + DatadogPropagator::default() + } + + fn extract_trace_id(&self, trace_id: &str) -> Result<TraceId, ExtractError> { + trace_id + .parse::<u64>() + .map(|id| TraceId::from(id as u128)) + .map_err(|_| ExtractError::TraceId) + } + + fn extract_span_id(&self, span_id: &str) -> Result<SpanId, ExtractError> { + span_id + .parse::<u64>() + .map(SpanId::from) + .map_err(|_| ExtractError::SpanId) + } + + fn extract_sampling_priority( + &self, + sampling_priority: &str, + ) -> Result<SamplingPriority, ExtractError> { + let i = sampling_priority + .parse::<i32>() + .map_err(|_| ExtractError::SamplingPriority)?; + + match i { + -1 => Ok(SamplingPriority::UserReject), + 0 => Ok(SamplingPriority::AutoReject), + 1 => Ok(SamplingPriority::AutoKeep), + 2 => Ok(SamplingPriority::UserKeep), + _ => Err(ExtractError::SamplingPriority), + } + } + + fn extract_span_context( + &self, + extractor: &dyn Extractor, + ) -> Result<SpanContext, ExtractError> { + let trace_id = + self.extract_trace_id(extractor.get(DATADOG_TRACE_ID_HEADER).unwrap_or(""))?; + // If we have a trace_id but can't get the parent span, we default it to invalid instead of completely erroring + // out so that the rest of the spans aren't completely lost + let span_id = self + .extract_span_id(extractor.get(DATADOG_PARENT_ID_HEADER).unwrap_or("")) + .unwrap_or(SpanId::INVALID); + let sampling_priority = self.extract_sampling_priority( + extractor + .get(DATADOG_SAMPLING_PRIORITY_HEADER) + .unwrap_or(""), + ); + let sampled = match sampling_priority { + Ok(SamplingPriority::UserReject) | Ok(SamplingPriority::AutoReject) => { + TraceFlags::default() + } + Ok(SamplingPriority::UserKeep) | Ok(SamplingPriority::AutoKeep) => { + TraceFlags::SAMPLED + } + // Treat the sampling as DEFERRED instead of erroring on extracting the span context + Err(_) => TRACE_FLAG_DEFERRED, + }; + + let trace_state = TraceState::default(); + + Ok(SpanContext::new( + trace_id, + span_id, + sampled, + true, + trace_state, + )) + } + } + + impl TextMapPropagator for DatadogPropagator { + fn inject_context(&self, cx: &Context, injector: &mut dyn Injector) { + let span = cx.span(); + let span_context = span.span_context(); + if span_context.is_valid() { + injector.set( + DATADOG_TRACE_ID_HEADER, + (u128::from_be_bytes(span_context.trace_id().to_bytes()) as u64).to_string(), + ); + injector.set( + DATADOG_PARENT_ID_HEADER, + u64::from_be_bytes(span_context.span_id().to_bytes()).to_string(), + ); + + if span_context.trace_flags() & TRACE_FLAG_DEFERRED != TRACE_FLAG_DEFERRED { + let sampling_priority = if span_context.is_sampled() { + SamplingPriority::AutoKeep + } else { + SamplingPriority::AutoReject + }; + + injector.set( + DATADOG_SAMPLING_PRIORITY_HEADER, + (sampling_priority as i32).to_string(), + ); + } + } + } + + fn extract_with_context(&self, cx: &Context, extractor: &dyn Extractor) -> Context { + self.extract_span_context(extractor) + .map(|sc| cx.with_remote_span_context(sc)) + .unwrap_or_else(|_| cx.clone()) + } + + fn fields(&self) -> FieldIter<'_> { + FieldIter::new(DATADOG_HEADER_FIELDS.as_ref()) + } + } + + #[cfg(test)] + mod tests { + use super::*; + use opentelemetry::trace::TraceState; + use opentelemetry_sdk::testing::trace::TestSpan; + use std::collections::HashMap; + + #[rustfmt::skip] + fn extract_test_data() -> Vec<(Vec<(&'static str, &'static str)>, SpanContext)> { + vec![ + (vec![], SpanContext::empty_context()), + (vec![(DATADOG_SAMPLING_PRIORITY_HEADER, "0")], SpanContext::empty_context()), + (vec![(DATADOG_TRACE_ID_HEADER, "garbage")], SpanContext::empty_context()), + (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER,
"garbage")], SpanContext::new(TraceId::from_u128(1234), SpanId::INVALID, TRACE_FLAG_DEFERRED, true, TraceState::default())), + (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TRACE_FLAG_DEFERRED, true, TraceState::default())), + (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "0")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TraceFlags::default(), true, TraceState::default())), + (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "1")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TraceFlags::SAMPLED, true, TraceState::default())), + ] + } + + #[rustfmt::skip] + fn inject_test_data() -> Vec<(Vec<(&'static str, &'static str)>, SpanContext)> { + vec![ + (vec![], SpanContext::empty_context()), + (vec![], SpanContext::new(TraceId::INVALID, SpanId::INVALID, TRACE_FLAG_DEFERRED, true, TraceState::default())), + (vec![], SpanContext::new(TraceId::from_hex("1234").unwrap(), SpanId::INVALID, TRACE_FLAG_DEFERRED, true, TraceState::default())), + (vec![], SpanContext::new(TraceId::from_hex("1234").unwrap(), SpanId::INVALID, TraceFlags::SAMPLED, true, TraceState::default())), + (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TRACE_FLAG_DEFERRED, true, TraceState::default())), + (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "0")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TraceFlags::default(), true, TraceState::default())), + (vec![(DATADOG_TRACE_ID_HEADER, "1234"), (DATADOG_PARENT_ID_HEADER, "12"), (DATADOG_SAMPLING_PRIORITY_HEADER, "1")], SpanContext::new(TraceId::from_u128(1234), SpanId::from_u64(12), TraceFlags::SAMPLED, true, TraceState::default())), + ] + } + + #[test] + fn test_extract() { + for (header_list, expected) in extract_test_data() { + let map: HashMap = header_list + .into_iter() + .map(|(k, v)| (k.to_string(), v.to_string())) + .collect(); + + let propagator = DatadogPropagator::default(); + let context = propagator.extract(&map); + assert_eq!(context.span().span_context(), &expected); + } + } + + #[test] + fn test_extract_empty() { + let map: HashMap = HashMap::new(); + let propagator = DatadogPropagator::default(); + let context = propagator.extract(&map); + assert_eq!(context.span().span_context(), &SpanContext::empty_context()) + } + + #[test] + fn test_extract_with_empty_remote_context() { + let map: HashMap = HashMap::new(); + let propagator = DatadogPropagator::default(); + let context = propagator.extract_with_context(&Context::new(), &map); + assert!(!context.has_active_span()) + } + + #[test] + fn test_inject() { + let propagator = DatadogPropagator::default(); + for (header_values, span_context) in inject_test_data() { + let mut injector: HashMap = HashMap::new(); + propagator.inject_context( + &Context::current_with_span(TestSpan(span_context)), + &mut injector, + ); + + if !header_values.is_empty() { + for (k, v) in header_values.into_iter() { + let injected_value: Option<&String> = injector.get(k); + assert_eq!(injected_value, Some(&v.to_string())); + injector.remove(k); + } + } + assert!(injector.is_empty()); + } + } + } +} diff --git a/opentelemetry-dynatrace/CHANGELOG.md b/opentelemetry-dynatrace/CHANGELOG.md new file mode 100644 index 
00000000..495a55f6 --- /dev/null +++ b/opentelemetry-dynatrace/CHANGELOG.md @@ -0,0 +1,31 @@ +# Changelog + +## v0.4.0 + +### Changed + +- Bump MSRV to 1.65 [#1318](https://github.com/open-telemetry/opentelemetry-rust/pull/1318) +- Add deprecation note to Dynatrace exporter +- Bump MSRV to 1.64 [#1203](https://github.com/open-telemetry/opentelemetry-rust/pull/1203) + +## v0.3.0 + +### Changed + +- Update to `opentelemetry` v0.19.0 +- Update to `opentelemetry-http` v0.8.0 +- Bump MSRV to 1.57 [#953](https://github.com/open-telemetry/opentelemetry-rust/pull/953) +- Update dependencies and bump MSRV to 1.60 [#969](https://github.com/open-telemetry/opentelemetry-rust/pull/969). + +## v0.2.0 + +### Changed + +- Update to opentelemetry v0.18.0 +- Update to opentelemetry-http v0.7.0 + +## v0.1.0 + +### Added + +- Dynatrace metrics exporter diff --git a/opentelemetry-dynatrace/CODEOWNERS b/opentelemetry-dynatrace/CODEOWNERS new file mode 100644 index 00000000..d6962a90 --- /dev/null +++ b/opentelemetry-dynatrace/CODEOWNERS @@ -0,0 +1,5 @@ +# Code owners file. +# This file controls who is tagged for review for any given pull request. + +# For anything not explicitly taken by someone else: +* @open-telemetry/rust-approvers diff --git a/opentelemetry-dynatrace/LICENSE b/opentelemetry-dynatrace/LICENSE new file mode 100644 index 00000000..23a2acab --- /dev/null +++ b/opentelemetry-dynatrace/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2023 The OpenTelemetry Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/opentelemetry-dynatrace/README.md b/opentelemetry-dynatrace/README.md new file mode 100644 index 00000000..0f978d02 --- /dev/null +++ b/opentelemetry-dynatrace/README.md @@ -0,0 +1,16 @@ +![OpenTelemetry — An observability framework for cloud-native software.][splash] + +[splash]: https://raw.githubusercontent.com/open-telemetry/opentelemetry-rust/main/assets/logo-text.png + +# Dynatrace + +The final release of this crate was 0.4.0. Dynatrace now recommends using the OTLP exporter. They also provide a [migration guide]. +For an example of how to configure the OTLP exporter in a Rust application, check out the [Rust integration walk-through] page in the Dynatrace documentation.
+ +[migration guide]: https://www.dynatrace.com/support/help/shortlink/migrating-dynatrace-metrics-exporter-otlp-exporter#migrate-applications +[Rust integration walk-through]: https://www.dynatrace.com/support/help/shortlink/otel-wt-rust + + +## Notice Removal + +This README and directory can be removed at any time in 2024, at least 6 months after the last release date of `opentelemetry-dynatrace`. diff --git a/opentelemetry-stackdriver/CHANGELOG.md b/opentelemetry-stackdriver/CHANGELOG.md new file mode 100644 index 00000000..7bcc0805 --- /dev/null +++ b/opentelemetry-stackdriver/CHANGELOG.md @@ -0,0 +1,107 @@ +# Changelog + +## vNext + +WARNING: The current version relies on features only available in the upstream git version. This should be modified before releasing. + +## v0.18.0 + +### Changed + +- Bump MSRV to 1.65 [#1318](https://github.com/open-telemetry/opentelemetry-rust/pull/1318) +- Bump MSRV to 1.64 [#1203](https://github.com/open-telemetry/opentelemetry-rust/pull/1203) + +## v0.17.0 + +### Added + +- Send resource along with span attributes and kind/status #1035 +- Add option to authenticate with existing GCP Authentication Manager #1128 + +### Changed + +- Update gRPC schemas #992 +- Upgrade gcp-auth to 0.9 #1137 +- Update to opentelemetry v0.20.0 + +## v0.16.0 + +### Changed +- Update to `opentelemetry` v0.19. +- Update to `opentelemetry-semantic-conventions` v0.11. +- Bump MSRV to 1.57 [#953](https://github.com/open-telemetry/opentelemetry-rust/pull/953). +- Update dependencies and bump MSRV to 1.60 [#969](https://github.com/open-telemetry/opentelemetry-rust/pull/969). +- Update grpc schemas [#992](https://github.com/open-telemetry/opentelemetry-rust/pull/992). + +## v0.15.0 + +### Added + +- Added mappings from OTel attributes to Google Cloud Traces #744 +- Added `MonitoredResource::CloudRunRevision` #847 + +### Changed + +- Upgrade to opentelemetry v0.18.0 +- Upgrade to opentelemetry-semantic-conventions v0.10 +- Update tonic and prost #825 + +### Fixed + +- Fix `LogEntry.trace` not populated correctly #850 + +## v0.14.0 + +### Changed + +- Upgrade to new gcp_auth version (#722) +- Stop leaking dependency error types into public API (#722) +- Clarify type of MonitoredResource (#722) + +### Fixed + +- Fixed issue with futures dependency (#722) +- Don't set up logging channel if no logging is configured (#722) + +## v0.13.0 + +### Changed + +- Send export errors to global error handler (#705) +- Return `impl Future` to avoid spawning inside library (#703) +- Implement builder API to simplify configuration (#702) +- Use TLS configuration provided by tonic (#702) +- Optionally send events to Cloud Logging (#702) +- Exclude default `tonic-build` features #635 +- Update `gcp_auth` dependency to `0.5.0` #639 +- Include the server's message in error display #642 +- Update `tonic` to 0.6 #660 +- Update gcp_auth and yup-oauth2 to latest versions #700 +- Update to opentelemetry v0.17.0 + +### Fixed + +- Avoid calling log from inside exporter #709 + +## v0.12.0 + +### Changed + +- Update to opentelemetry v0.16.0 + +## v0.11.0 + +### Changed + +- Update to opentelemetry v0.15.0 + +## v0.10.0 + +### Changed + +- Update to opentelemetry v0.14.0 + +## v0.9.0 + +### Changed +- Move opentelemetry-stackdriver into opentelemetry-rust repo #487 diff --git a/opentelemetry-stackdriver/Cargo.toml b/opentelemetry-stackdriver/Cargo.toml new file mode 100644 index 00000000..b4803f72 --- /dev/null +++ b/opentelemetry-stackdriver/Cargo.toml @@ -0,0 +1,47 @@ +[package] +name = "opentelemetry-stackdriver" +version
= "0.18.0" +description = "A Rust opentelemetry exporter that uploads traces to Google Stackdriver trace." +documentation = "https://docs.rs/opentelemetry-stackdriver/" +repository = "https://github.com/open-telemetry/opentelemetry-rust" +license = "Apache-2.0" +edition = "2021" +exclude = ["/proto"] +rust-version = "1.65" + +[dependencies] +async-trait = "0.1.48" +gcp_auth = { version = "0.9", optional = true } +hex = "0.4" +http = "0.2" +hyper = "0.14.2" +hyper-rustls = { version = "0.24", optional = true } +# TODO: Replace with opentelemetry version before release +opentelemetry = { git = "https://github.com/open-telemetry/opentelemetry-rust.git", branch = "main" } +# TODO: Replace with opentelemetry version before release +opentelemetry_sdk = { git = "https://github.com/open-telemetry/opentelemetry-rust.git", branch = "main" } +opentelemetry-semantic-conventions = { version = "0.13" } +prost = "0.11.0" +prost-types = "0.11.1" +thiserror = "1.0.30" +tonic = { version = "0.9.0", features = ["gzip", "tls", "transport"] } +yup-oauth2 = { version = "8.1.0", optional = true } + +# Futures +futures-core = "0.3" +futures-util = { version = "0.3", default-features = false, features = ["alloc"] } +futures-channel = { version = "0.3", default-features = false, features = ["std"] } + +[features] +default = ["yup-authorizer", "tls-native-roots"] +yup-authorizer = ["hyper-rustls", "yup-oauth2"] +tls-native-roots = ["tonic/tls-roots"] +tls-webpki-roots = ["tonic/tls-webpki-roots"] + +[dev-dependencies] +reqwest = "0.11.9" +tempfile = "3.3.0" +tokio = "1" +tonic-build = "0.9.0" +walkdir = "2.3.2" +futures-util = { version = "0.3", default-features = false } diff --git a/opentelemetry-stackdriver/LICENSE-APACHE b/opentelemetry-stackdriver/LICENSE-APACHE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/opentelemetry-stackdriver/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/opentelemetry-stackdriver/README.md b/opentelemetry-stackdriver/README.md
new file mode 100644
index 00000000..bce58731
--- /dev/null
+++ b/opentelemetry-stackdriver/README.md
@@ -0,0 +1,7 @@
+# opentelemetry-stackdriver
+
+[Documentation](https://docs.rs/opentelemetry-stackdriver/)
+
+This crate provides an `opentelemetry` exporter for use with Google Stackdriver Trace. It uses gRPC to send tracing spans.
+
+It is licensed under the Apache 2.0 license. Contributions are welcome.
diff --git a/opentelemetry-stackdriver/proto/google/api/annotations.proto b/opentelemetry-stackdriver/proto/google/api/annotations.proto
new file mode 100644
index 00000000..efdab3db
--- /dev/null
+++ b/opentelemetry-stackdriver/proto/google/api/annotations.proto
@@ -0,0 +1,31 @@
+// Copyright 2015 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.api; + +import "google/api/http.proto"; +import "google/protobuf/descriptor.proto"; + +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; +option java_multiple_files = true; +option java_outer_classname = "AnnotationsProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +extend google.protobuf.MethodOptions { + // See `HttpRule`. + HttpRule http = 72295728; +} diff --git a/opentelemetry-stackdriver/proto/google/api/client.proto b/opentelemetry-stackdriver/proto/google/api/client.proto new file mode 100644 index 00000000..227ccf3a --- /dev/null +++ b/opentelemetry-stackdriver/proto/google/api/client.proto @@ -0,0 +1,349 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.api; + +import "google/api/launch_stage.proto"; +import "google/protobuf/descriptor.proto"; +import "google/protobuf/duration.proto"; + +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; +option java_multiple_files = true; +option java_outer_classname = "ClientProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +extend google.protobuf.MethodOptions { + // A definition of a client library method signature. + // + // In client libraries, each proto RPC corresponds to one or more methods + // which the end user is able to call, and calls the underlying RPC. + // Normally, this method receives a single argument (a struct or instance + // corresponding to the RPC request object). Defining this field will + // add one or more overloads providing flattened or simpler method signatures + // in some languages. + // + // The fields on the method signature are provided as a comma-separated + // string. + // + // For example, the proto RPC and annotation: + // + // rpc CreateSubscription(CreateSubscriptionRequest) + // returns (Subscription) { + // option (google.api.method_signature) = "name,topic"; + // } + // + // Would add the following Java overload (in addition to the method accepting + // the request object): + // + // public final Subscription createSubscription(String name, String topic) + // + // The following backwards-compatibility guidelines apply: + // + // * Adding this annotation to an unannotated method is backwards + // compatible. + // * Adding this annotation to a method which already has existing + // method signature annotations is backwards compatible if and only if + // the new method signature annotation is last in the sequence. 
+ // * Modifying or removing an existing method signature annotation is + // a breaking change. + // * Re-ordering existing method signature annotations is a breaking + // change. + repeated string method_signature = 1051; +} + +extend google.protobuf.ServiceOptions { + // The hostname for this service. + // This should be specified with no prefix or protocol. + // + // Example: + // + // service Foo { + // option (google.api.default_host) = "foo.googleapi.com"; + // ... + // } + string default_host = 1049; + + // OAuth scopes needed for the client. + // + // Example: + // + // service Foo { + // option (google.api.oauth_scopes) = \ + // "https://www.googleapis.com/auth/cloud-platform"; + // ... + // } + // + // If there is more than one scope, use a comma-separated string: + // + // Example: + // + // service Foo { + // option (google.api.oauth_scopes) = \ + // "https://www.googleapis.com/auth/cloud-platform," + // "https://www.googleapis.com/auth/monitoring"; + // ... + // } + string oauth_scopes = 1050; +} + +// Required information for every language. +message CommonLanguageSettings { + // Link to automatically generated reference documentation. Example: + // https://cloud.google.com/nodejs/docs/reference/asset/latest + string reference_docs_uri = 1 [deprecated = true]; + + // The destination where API teams want this client library to be published. + repeated ClientLibraryDestination destinations = 2; +} + +// Details about how and where to publish client libraries. +message ClientLibrarySettings { + // Version of the API to apply these settings to. + string version = 1; + + // Launch stage of this version of the API. + LaunchStage launch_stage = 2; + + // When using transport=rest, the client request will encode enums as + // numbers rather than strings. + bool rest_numeric_enums = 3; + + // Settings for legacy Java features, supported in the Service YAML. + JavaSettings java_settings = 21; + + // Settings for C++ client libraries. + CppSettings cpp_settings = 22; + + // Settings for PHP client libraries. + PhpSettings php_settings = 23; + + // Settings for Python client libraries. + PythonSettings python_settings = 24; + + // Settings for Node client libraries. + NodeSettings node_settings = 25; + + // Settings for .NET client libraries. + DotnetSettings dotnet_settings = 26; + + // Settings for Ruby client libraries. + RubySettings ruby_settings = 27; + + // Settings for Go client libraries. + GoSettings go_settings = 28; +} + +// This message configures the settings for publishing [Google Cloud Client +// libraries](https://cloud.google.com/apis/docs/cloud-client-libraries) +// generated from the service config. +message Publishing { + // A list of API method settings, e.g. the behavior for methods that use the + // long-running operation pattern. + repeated MethodSettings method_settings = 2; + + // Link to a place that API users can report issues. Example: + // https://issuetracker.google.com/issues/new?component=190865&template=1161103 + string new_issue_uri = 101; + + // Link to product home page. Example: + // https://cloud.google.com/asset-inventory/docs/overview + string documentation_uri = 102; + + // Used as a tracking tag when collecting data about the APIs developer + // relations artifacts like docs, packages delivered to package managers, + // etc. Example: "speech". + string api_short_name = 103; + + // GitHub label to apply to issues and pull requests opened for this API. 
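+  // For example, a label such as "api: speech" (illustrative only; the
+  // actual label set varies by repository).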
+  string github_label = 104;
+
+  // GitHub teams to be added to CODEOWNERS in the directory in GitHub
+  // containing source code for the client libraries for this API.
+  repeated string codeowner_github_teams = 105;
+
+  // A prefix used in sample code when demarcating regions to be included in
+  // documentation.
+  string doc_tag_prefix = 106;
+
+  // For whom the client library is being published.
+  ClientLibraryOrganization organization = 107;
+
+  // Client library settings. If the same version string appears multiple
+  // times in this list, then the last one wins. Settings from earlier
+  // settings with the same version string are discarded.
+  repeated ClientLibrarySettings library_settings = 109;
+}
+
+// Settings for Java client libraries.
+message JavaSettings {
+  // The package name to use in Java. Clobbers the java_package option
+  // set in the protobuf. This should be used **only** by APIs that have
+  // already set the "language_settings.java.package_name" field in
+  // gapic.yaml. API teams should use the protobuf java_package option
+  // where possible.
+  //
+  // Example of a YAML configuration::
+  //
+  //  publishing:
+  //    java_settings:
+  //      library_package: com.google.cloud.pubsub.v1
+  string library_package = 1;
+
+  // Configure the Java class name to use instead of the service's for its
+  // corresponding generated GAPIC client. Keys are fully-qualified
+  // service names as they appear in the protobuf (including the full
+  // proto package name), and values are the class names to use instead.
+  // This should be used **only** by APIs that have already set the
+  // "language_settings.java.interface_names" field in gapic.yaml. API
+  // teams should otherwise use the service name as it appears in the
+  // protobuf.
+  //
+  // Example of a YAML configuration::
+  //
+  //  publishing:
+  //    java_settings:
+  //      service_class_names:
+  //        - google.pubsub.v1.Publisher: TopicAdmin
+  //        - google.pubsub.v1.Subscriber: SubscriptionAdmin
+  map<string, string> service_class_names = 2;
+
+  // Some settings.
+  CommonLanguageSettings common = 3;
+}
+
+// Settings for C++ client libraries.
+message CppSettings {
+  // Some settings.
+  CommonLanguageSettings common = 1;
+}
+
+// Settings for Php client libraries.
+message PhpSettings {
+  // Some settings.
+  CommonLanguageSettings common = 1;
+}
+
+// Settings for Python client libraries.
+message PythonSettings {
+  // Some settings.
+  CommonLanguageSettings common = 1;
+}
+
+// Settings for Node client libraries.
+message NodeSettings {
+  // Some settings.
+  CommonLanguageSettings common = 1;
+}
+
+// Settings for Dotnet client libraries.
+message DotnetSettings {
+  // Some settings.
+  CommonLanguageSettings common = 1;
+}
+
+// Settings for Ruby client libraries.
+message RubySettings {
+  // Some settings.
+  CommonLanguageSettings common = 1;
+}
+
+// Settings for Go client libraries.
+message GoSettings {
+  // Some settings.
+  CommonLanguageSettings common = 1;
+}
+
+// Describes the generator configuration for a method.
+message MethodSettings {
+  // Describes settings to use when generating API methods that use the
+  // long-running operation pattern.
+  // All default values below are from those used in the client library
+  // generators (e.g.
+  // [Java](https://github.com/googleapis/gapic-generator-java/blob/04c2faa191a9b5a10b92392fe8482279c4404803/src/main/java/com/google/api/generator/gapic/composer/common/RetrySettingsComposer.java)).
+  message LongRunning {
+    // Initial delay after which the first poll request will be made.
+    // Default value: 5 seconds.
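+    //
+    // With the defaults below, polls would go out at roughly t = 5s,
+    // 12.5s, 23.75s, and so on, each delay growing 1.5x until capped at
+    // 45 seconds, and polling stops once the 5-minute total timeout
+    // elapses (an illustrative schedule; generators may round differently).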
+ google.protobuf.Duration initial_poll_delay = 1; + + // Multiplier to gradually increase delay between subsequent polls until it + // reaches max_poll_delay. + // Default value: 1.5. + float poll_delay_multiplier = 2; + + // Maximum time between two subsequent poll requests. + // Default value: 45 seconds. + google.protobuf.Duration max_poll_delay = 3; + + // Total polling timeout. + // Default value: 5 minutes. + google.protobuf.Duration total_poll_timeout = 4; + } + + // The fully qualified name of the method, for which the options below apply. + // This is used to find the method to apply the options. + string selector = 1; + + // Describes settings to use for long-running operations when generating + // API methods for RPCs. Complements RPCs that use the annotations in + // google/longrunning/operations.proto. + // + // Example of a YAML configuration:: + // + // publishing: + // method_behavior: + // - selector: CreateAdDomain + // long_running: + // initial_poll_delay: + // seconds: 60 # 1 minute + // poll_delay_multiplier: 1.5 + // max_poll_delay: + // seconds: 360 # 6 minutes + // total_poll_timeout: + // seconds: 54000 # 90 minutes + LongRunning long_running = 2; +} + +// The organization for which the client libraries are being published. +// Affects the url where generated docs are published, etc. +enum ClientLibraryOrganization { + // Not useful. + CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED = 0; + + // Google Cloud Platform Org. + CLOUD = 1; + + // Ads (Advertising) Org. + ADS = 2; + + // Photos Org. + PHOTOS = 3; + + // Street View Org. + STREET_VIEW = 4; +} + +// To where should client libraries be published? +enum ClientLibraryDestination { + // Client libraries will neither be generated nor published to package + // managers. + CLIENT_LIBRARY_DESTINATION_UNSPECIFIED = 0; + + // Generate the client library in a repo under github.com/googleapis, + // but don't publish it to package managers. + GITHUB = 10; + + // Publish the library to package managers like nuget.org and npmjs.com. + PACKAGE_MANAGER = 20; +} diff --git a/opentelemetry-stackdriver/proto/google/api/field_behavior.proto b/opentelemetry-stackdriver/proto/google/api/field_behavior.proto new file mode 100644 index 00000000..c4abe3b6 --- /dev/null +++ b/opentelemetry-stackdriver/proto/google/api/field_behavior.proto @@ -0,0 +1,90 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.api; + +import "google/protobuf/descriptor.proto"; + +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; +option java_multiple_files = true; +option java_outer_classname = "FieldBehaviorProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +extend google.protobuf.FieldOptions { + // A designation of a specific field behavior (required, output only, etc.) + // in protobuf messages. 
+ // + // Examples: + // + // string name = 1 [(google.api.field_behavior) = REQUIRED]; + // State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + // google.protobuf.Duration ttl = 1 + // [(google.api.field_behavior) = INPUT_ONLY]; + // google.protobuf.Timestamp expire_time = 1 + // [(google.api.field_behavior) = OUTPUT_ONLY, + // (google.api.field_behavior) = IMMUTABLE]; + repeated google.api.FieldBehavior field_behavior = 1052; +} + +// An indicator of the behavior of a given field (for example, that a field +// is required in requests, or given as output but ignored as input). +// This **does not** change the behavior in protocol buffers itself; it only +// denotes the behavior and may affect how API tooling handles the field. +// +// Note: This enum **may** receive new values in the future. +enum FieldBehavior { + // Conventional default for enums. Do not use this. + FIELD_BEHAVIOR_UNSPECIFIED = 0; + + // Specifically denotes a field as optional. + // While all fields in protocol buffers are optional, this may be specified + // for emphasis if appropriate. + OPTIONAL = 1; + + // Denotes a field as required. + // This indicates that the field **must** be provided as part of the request, + // and failure to do so will cause an error (usually `INVALID_ARGUMENT`). + REQUIRED = 2; + + // Denotes a field as output only. + // This indicates that the field is provided in responses, but including the + // field in a request does nothing (the server *must* ignore it and + // *must not* throw an error as a result of the field's presence). + OUTPUT_ONLY = 3; + + // Denotes a field as input only. + // This indicates that the field is provided in requests, and the + // corresponding field is not included in output. + INPUT_ONLY = 4; + + // Denotes a field as immutable. + // This indicates that the field may be set once in a request to create a + // resource, but may not be changed thereafter. + IMMUTABLE = 5; + + // Denotes that a (repeated) field is an unordered list. + // This indicates that the service may provide the elements of the list + // in any arbitrary order, rather than the order the user originally + // provided. Additionally, the list's order may or may not be stable. + UNORDERED_LIST = 6; + + // Denotes that this field returns a non-empty default value if not set. + // This indicates that if the user provides the empty value in a request, + // a non-empty value will be returned. The user will not be aware of what + // non-empty value to expect. + NON_EMPTY_DEFAULT = 7; +} diff --git a/opentelemetry-stackdriver/proto/google/api/http.proto b/opentelemetry-stackdriver/proto/google/api/http.proto new file mode 100644 index 00000000..113fa936 --- /dev/null +++ b/opentelemetry-stackdriver/proto/google/api/http.proto @@ -0,0 +1,375 @@ +// Copyright 2015 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package google.api; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; +option java_multiple_files = true; +option java_outer_classname = "HttpProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +// Defines the HTTP configuration for an API service. It contains a list of +// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method +// to one or more HTTP REST API methods. +message Http { + // A list of HTTP configuration rules that apply to individual API methods. + // + // **NOTE:** All service configuration rules follow "last one wins" order. + repeated HttpRule rules = 1; + + // When set to true, URL path parameters will be fully URI-decoded except in + // cases of single segment matches in reserved expansion, where "%2F" will be + // left encoded. + // + // The default behavior is to not decode RFC 6570 reserved characters in multi + // segment matches. + bool fully_decode_reserved_expansion = 2; +} + +// # gRPC Transcoding +// +// gRPC Transcoding is a feature for mapping between a gRPC method and one or +// more HTTP REST endpoints. It allows developers to build a single API service +// that supports both gRPC APIs and REST APIs. Many systems, including [Google +// APIs](https://github.com/googleapis/googleapis), +// [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC +// Gateway](https://github.com/grpc-ecosystem/grpc-gateway), +// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature +// and use it for large scale production services. +// +// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies +// how different portions of the gRPC request message are mapped to the URL +// path, URL query parameters, and HTTP request body. It also controls how the +// gRPC response message is mapped to the HTTP response body. `HttpRule` is +// typically specified as an `google.api.http` annotation on the gRPC method. +// +// Each mapping specifies a URL path template and an HTTP method. The path +// template may refer to one or more fields in the gRPC request message, as long +// as each field is a non-repeated field with a primitive (non-message) type. +// The path template controls how fields of the request message are mapped to +// the URL path. +// +// Example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/{name=messages/*}" +// }; +// } +// } +// message GetMessageRequest { +// string name = 1; // Mapped to URL path. +// } +// message Message { +// string text = 1; // The resource content. +// } +// +// This enables an HTTP REST to gRPC mapping as below: +// +// HTTP | gRPC +// -----|----- +// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` +// +// Any fields in the request message which are not bound by the path template +// automatically become HTTP query parameters if there is no HTTP request body. +// For example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get:"/v1/messages/{message_id}" +// }; +// } +// } +// message GetMessageRequest { +// message SubMessage { +// string subfield = 1; +// } +// string message_id = 1; // Mapped to URL path. +// int64 revision = 2; // Mapped to URL query parameter `revision`. +// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. 
+//     }
+//
+// This enables an HTTP JSON to RPC mapping as below:
+//
+// HTTP | gRPC
+// -----|-----
+// `GET /v1/messages/123456?revision=2&sub.subfield=foo` |
+// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield:
+// "foo"))`
+//
+// Note that fields which are mapped to URL query parameters must have a
+// primitive type or a repeated primitive type or a non-repeated message type.
+// In the case of a repeated type, the parameter can be repeated in the URL
+// as `...?param=A&param=B`. In the case of a message type, each field of the
+// message is mapped to a separate parameter, such as
+// `...?foo.a=A&foo.b=B&foo.c=C`.
+//
+// For HTTP methods that allow a request body, the `body` field
+// specifies the mapping. Consider a REST update method on the
+// message resource collection:
+//
+//     service Messaging {
+//       rpc UpdateMessage(UpdateMessageRequest) returns (Message) {
+//         option (google.api.http) = {
+//           patch: "/v1/messages/{message_id}"
+//           body: "message"
+//         };
+//       }
+//     }
+//     message UpdateMessageRequest {
+//       string message_id = 1; // mapped to the URL
+//       Message message = 2;   // mapped to the body
+//     }
+//
+// The following HTTP JSON to RPC mapping is enabled, where the
+// representation of the JSON in the request body is determined by
+// the proto3 JSON encoding:
+//
+// HTTP | gRPC
+// -----|-----
+// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id:
+// "123456" message { text: "Hi!" })`
+//
+// The special name `*` can be used in the body mapping to define that
+// every field not bound by the path template should be mapped to the
+// request body. This enables the following alternative definition of
+// the update method:
+//
+//     service Messaging {
+//       rpc UpdateMessage(Message) returns (Message) {
+//         option (google.api.http) = {
+//           patch: "/v1/messages/{message_id}"
+//           body: "*"
+//         };
+//       }
+//     }
+//     message Message {
+//       string message_id = 1;
+//       string text = 2;
+//     }
+//
+//
+// The following HTTP JSON to RPC mapping is enabled:
+//
+// HTTP | gRPC
+// -----|-----
+// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id:
+// "123456" text: "Hi!")`
+//
+// Note that when using `*` in the body mapping, it is not possible to
+// have HTTP parameters, as all fields not bound by the path end in
+// the body. This makes this option rarely used in practice when
+// defining REST APIs. The common usage of `*` is in custom methods
+// which don't use the URL at all for transferring data.
+//
+// It is possible to define multiple HTTP methods for one RPC by using
+// the `additional_bindings` option. Example:
+//
+//     service Messaging {
+//       rpc GetMessage(GetMessageRequest) returns (Message) {
+//         option (google.api.http) = {
+//           get: "/v1/messages/{message_id}"
+//           additional_bindings {
+//             get: "/v1/users/{user_id}/messages/{message_id}"
+//           }
+//         };
+//       }
+//     }
+//     message GetMessageRequest {
+//       string message_id = 1;
+//       string user_id = 2;
+//     }
+//
+// This enables the following two alternative HTTP JSON to RPC mappings:
+//
+// HTTP | gRPC
+// -----|-----
+// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")`
+// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id:
+// "123456")`
+//
+// ## Rules for HTTP mapping
+//
+// 1. Leaf request fields (recursively expanding nested messages in the request
+//    message) are classified into three categories:
+//    - Fields referred by the path template. They are passed via the URL path.
+// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They are passed via the HTTP +// request body. +// - All other fields are passed via the URL query parameters, and the +// parameter name is the field path in the request message. A repeated +// field can be represented as multiple query parameters under the same +// name. +// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL query parameter, all fields +// are passed via URL path and HTTP request body. +// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP request body, all +// fields are passed via URL path and URL query parameters. +// +// ### Path template syntax +// +// Template = "/" Segments [ Verb ] ; +// Segments = Segment { "/" Segment } ; +// Segment = "*" | "**" | LITERAL | Variable ; +// Variable = "{" FieldPath [ "=" Segments ] "}" ; +// FieldPath = IDENT { "." IDENT } ; +// Verb = ":" LITERAL ; +// +// The syntax `*` matches a single URL path segment. The syntax `**` matches +// zero or more URL path segments, which must be the last part of the URL path +// except the `Verb`. +// +// The syntax `Variable` matches part of the URL path as specified by its +// template. A variable template must not contain other variables. If a variable +// matches a single path segment, its template may be omitted, e.g. `{var}` +// is equivalent to `{var=*}`. +// +// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` +// contains any reserved character, such characters should be percent-encoded +// before the matching. +// +// If a variable contains exactly one path segment, such as `"{var}"` or +// `"{var=*}"`, when such a variable is expanded into a URL path on the client +// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The +// server side does the reverse decoding. Such variables show up in the +// [Discovery +// Document](https://developers.google.com/discovery/v1/reference/apis) as +// `{var}`. +// +// If a variable contains multiple path segments, such as `"{var=foo/*}"` +// or `"{var=**}"`, when such a variable is expanded into a URL path on the +// client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. +// The server side does the reverse decoding, except "%2F" and "%2f" are left +// unchanged. Such variables show up in the +// [Discovery +// Document](https://developers.google.com/discovery/v1/reference/apis) as +// `{+var}`. +// +// ## Using gRPC API Service Configuration +// +// gRPC API Service Configuration (service config) is a configuration language +// for configuring a gRPC service to become a user-facing product. The +// service config is simply the YAML representation of the `google.api.Service` +// proto message. +// +// As an alternative to annotating your proto file, you can configure gRPC +// transcoding in your service config YAML files. You do this by specifying a +// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same +// effect as the proto annotation. This can be particularly useful if you +// have a proto that is reused in multiple services. Note that any transcoding +// specified in the service config will override any matching transcoding +// configuration in the proto. +// +// Example: +// +// http: +// rules: +// # Selects a gRPC method and applies HttpRule to it. 
+//     - selector: example.v1.Messaging.GetMessage
+//       get: /v1/messages/{message_id}/{sub.subfield}
+//
+// ## Special notes
+//
+// When gRPC Transcoding is used to map gRPC to JSON REST endpoints, the
+// proto to JSON conversion must follow the [proto3
+// specification](https://developers.google.com/protocol-buffers/docs/proto3#json).
+//
+// While the single segment variable follows the semantics of
+// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String
+// Expansion, the multi segment variable **does not** follow RFC 6570 Section
+// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion
+// does not expand special characters like `?` and `#`, which would lead
+// to invalid URLs. As a result, gRPC Transcoding uses a custom encoding
+// for multi segment variables.
+//
+// The path variables **must not** refer to any repeated or mapped field,
+// because client libraries are not capable of handling such variable expansion.
+//
+// The path variables **must not** capture the leading "/" character. The reason
+// is that the most common use case "{var}" does not capture the leading "/"
+// character. For consistency, all path variables must share the same behavior.
+//
+// Repeated message fields must not be mapped to URL query parameters, because
+// no client library can support such complicated mapping.
+//
+// If an API needs to use a JSON array for request or response body, it can map
+// the request or response body to a repeated field. However, some gRPC
+// Transcoding implementations may not support this feature.
+message HttpRule {
+  // Selects a method to which this rule applies.
+  //
+  // Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
+  string selector = 1;
+
+  // Determines the URL pattern matched by this rule. This pattern can be
+  // used with any of the {get|put|post|delete|patch} methods. A custom method
+  // can be defined using the 'custom' field.
+  oneof pattern {
+    // Maps to HTTP GET. Used for listing and getting information about
+    // resources.
+    string get = 2;
+
+    // Maps to HTTP PUT. Used for replacing a resource.
+    string put = 3;
+
+    // Maps to HTTP POST. Used for creating a resource or performing an action.
+    string post = 4;
+
+    // Maps to HTTP DELETE. Used for deleting a resource.
+    string delete = 5;
+
+    // Maps to HTTP PATCH. Used for updating a resource.
+    string patch = 6;
+
+    // The custom pattern is used for specifying an HTTP method that is not
+    // included in the `pattern` field, such as HEAD, or "*" to leave the
+    // HTTP method unspecified for this rule. The wild-card rule is useful
+    // for services that provide content to Web (HTML) clients.
+    CustomHttpPattern custom = 8;
+  }
+
+  // The name of the request field whose value is mapped to the HTTP request
+  // body, or `*` for mapping all request fields not captured by the path
+  // pattern to the HTTP body, or omitted for not having any HTTP request body.
+  //
+  // NOTE: the referred field must be present at the top-level of the request
+  // message type.
+  string body = 7;
+
+  // Optional. The name of the response field whose value is mapped to the HTTP
+  // response body. When omitted, the entire response message will be used
+  // as the HTTP response body.
+  //
+  // NOTE: The referred field must be present at the top-level of the response
+  // message type.
+  string response_body = 12;
+
+  // Additional HTTP bindings for the selector.
Nested bindings must + // not contain an `additional_bindings` field themselves (that is, + // the nesting may only be one level deep). + repeated HttpRule additional_bindings = 11; +} + +// A custom pattern is used for defining custom HTTP verb. +message CustomHttpPattern { + // The name of this custom HTTP verb. + string kind = 1; + + // The path matched by this custom verb. + string path = 2; +} diff --git a/opentelemetry-stackdriver/proto/google/api/label.proto b/opentelemetry-stackdriver/proto/google/api/label.proto new file mode 100644 index 00000000..af294c91 --- /dev/null +++ b/opentelemetry-stackdriver/proto/google/api/label.proto @@ -0,0 +1,48 @@ +// Copyright 2015 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.api; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/genproto/googleapis/api/label;label"; +option java_multiple_files = true; +option java_outer_classname = "LabelProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +// A description of a label. +message LabelDescriptor { + // Value types that can be used as label values. + enum ValueType { + // A variable-length string. This is the default. + STRING = 0; + + // Boolean; true or false. + BOOL = 1; + + // A 64-bit signed integer. + INT64 = 2; + } + + // The label key. + string key = 1; + + // The type of data that can be assigned to the label. + ValueType value_type = 2; + + // A human-readable description for the label. + string description = 3; +} diff --git a/opentelemetry-stackdriver/proto/google/api/launch_stage.proto b/opentelemetry-stackdriver/proto/google/api/launch_stage.proto new file mode 100644 index 00000000..6524db57 --- /dev/null +++ b/opentelemetry-stackdriver/proto/google/api/launch_stage.proto @@ -0,0 +1,72 @@ +// Copyright 2015 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.api; + +option go_package = "google.golang.org/genproto/googleapis/api;api"; +option java_multiple_files = true; +option java_outer_classname = "LaunchStageProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +// The launch stage as defined by [Google Cloud Platform +// Launch Stages](https://cloud.google.com/terms/launch-stages). +enum LaunchStage { + // Do not use this default value. + LAUNCH_STAGE_UNSPECIFIED = 0; + + // The feature is not yet implemented. Users can not use it. 
+ UNIMPLEMENTED = 6; + + // Prelaunch features are hidden from users and are only visible internally. + PRELAUNCH = 7; + + // Early Access features are limited to a closed group of testers. To use + // these features, you must sign up in advance and sign a Trusted Tester + // agreement (which includes confidentiality provisions). These features may + // be unstable, changed in backward-incompatible ways, and are not + // guaranteed to be released. + EARLY_ACCESS = 1; + + // Alpha is a limited availability test for releases before they are cleared + // for widespread use. By Alpha, all significant design issues are resolved + // and we are in the process of verifying functionality. Alpha customers + // need to apply for access, agree to applicable terms, and have their + // projects allowlisted. Alpha releases don't have to be feature complete, + // no SLAs are provided, and there are no technical support obligations, but + // they will be far enough along that customers can actually use them in + // test environments or for limited-use tests -- just like they would in + // normal production cases. + ALPHA = 2; + + // Beta is the point at which we are ready to open a release for any + // customer to use. There are no SLA or technical support obligations in a + // Beta release. Products will be complete from a feature perspective, but + // may have some open outstanding issues. Beta releases are suitable for + // limited production use cases. + BETA = 3; + + // GA features are open to all developers and are considered stable and + // fully qualified for production use. + GA = 4; + + // Deprecated features are scheduled to be shut down and removed. For more + // information, see the "Deprecation Policy" section of our [Terms of + // Service](https://cloud.google.com/terms/) + // and the [Google Cloud Platform Subject to the Deprecation + // Policy](https://cloud.google.com/terms/deprecation) documentation. + DEPRECATED = 5; +} diff --git a/opentelemetry-stackdriver/proto/google/api/monitored_resource.proto b/opentelemetry-stackdriver/proto/google/api/monitored_resource.proto new file mode 100644 index 00000000..8b97baa1 --- /dev/null +++ b/opentelemetry-stackdriver/proto/google/api/monitored_resource.proto @@ -0,0 +1,125 @@ +// Copyright 2015 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.api; + +import "google/api/label.proto"; +import "google/api/launch_stage.proto"; +import "google/protobuf/struct.proto"; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/genproto/googleapis/api/monitoredres;monitoredres"; +option java_multiple_files = true; +option java_outer_classname = "MonitoredResourceProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +// An object that describes the schema of a [MonitoredResource][google.api.MonitoredResource] object using a +// type name and a set of labels. 
For example, the monitored resource +// descriptor for Google Compute Engine VM instances has a type of +// `"gce_instance"` and specifies the use of the labels `"instance_id"` and +// `"zone"` to identify particular VM instances. +// +// Different APIs can support different monitored resource types. APIs generally +// provide a `list` method that returns the monitored resource descriptors used +// by the API. +// +message MonitoredResourceDescriptor { + // Optional. The resource name of the monitored resource descriptor: + // `"projects/{project_id}/monitoredResourceDescriptors/{type}"` where + // {type} is the value of the `type` field in this object and + // {project_id} is a project ID that provides API-specific context for + // accessing the type. APIs that do not use project information can use the + // resource name format `"monitoredResourceDescriptors/{type}"`. + string name = 5; + + // Required. The monitored resource type. For example, the type + // `"cloudsql_database"` represents databases in Google Cloud SQL. + // For a list of types, see [Monitoring resource + // types](https://cloud.google.com/monitoring/api/resources) + // and [Logging resource + // types](https://cloud.google.com/logging/docs/api/v2/resource-list). + string type = 1; + + // Optional. A concise name for the monitored resource type that might be + // displayed in user interfaces. It should be a Title Cased Noun Phrase, + // without any article or other determiners. For example, + // `"Google Cloud SQL Database"`. + string display_name = 2; + + // Optional. A detailed description of the monitored resource type that might + // be used in documentation. + string description = 3; + + // Required. A set of labels used to describe instances of this monitored + // resource type. For example, an individual Google Cloud SQL database is + // identified by values for the labels `"database_id"` and `"zone"`. + repeated LabelDescriptor labels = 4; + + // Optional. The launch stage of the monitored resource definition. + LaunchStage launch_stage = 7; +} + +// An object representing a resource that can be used for monitoring, logging, +// billing, or other purposes. Examples include virtual machine instances, +// databases, and storage devices such as disks. The `type` field identifies a +// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object that describes the resource's +// schema. Information in the `labels` field identifies the actual resource and +// its attributes according to the schema. For example, a particular Compute +// Engine VM instance could be represented by the following object, because the +// [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] for `"gce_instance"` has labels +// `"project_id"`, `"instance_id"` and `"zone"`: +// +// { "type": "gce_instance", +// "labels": { "project_id": "my-project", +// "instance_id": "12345678901234", +// "zone": "us-central1-a" }} +message MonitoredResource { + // Required. The monitored resource type. This field must match + // the `type` field of a [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object. For + // example, the type of a Compute Engine VM instance is `gce_instance`. + // Some descriptors include the service name in the type; for example, + // the type of a Datastream stream is `datastream.googleapis.com/Stream`. + string type = 1; + + // Required. Values for all of the labels listed in the associated monitored + // resource descriptor. 
For example, Compute Engine VM instances use the
+  // labels `"project_id"`, `"instance_id"`, and `"zone"`.
+  map<string, string> labels = 2;
+}
+
+// Auxiliary metadata for a [MonitoredResource][google.api.MonitoredResource] object.
+// [MonitoredResource][google.api.MonitoredResource] objects contain the minimum set of information to
+// uniquely identify a monitored resource instance. There is some other useful
+// auxiliary metadata. Monitoring and Logging use an ingestion
+// pipeline to extract metadata for cloud resources of all types, and store
+// the metadata in this message.
+message MonitoredResourceMetadata {
+  // Output only. Values for predefined system metadata labels.
+  // System labels are a kind of metadata extracted by Google, including
+  // "machine_image", "vpc", "subnet_id",
+  // "security_group", "name", etc.
+  // System label values can be only strings, Boolean values, or a list of
+  // strings. For example:
+  //
+  //     { "name": "my-test-instance",
+  //       "security_group": ["a", "b", "c"],
+  //       "spot_instance": false }
+  google.protobuf.Struct system_labels = 1;
+
+  // Output only. A map of user-defined metadata labels.
+  map<string, string> user_labels = 2;
+}
diff --git a/opentelemetry-stackdriver/proto/google/api/resource.proto b/opentelemetry-stackdriver/proto/google/api/resource.proto
new file mode 100644
index 00000000..0ce0344f
--- /dev/null
+++ b/opentelemetry-stackdriver/proto/google/api/resource.proto
@@ -0,0 +1,238 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+import "google/protobuf/descriptor.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations";
+option java_multiple_files = true;
+option java_outer_classname = "ResourceProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+extend google.protobuf.FieldOptions {
+  // An annotation that describes a resource reference, see
+  // [ResourceReference][].
+  google.api.ResourceReference resource_reference = 1055;
+}
+
+extend google.protobuf.FileOptions {
+  // An annotation that describes a resource definition without a corresponding
+  // message; see [ResourceDescriptor][].
+  repeated google.api.ResourceDescriptor resource_definition = 1053;
+}
+
+extend google.protobuf.MessageOptions {
+  // An annotation that describes a resource definition, see
+  // [ResourceDescriptor][].
+  google.api.ResourceDescriptor resource = 1053;
+}
+
+// A simple descriptor of a resource type.
+//
+// ResourceDescriptor annotates a resource message (either by means of a
+// protobuf annotation or use in the service config), and associates the
+// resource's schema, the resource type, and the pattern of the resource name.
+//
+// Example:
+//
+//     message Topic {
+//       // Indicates this message defines a resource schema.
+//       // Declares the resource type in the format of {service}/{kind}.
+//       // For Kubernetes resources, the format is {api group}/{kind}.
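+//       // (e.g. "apps/Deployment" for a Deployment in the "apps" API group).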
+// option (google.api.resource) = { +// type: "pubsub.googleapis.com/Topic" +// pattern: "projects/{project}/topics/{topic}" +// }; +// } +// +// The ResourceDescriptor Yaml config will look like: +// +// resources: +// - type: "pubsub.googleapis.com/Topic" +// pattern: "projects/{project}/topics/{topic}" +// +// Sometimes, resources have multiple patterns, typically because they can +// live under multiple parents. +// +// Example: +// +// message LogEntry { +// option (google.api.resource) = { +// type: "logging.googleapis.com/LogEntry" +// pattern: "projects/{project}/logs/{log}" +// pattern: "folders/{folder}/logs/{log}" +// pattern: "organizations/{organization}/logs/{log}" +// pattern: "billingAccounts/{billing_account}/logs/{log}" +// }; +// } +// +// The ResourceDescriptor Yaml config will look like: +// +// resources: +// - type: 'logging.googleapis.com/LogEntry' +// pattern: "projects/{project}/logs/{log}" +// pattern: "folders/{folder}/logs/{log}" +// pattern: "organizations/{organization}/logs/{log}" +// pattern: "billingAccounts/{billing_account}/logs/{log}" +message ResourceDescriptor { + // A description of the historical or future-looking state of the + // resource pattern. + enum History { + // The "unset" value. + HISTORY_UNSPECIFIED = 0; + + // The resource originally had one pattern and launched as such, and + // additional patterns were added later. + ORIGINALLY_SINGLE_PATTERN = 1; + + // The resource has one pattern, but the API owner expects to add more + // later. (This is the inverse of ORIGINALLY_SINGLE_PATTERN, and prevents + // that from being necessary once there are multiple patterns.) + FUTURE_MULTI_PATTERN = 2; + } + + // A flag representing a specific style that a resource claims to conform to. + enum Style { + // The unspecified value. Do not use. + STYLE_UNSPECIFIED = 0; + + // This resource is intended to be "declarative-friendly". + // + // Declarative-friendly resources must be more strictly consistent, and + // setting this to true communicates to tools that this resource should + // adhere to declarative-friendly expectations. + // + // Note: This is used by the API linter (linter.aip.dev) to enable + // additional checks. + DECLARATIVE_FRIENDLY = 1; + } + + // The resource type. It must be in the format of + // {service_name}/{resource_type_kind}. The `resource_type_kind` must be + // singular and must not include version numbers. + // + // Example: `storage.googleapis.com/Bucket` + // + // The value of the resource_type_kind must follow the regular expression + // /[A-Za-z][a-zA-Z0-9]+/. It should start with an upper case character and + // should use PascalCase (UpperCamelCase). The maximum number of + // characters allowed for the `resource_type_kind` is 100. + string type = 1; + + // Optional. The relative resource name pattern associated with this resource + // type. The DNS prefix of the full resource name shouldn't be specified here. + // + // The path pattern must follow the syntax, which aligns with HTTP binding + // syntax: + // + // Template = Segment { "/" Segment } ; + // Segment = LITERAL | Variable ; + // Variable = "{" LITERAL "}" ; + // + // Examples: + // + // - "projects/{project}/topics/{topic}" + // - "projects/{project}/knowledgeBases/{knowledge_base}" + // + // The components in braces correspond to the IDs for each resource in the + // hierarchy. It is expected that, if multiple patterns are provided, + // the same component name (e.g. "project") refers to IDs of the same + // type of resource. 
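+  //
+  // For example, if one pattern is "projects/{project}/topics/{topic}", a
+  // second pattern for the same resource should also name its project
+  // component "{project}" rather than, say, "{project_id}".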
+ repeated string pattern = 2; + + // Optional. The field on the resource that designates the resource name + // field. If omitted, this is assumed to be "name". + string name_field = 3; + + // Optional. The historical or future-looking state of the resource pattern. + // + // Example: + // + // // The InspectTemplate message originally only supported resource + // // names with organization, and project was added later. + // message InspectTemplate { + // option (google.api.resource) = { + // type: "dlp.googleapis.com/InspectTemplate" + // pattern: + // "organizations/{organization}/inspectTemplates/{inspect_template}" + // pattern: "projects/{project}/inspectTemplates/{inspect_template}" + // history: ORIGINALLY_SINGLE_PATTERN + // }; + // } + History history = 4; + + // The plural name used in the resource name and permission names, such as + // 'projects' for the resource name of 'projects/{project}' and the permission + // name of 'cloudresourcemanager.googleapis.com/projects.get'. It is the same + // concept of the `plural` field in k8s CRD spec + // https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/ + // + // Note: The plural form is required even for singleton resources. See + // https://aip.dev/156 + string plural = 5; + + // The same concept of the `singular` field in k8s CRD spec + // https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/ + // Such as "project" for the `resourcemanager.googleapis.com/Project` type. + string singular = 6; + + // Style flag(s) for this resource. + // These indicate that a resource is expected to conform to a given + // style. See the specific style flags for additional information. + repeated Style style = 10; +} + +// Defines a proto annotation that describes a string field that refers to +// an API resource. +message ResourceReference { + // The resource type that the annotated field references. + // + // Example: + // + // message Subscription { + // string topic = 2 [(google.api.resource_reference) = { + // type: "pubsub.googleapis.com/Topic" + // }]; + // } + // + // Occasionally, a field may reference an arbitrary resource. In this case, + // APIs use the special value * in their resource reference. + // + // Example: + // + // message GetIamPolicyRequest { + // string resource = 2 [(google.api.resource_reference) = { + // type: "*" + // }]; + // } + string type = 1; + + // The resource type of a child collection that the annotated field + // references. This is useful for annotating the `parent` field that + // doesn't have a fixed resource type. + // + // Example: + // + // message ListLogEntriesRequest { + // string parent = 1 [(google.api.resource_reference) = { + // child_type: "logging.googleapis.com/LogEntry" + // }; + // } + string child_type = 2; +} diff --git a/opentelemetry-stackdriver/proto/google/devtools/cloudtrace/v2/trace.proto b/opentelemetry-stackdriver/proto/google/devtools/cloudtrace/v2/trace.proto new file mode 100644 index 00000000..f2b4b481 --- /dev/null +++ b/opentelemetry-stackdriver/proto/google/devtools/cloudtrace/v2/trace.proto @@ -0,0 +1,387 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.devtools.cloudtrace.v2;
+
+import "google/api/field_behavior.proto";
+import "google/api/resource.proto";
+import "google/protobuf/timestamp.proto";
+import "google/protobuf/wrappers.proto";
+import "google/rpc/status.proto";
+
+option csharp_namespace = "Google.Cloud.Trace.V2";
+option go_package = "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2;cloudtrace";
+option java_multiple_files = true;
+option java_outer_classname = "TraceProto";
+option java_package = "com.google.devtools.cloudtrace.v2";
+option php_namespace = "Google\\Cloud\\Trace\\V2";
+option ruby_package = "Google::Cloud::Trace::V2";
+
+// A span represents a single operation within a trace. Spans can be
+// nested to form a trace tree. Often, a trace contains a root span
+// that describes the end-to-end latency, and one or more subspans for
+// its sub-operations.
+//
+// A trace can also contain multiple root spans, or none at all.
+// Spans do not need to be contiguous. There might be
+// gaps or overlaps between spans in a trace.
+message Span {
+  option (google.api.resource) = {
+    type: "cloudtrace.googleapis.com/Span"
+    pattern: "projects/{project}/traces/{trace}/spans/{span}"
+  };
+
+  // A set of attributes as key-value pairs.
+  message Attributes {
+    // A set of attributes. Each attribute's key can be up to 128 bytes
+    // long. The value can be a string up to 256 bytes, a signed 64-bit integer,
+    // or the boolean values `true` or `false`. For example:
+    //
+    //     "/instance_id": { "string_value": { "value": "my-instance" } }
+    //     "/http/request_bytes": { "int_value": 300 }
+    //     "abc.com/myattribute": { "bool_value": false }
+    map<string, AttributeValue> attribute_map = 1;
+
+    // The number of attributes that were discarded. Attributes can be discarded
+    // because their keys are too long or because there are too many attributes.
+    // If this value is 0 then all attributes are valid.
+    int32 dropped_attributes_count = 2;
+  }
+
+  // A time-stamped annotation or message event in the Span.
+  message TimeEvent {
+    // Text annotation with a set of attributes.
+    message Annotation {
+      // A user-supplied message describing the event. The maximum length for
+      // the description is 256 bytes.
+      TruncatableString description = 1;
+
+      // A set of attributes on the annotation. You can have up to 4 attributes
+      // per Annotation.
+      Attributes attributes = 2;
+    }
+
+    // An event describing a message sent/received between Spans.
+    message MessageEvent {
+      // Indicates whether the message was sent or received.
+      enum Type {
+        // Unknown event type.
+        TYPE_UNSPECIFIED = 0;
+
+        // Indicates a sent message.
+        SENT = 1;
+
+        // Indicates a received message.
+        RECEIVED = 2;
+      }
+
+      // Type of MessageEvent. Indicates whether the message was sent or
+      // received.
+      Type type = 1;
+
+      // An identifier for the MessageEvent's message that can be used to match
+      // `SENT` and `RECEIVED` MessageEvents.
+      int64 id = 2;
+
+      // The number of uncompressed bytes sent or received.
+      int64 uncompressed_size_bytes = 3;
+
+      // The number of compressed bytes sent or received.
If missing, the
+ // compressed size is assumed to be the same size as the uncompressed
+ // size.
+ int64 compressed_size_bytes = 4;
+ }
+
+ // The timestamp indicating the time the event occurred.
+ google.protobuf.Timestamp time = 1;
+
+ // A `TimeEvent` can contain either an `Annotation` object or a
+ // `MessageEvent` object, but not both.
+ oneof value {
+ // Text annotation with a set of attributes.
+ Annotation annotation = 2;
+
+ // An event describing a message sent/received between Spans.
+ MessageEvent message_event = 3;
+ }
+ }
+
+ // A collection of `TimeEvent`s. A `TimeEvent` is a time-stamped annotation
+ // on the span, consisting of either user-supplied key:value pairs, or
+ // details of a message sent/received between Spans.
+ message TimeEvents {
+ // A collection of `TimeEvent`s.
+ repeated TimeEvent time_event = 1;
+
+ // The number of dropped annotations in all the included time events.
+ // If the value is 0, then no annotations were dropped.
+ int32 dropped_annotations_count = 2;
+
+ // The number of dropped message events in all the included time events.
+ // If the value is 0, then no message events were dropped.
+ int32 dropped_message_events_count = 3;
+ }
+
+ // A pointer from the current span to another span in the same trace or in a
+ // different trace. For example, this can be used in batching operations,
+ // where a single batch handler processes multiple requests from different
+ // traces or when the handler receives a request from a different project.
+ message Link {
+ // The relationship of the current span relative to the linked span: child,
+ // parent, or unspecified.
+ enum Type {
+ // The relationship of the two spans is unknown.
+ TYPE_UNSPECIFIED = 0;
+
+ // The linked span is a child of the current span.
+ CHILD_LINKED_SPAN = 1;
+
+ // The linked span is a parent of the current span.
+ PARENT_LINKED_SPAN = 2;
+ }
+
+ // The `[TRACE_ID]` for a trace within a project.
+ string trace_id = 1;
+
+ // The `[SPAN_ID]` for a span within a trace.
+ string span_id = 2;
+
+ // The relationship of the current span relative to the linked span.
+ Type type = 3;
+
+ // A set of attributes on the link. Up to 32 attributes can be
+ // specified per link.
+ Attributes attributes = 4;
+ }
+
+ // A collection of links, which are references from this span to a span
+ // in the same or different trace.
+ message Links {
+ // A collection of links.
+ repeated Link link = 1;
+
+ // The number of dropped links after the maximum size was enforced. If
+ // this value is 0, then no links were dropped.
+ int32 dropped_links_count = 2;
+ }
+
+ // Type of span. Can be used to specify additional relationships between spans
+ // in addition to a parent/child relationship.
+ enum SpanKind {
+ // Unspecified. Do NOT use as default.
+ // Implementations MAY assume SpanKind.INTERNAL to be default.
+ SPAN_KIND_UNSPECIFIED = 0;
+
+ // Indicates that the span is used internally. Default value.
+ INTERNAL = 1;
+
+ // Indicates that the span covers server-side handling of an RPC or other
+ // remote network request.
+ SERVER = 2;
+
+ // Indicates that the span covers the client-side wrapper around an RPC or
+ // other remote request.
+ CLIENT = 3;
+
+ // Indicates that the span describes a producer sending a message to a
+ // broker. Unlike client and server, there is no direct critical path latency
+ // relationship between producer and consumer spans (e.g. publishing a
+ // message to a pubsub service).
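+ //
+ // Illustrative example (editorial addition, not upstream text): a
+ // `PRODUCER` span wrapping a `publish()` call may end long before the
+ // matching `CONSUMER` span that processes the message begins.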
+ PRODUCER = 4;
+
+ // Indicates that the span describes a consumer receiving a message from a
+ // broker. Unlike client and server, there is no direct critical path
+ // latency relationship between producer and consumer spans (e.g. receiving
+ // a message from a pubsub service subscription).
+ CONSUMER = 5;
+ }
+
+ // Required. The resource name of the span in the following format:
+ //
+ // * `projects/[PROJECT_ID]/traces/[TRACE_ID]/spans/[SPAN_ID]`
+ //
+ // `[TRACE_ID]` is a unique identifier for a trace within a project;
+ // it is a 32-character hexadecimal encoding of a 16-byte array. It should
+ // not be zero.
+ //
+ // `[SPAN_ID]` is a unique identifier for a span within a trace; it
+ // is a 16-character hexadecimal encoding of an 8-byte array. It should not
+ // be zero.
+ string name = 1 [(google.api.field_behavior) = REQUIRED];
+
+ // Required. The `[SPAN_ID]` portion of the span's resource name.
+ string span_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+ // The `[SPAN_ID]` of this span's parent span. If this is a root span,
+ // then this field must be empty.
+ string parent_span_id = 3;
+
+ // Required. A description of the span's operation (up to 128 bytes).
+ // Cloud Trace displays the description in the
+ // Cloud console.
+ // For example, the display name can be a qualified method name or a file name
+ // and a line number where the operation is called. A best practice is to use
+ // the same display name within an application and at the same call point.
+ // This makes it easier to correlate spans in different traces.
+ TruncatableString display_name = 4 [(google.api.field_behavior) = REQUIRED];
+
+ // Required. The start time of the span. On the client side, this is the time
+ // kept by the local machine where the span execution starts. On the server
+ // side, this is the time when the server's application handler starts
+ // running.
+ google.protobuf.Timestamp start_time = 5
+ [(google.api.field_behavior) = REQUIRED];
+
+ // Required. The end time of the span. On the client side, this is the time
+ // kept by the local machine where the span execution ends. On the server
+ // side, this is the time when the server application handler stops running.
+ google.protobuf.Timestamp end_time = 6
+ [(google.api.field_behavior) = REQUIRED];
+
+ // A set of attributes on the span. You can have up to 32 attributes per
+ // span.
+ Attributes attributes = 7;
+
+ // Stack trace captured at the start of the span.
+ StackTrace stack_trace = 8;
+
+ // A set of time events. You can have up to 32 annotations and 128 message
+ // events per span.
+ TimeEvents time_events = 9;
+
+ // Links associated with the span. You can have up to 128 links per Span.
+ Links links = 10;
+
+ // Optional. The final status for this span.
+ google.rpc.Status status = 11 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. Set this parameter to indicate whether this span is in
+ // the same process as its parent. If you do not set this parameter,
+ // Trace is unable to take advantage of this helpful information.
+ google.protobuf.BoolValue same_process_as_parent_span = 12
+ [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. The number of child spans that were generated while this span
+ // was active. If set, allows implementations to detect missing child spans.
+ google.protobuf.Int32Value child_span_count = 13
+ [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. Distinguishes between spans generated in a particular context.
+ // For example, two spans with the same name may be distinguished using + // `CLIENT` (caller) and `SERVER` (callee) to identify an RPC call. + SpanKind span_kind = 14 [(google.api.field_behavior) = OPTIONAL]; +} + +// The allowed types for `[VALUE]` in a `[KEY]:[VALUE]` attribute. +message AttributeValue { + // The type of the value. + oneof value { + // A string up to 256 bytes long. + TruncatableString string_value = 1; + + // A 64-bit signed integer. + int64 int_value = 2; + + // A Boolean value represented by `true` or `false`. + bool bool_value = 3; + } +} + +// A call stack appearing in a trace. +message StackTrace { + // Represents a single stack frame in a stack trace. + message StackFrame { + // The fully-qualified name that uniquely identifies the function or + // method that is active in this frame (up to 1024 bytes). + TruncatableString function_name = 1; + + // An un-mangled function name, if `function_name` is mangled. + // To get information about name mangling, run + // [this search](https://www.google.com/search?q=cxx+name+mangling). + // The name can be fully-qualified (up to 1024 bytes). + TruncatableString original_function_name = 2; + + // The name of the source file where the function call appears (up to 256 + // bytes). + TruncatableString file_name = 3; + + // The line number in `file_name` where the function call appears. + int64 line_number = 4; + + // The column number where the function call appears, if available. + // This is important in JavaScript because of its anonymous functions. + int64 column_number = 5; + + // The binary module from where the code was loaded. + Module load_module = 6; + + // The version of the deployed source code (up to 128 bytes). + TruncatableString source_version = 7; + } + + // A collection of stack frames, which can be truncated. + message StackFrames { + // Stack frames in this call stack. + repeated StackFrame frame = 1; + + // The number of stack frames that were dropped because there + // were too many stack frames. + // If this value is 0, then no stack frames were dropped. + int32 dropped_frames_count = 2; + } + + // Stack frames in this stack trace. A maximum of 128 frames are allowed. + StackFrames stack_frames = 1; + + // The hash ID is used to conserve network bandwidth for duplicate + // stack traces within a single trace. + // + // Often multiple spans will have identical stack traces. + // The first occurrence of a stack trace should contain both the + // `stackFrame` content and a value in `stackTraceHashId`. + // + // Subsequent spans within the same request can refer + // to that stack trace by only setting `stackTraceHashId`. + int64 stack_trace_hash_id = 2; +} + +// Binary module. +message Module { + // For example: main binary, kernel modules, and dynamic libraries + // such as libc.so, sharedlib.so (up to 256 bytes). + TruncatableString module = 1; + + // A unique identifier for the module, usually a hash of its + // contents (up to 128 bytes). + TruncatableString build_id = 2; +} + +// Represents a string that might be shortened to a specified length. +message TruncatableString { + // The shortened string. For example, if the original string is 500 + // bytes long and the limit of the string is 128 bytes, then + // `value` contains the first 128 bytes of the 500-byte string. + // + // Truncation always happens on a UTF8 character boundary. If there + // are multi-byte characters in the string, then the length of the + // shortened string might be less than the size limit. 
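+ //
+ // Illustrative worked example (editorial addition, not upstream text): with
+ // a 2-byte limit, truncating "héllo" (6 bytes in UTF-8) yields `value: "h"`
+ // and `truncated_byte_count: 5`, because cutting at exactly 2 bytes would
+ // split the 2-byte character "é".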
+ string value = 1; + + // The number of bytes removed from the original string. If this + // value is 0, then the string was not shortened. + int32 truncated_byte_count = 2; +} diff --git a/opentelemetry-stackdriver/proto/google/devtools/cloudtrace/v2/tracing.proto b/opentelemetry-stackdriver/proto/google/devtools/cloudtrace/v2/tracing.proto new file mode 100644 index 00000000..0aac221d --- /dev/null +++ b/opentelemetry-stackdriver/proto/google/devtools/cloudtrace/v2/tracing.proto @@ -0,0 +1,80 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.devtools.cloudtrace.v2; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/devtools/cloudtrace/v2/trace.proto"; +import "google/protobuf/empty.proto"; + +option csharp_namespace = "Google.Cloud.Trace.V2"; +option go_package = "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2;cloudtrace"; +option java_multiple_files = true; +option java_outer_classname = "TracingProto"; +option java_package = "com.google.devtools.cloudtrace.v2"; +option php_namespace = "Google\\Cloud\\Trace\\V2"; +option ruby_package = "Google::Cloud::Trace::V2"; + +// Service for collecting and viewing traces and spans within a trace. +// +// A trace is a collection of spans corresponding to a single +// operation or a set of operations in an application. +// +// A span is an individual timed event which forms a node of the trace tree. +// A single trace can contain spans from multiple services. +service TraceService { + option (google.api.default_host) = "cloudtrace.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform," + "https://www.googleapis.com/auth/trace.append"; + + // Batch writes new spans to new or existing traces. You cannot update + // existing spans. + rpc BatchWriteSpans(BatchWriteSpansRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + post: "/v2/{name=projects/*}/traces:batchWrite" + body: "*" + }; + option (google.api.method_signature) = "name,spans"; + } + + // Creates a new span. + rpc CreateSpan(Span) returns (Span) { + option (google.api.http) = { + post: "/v2/{name=projects/*/traces/*/spans/*}" + body: "*" + }; + } +} + +// The request message for the `BatchWriteSpans` method. +message BatchWriteSpansRequest { + // Required. The name of the project where the spans belong. The format is + // `projects/[PROJECT_ID]`. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "cloudresourcemanager.googleapis.com/Project" + } + ]; + + // Required. A list of new spans. The span names must not match existing + // spans, otherwise the results are undefined. 
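+ //
+ // Illustrative example (editorial addition; identifiers are hypothetical):
+ // a batch for `name: "projects/my-project"` could carry spans named
+ // `projects/my-project/traces/[TRACE_ID]/spans/[SPAN_ID]`, where each
+ // `[TRACE_ID]`/`[SPAN_ID]` pair is unique within the batch.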
+ repeated Span spans = 2 [(google.api.field_behavior) = REQUIRED];
+}
diff --git a/opentelemetry-stackdriver/proto/google/logging/type/http_request.proto b/opentelemetry-stackdriver/proto/google/logging/type/http_request.proto
new file mode 100644
index 00000000..b878d60d
--- /dev/null
+++ b/opentelemetry-stackdriver/proto/google/logging/type/http_request.proto
@@ -0,0 +1,95 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.logging.type;
+
+import "google/protobuf/duration.proto";
+
+option csharp_namespace = "Google.Cloud.Logging.Type";
+option go_package = "google.golang.org/genproto/googleapis/logging/type;ltype";
+option java_multiple_files = true;
+option java_outer_classname = "HttpRequestProto";
+option java_package = "com.google.logging.type";
+option php_namespace = "Google\\Cloud\\Logging\\Type";
+option ruby_package = "Google::Cloud::Logging::Type";
+
+// A common proto for logging HTTP requests. Only contains semantics
+// defined by the HTTP specification. Product-specific logging
+// information MUST be defined in a separate message.
+message HttpRequest {
+ // The request method. Examples: `"GET"`, `"HEAD"`, `"PUT"`, `"POST"`.
+ string request_method = 1;
+
+ // The scheme (http, https), the host name, the path and the query
+ // portion of the URL that was requested.
+ // Example: `"http://example.com/some/info?color=red"`.
+ string request_url = 2;
+
+ // The size of the HTTP request message in bytes, including the request
+ // headers and the request body.
+ int64 request_size = 3;
+
+ // The response code indicating the status of the response.
+ // Examples: 200, 404.
+ int32 status = 4;
+
+ // The size of the HTTP response message sent back to the client, in bytes,
+ // including the response headers and the response body.
+ int64 response_size = 5;
+
+ // The user agent sent by the client. Example:
+ // `"Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; Q312461; .NET
+ // CLR 1.0.3705)"`.
+ string user_agent = 6;
+
+ // The IP address (IPv4 or IPv6) of the client that issued the HTTP
+ // request. This field can include port information. Examples:
+ // `"192.168.1.1"`, `"10.0.0.1:80"`, `"FE80::0202:B3FF:FE1E:8329"`.
+ string remote_ip = 7;
+
+ // The IP address (IPv4 or IPv6) of the origin server that the request was
+ // sent to. This field can include port information. Examples:
+ // `"192.168.1.1"`, `"10.0.0.1:80"`, `"FE80::0202:B3FF:FE1E:8329"`.
+ string server_ip = 13;
+
+ // The referer URL of the request, as defined in
+ // [HTTP/1.1 Header Field
+ // Definitions](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html).
+ string referer = 8;
+
+ // The request processing latency on the server, from the time the request was
+ // received until the response was sent.
+ google.protobuf.Duration latency = 14;
+
+ // Whether or not a cache lookup was attempted.
+ bool cache_lookup = 11;
+
+ // Whether or not an entity was served from cache
+ // (with or without validation).
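+ //
+ // Illustrative example (editorial addition, not upstream text): a response
+ // revalidated against the origin and then served from cache would set
+ // `cache_lookup: true`, `cache_hit: true`, and
+ // `cache_validated_with_origin_server: true`.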
+ bool cache_hit = 9; + + // Whether or not the response was validated with the origin server before + // being served from cache. This field is only meaningful if `cache_hit` is + // True. + bool cache_validated_with_origin_server = 10; + + // The number of HTTP response bytes inserted into cache. Set only when a + // cache fill was attempted. + int64 cache_fill_bytes = 12; + + // Protocol used for the request. Examples: "HTTP/1.1", "HTTP/2", "websocket" + string protocol = 15; +} diff --git a/opentelemetry-stackdriver/proto/google/logging/type/log_severity.proto b/opentelemetry-stackdriver/proto/google/logging/type/log_severity.proto new file mode 100644 index 00000000..bed71935 --- /dev/null +++ b/opentelemetry-stackdriver/proto/google/logging/type/log_severity.proto @@ -0,0 +1,71 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.logging.type; + +option csharp_namespace = "Google.Cloud.Logging.Type"; +option go_package = "google.golang.org/genproto/googleapis/logging/type;ltype"; +option java_multiple_files = true; +option java_outer_classname = "LogSeverityProto"; +option java_package = "com.google.logging.type"; +option objc_class_prefix = "GLOG"; +option php_namespace = "Google\\Cloud\\Logging\\Type"; +option ruby_package = "Google::Cloud::Logging::Type"; + +// The severity of the event described in a log entry, expressed as one of the +// standard severity levels listed below. For your reference, the levels are +// assigned the listed numeric values. The effect of using numeric values other +// than those listed is undefined. +// +// You can filter for log entries by severity. For example, the following +// filter expression will match log entries with severities `INFO`, `NOTICE`, +// and `WARNING`: +// +// severity > DEBUG AND severity <= WARNING +// +// If you are writing log entries, you should map other severity encodings to +// one of these standard levels. For example, you might map all of Java's FINE, +// FINER, and FINEST levels to `LogSeverity.DEBUG`. You can preserve the +// original severity level in the log entry payload if you wish. +enum LogSeverity { + // (0) The log entry has no assigned severity level. + DEFAULT = 0; + + // (100) Debug or trace information. + DEBUG = 100; + + // (200) Routine information, such as ongoing status or performance. + INFO = 200; + + // (300) Normal but significant events, such as start up, shut down, or + // a configuration change. + NOTICE = 300; + + // (400) Warning events might cause problems. + WARNING = 400; + + // (500) Error events are likely to cause problems. + ERROR = 500; + + // (600) Critical events cause more severe problems or outages. + CRITICAL = 600; + + // (700) A person must take an action immediately. + ALERT = 700; + + // (800) One or more systems are unusable. 
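+ // Example (editorial addition): a total outage of a user-facing system.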
+ EMERGENCY = 800; +} diff --git a/opentelemetry-stackdriver/proto/google/logging/v2/log_entry.proto b/opentelemetry-stackdriver/proto/google/logging/v2/log_entry.proto new file mode 100644 index 00000000..99712936 --- /dev/null +++ b/opentelemetry-stackdriver/proto/google/logging/v2/log_entry.proto @@ -0,0 +1,241 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.logging.v2; + +import "google/api/field_behavior.proto"; +import "google/api/monitored_resource.proto"; +import "google/api/resource.proto"; +import "google/logging/type/http_request.proto"; +import "google/logging/type/log_severity.proto"; +import "google/protobuf/any.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/timestamp.proto"; + +option cc_enable_arenas = true; +option csharp_namespace = "Google.Cloud.Logging.V2"; +option go_package = "google.golang.org/genproto/googleapis/logging/v2;logging"; +option java_multiple_files = true; +option java_outer_classname = "LogEntryProto"; +option java_package = "com.google.logging.v2"; +option php_namespace = "Google\\Cloud\\Logging\\V2"; +option ruby_package = "Google::Cloud::Logging::V2"; + +// An individual entry in a log. +message LogEntry { + option (google.api.resource) = { + type: "logging.googleapis.com/Log" + pattern: "projects/{project}/logs/{log}" + pattern: "organizations/{organization}/logs/{log}" + pattern: "folders/{folder}/logs/{log}" + pattern: "billingAccounts/{billing_account}/logs/{log}" + name_field: "log_name" + }; + + // Required. The resource name of the log to which this log entry belongs: + // + // "projects/[PROJECT_ID]/logs/[LOG_ID]" + // "organizations/[ORGANIZATION_ID]/logs/[LOG_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]" + // "folders/[FOLDER_ID]/logs/[LOG_ID]" + // + // A project number may be used in place of PROJECT_ID. The project number is + // translated to its corresponding PROJECT_ID internally and the `log_name` + // field will contain PROJECT_ID in queries and exports. + // + // `[LOG_ID]` must be URL-encoded within `log_name`. Example: + // `"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity"`. + // + // `[LOG_ID]` must be less than 512 characters long and can only include the + // following characters: upper and lower case alphanumeric characters, + // forward-slash, underscore, hyphen, and period. + // + // For backward compatibility, if `log_name` begins with a forward-slash, such + // as `/projects/...`, then the log entry is ingested as usual, but the + // forward-slash is removed. Listing the log entry will not show the leading + // slash and filtering for a log name with a leading slash will never return + // any results. + string log_name = 12 [(google.api.field_behavior) = REQUIRED]; + + // Required. The monitored resource that produced this log entry. 
+ // + // Example: a log entry that reports a database error would be associated with + // the monitored resource designating the particular database that reported + // the error. + google.api.MonitoredResource resource = 8 [(google.api.field_behavior) = REQUIRED]; + + // The log entry payload, which can be one of multiple types. + oneof payload { + // The log entry payload, represented as a protocol buffer. Some Google + // Cloud Platform services use this field for their log entry payloads. + // + // The following protocol buffer types are supported; user-defined types + // are not supported: + // + // "type.googleapis.com/google.cloud.audit.AuditLog" + // "type.googleapis.com/google.appengine.logging.v1.RequestLog" + google.protobuf.Any proto_payload = 2; + + // The log entry payload, represented as a Unicode string (UTF-8). + string text_payload = 3; + + // The log entry payload, represented as a structure that is + // expressed as a JSON object. + google.protobuf.Struct json_payload = 6; + } + + // Optional. The time the event described by the log entry occurred. This time is used + // to compute the log entry's age and to enforce the logs retention period. + // If this field is omitted in a new log entry, then Logging assigns it the + // current time. Timestamps have nanosecond accuracy, but trailing zeros in + // the fractional seconds might be omitted when the timestamp is displayed. + // + // Incoming log entries must have timestamps that don't exceed the + // [logs retention + // period](https://cloud.google.com/logging/quotas#logs_retention_periods) in + // the past, and that don't exceed 24 hours in the future. Log entries outside + // those time boundaries aren't ingested by Logging. + google.protobuf.Timestamp timestamp = 9 [(google.api.field_behavior) = OPTIONAL]; + + // Output only. The time the log entry was received by Logging. + google.protobuf.Timestamp receive_timestamp = 24 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. The severity of the log entry. The default value is `LogSeverity.DEFAULT`. + google.logging.type.LogSeverity severity = 10 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A unique identifier for the log entry. If you provide a value, then + // Logging considers other log entries in the same project, with the same + // `timestamp`, and with the same `insert_id` to be duplicates which are + // removed in a single query result. However, there are no guarantees of + // de-duplication in the export of logs. + // + // If the `insert_id` is omitted when writing a log entry, the Logging API + // assigns its own unique identifier in this field. + // + // In queries, the `insert_id` is also used to order log entries that have + // the same `log_name` and `timestamp` values. + string insert_id = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Information about the HTTP request associated with this log entry, if + // applicable. + google.logging.type.HttpRequest http_request = 7 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A map of key, value pairs that provides additional information about the + // log entry. The labels can be user-defined or system-defined. + // + // User-defined labels are arbitrary key, value pairs that you can use to + // classify logs. + // + // System-defined labels are defined by GCP services for platform logs. + // They have two components - a service namespace component and the + // attribute name. For example: `compute.googleapis.com/resource_name`. 
+ //
+ // Cloud Logging truncates label keys that exceed 512 B and label
+ // values that exceed 64 KB upon their associated log entry being
+ // written. The truncation is indicated by an ellipsis at the
+ // end of the character string.
+ map<string, string> labels = 11 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. Information about an operation associated with the log entry, if
+ // applicable.
+ LogEntryOperation operation = 15 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. Resource name of the trace associated with the log entry, if any. If it
+ // contains a relative resource name, the name is assumed to be relative to
+ // `//tracing.googleapis.com`. Example:
+ // `projects/my-projectid/traces/06796866738c859f2f19b7cfb3214824`
+ string trace = 22 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. The span ID within the trace associated with the log entry.
+ //
+ // For Trace spans, this is the same format that the Trace API v2 uses: a
+ // 16-character hexadecimal encoding of an 8-byte array, such as
+ // `000000000000004a`.
+ string span_id = 27 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. The sampling decision of the trace associated with the log entry.
+ //
+ // True means that the trace resource name in the `trace` field was sampled
+ // for storage in a trace backend. False means that the trace was not sampled
+ // for storage when this log entry was written, or the sampling decision was
+ // unknown at the time. A non-sampled `trace` value is still useful as a
+ // request correlation identifier. The default is False.
+ bool trace_sampled = 30 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. Source code location information associated with the log entry, if any.
+ LogEntrySourceLocation source_location = 23 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. Information indicating this LogEntry is part of a sequence of multiple log
+ // entries split from a single LogEntry.
+ LogSplit split = 35 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Additional information about a potentially long-running operation with which
+// a log entry is associated.
+message LogEntryOperation {
+ // Optional. An arbitrary operation identifier. Log entries with the same
+ // identifier are assumed to be part of the same operation.
+ string id = 1 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. An arbitrary producer identifier. The combination of `id` and
+ // `producer` must be globally unique. Examples for `producer`:
+ // `"MyDivision.MyBigCompany.com"`, `"github.com/MyProject/MyApplication"`.
+ string producer = 2 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. Set this to True if this is the first log entry in the operation.
+ bool first = 3 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. Set this to True if this is the last log entry in the operation.
+ bool last = 4 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Additional information about the source code location that produced the log
+// entry.
+message LogEntrySourceLocation {
+ // Optional. Source file name. Depending on the runtime environment, this
+ // might be a simple name or a fully-qualified name.
+ string file = 1 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. Line within the source file. 1-based; 0 indicates no line number
+ // available.
+ int64 line = 2 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional.
Human-readable name of the function or method being invoked, with + // optional context such as the class or package name. This information may be + // used in contexts such as the logs viewer, where a file and line number are + // less meaningful. The format can vary by language. For example: + // `qual.if.ied.Class.method` (Java), `dir/package.func` (Go), `function` + // (Python). + string function = 3 [(google.api.field_behavior) = OPTIONAL]; +} + +// Additional information used to correlate multiple log entries. Used when a +// single LogEntry would exceed the Google Cloud Logging size limit and is +// split across multiple log entries. +message LogSplit { + // A globally unique identifier for all log entries in a sequence of split log + // entries. All log entries with the same |LogSplit.uid| are assumed to be + // part of the same sequence of split log entries. + string uid = 1; + + // The index of this LogEntry in the sequence of split log entries. Log + // entries are given |index| values 0, 1, ..., n-1 for a sequence of n log + // entries. + int32 index = 2; + + // The total number of log entries that the original LogEntry was split into. + int32 total_splits = 3; +} diff --git a/opentelemetry-stackdriver/proto/google/logging/v2/logging.proto b/opentelemetry-stackdriver/proto/google/logging/v2/logging.proto new file mode 100644 index 00000000..b7f4f189 --- /dev/null +++ b/opentelemetry-stackdriver/proto/google/logging/v2/logging.proto @@ -0,0 +1,487 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.logging.v2; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/monitored_resource.proto"; +import "google/api/resource.proto"; +import "google/logging/v2/log_entry.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/empty.proto"; +import "google/rpc/status.proto"; + +option cc_enable_arenas = true; +option csharp_namespace = "Google.Cloud.Logging.V2"; +option go_package = "google.golang.org/genproto/googleapis/logging/v2;logging"; +option java_multiple_files = true; +option java_outer_classname = "LoggingProto"; +option java_package = "com.google.logging.v2"; +option php_namespace = "Google\\Cloud\\Logging\\V2"; +option ruby_package = "Google::Cloud::Logging::V2"; + +// Service for ingesting and querying logs. +service LoggingServiceV2 { + option (google.api.default_host) = "logging.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform," + "https://www.googleapis.com/auth/cloud-platform.read-only," + "https://www.googleapis.com/auth/logging.admin," + "https://www.googleapis.com/auth/logging.read," + "https://www.googleapis.com/auth/logging.write"; + + // Deletes all the log entries in a log for the _Default Log Bucket. The log + // reappears if it receives new entries. Log entries written shortly before + // the delete operation might not be deleted. 
Entries received after the + // delete operation with a timestamp before the operation will be deleted. + rpc DeleteLog(DeleteLogRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v2/{log_name=projects/*/logs/*}" + additional_bindings { + delete: "/v2/{log_name=*/*/logs/*}" + } + additional_bindings { + delete: "/v2/{log_name=organizations/*/logs/*}" + } + additional_bindings { + delete: "/v2/{log_name=folders/*/logs/*}" + } + additional_bindings { + delete: "/v2/{log_name=billingAccounts/*/logs/*}" + } + }; + option (google.api.method_signature) = "log_name"; + } + + // Writes log entries to Logging. This API method is the + // only way to send log entries to Logging. This method + // is used, directly or indirectly, by the Logging agent + // (fluentd) and all logging libraries configured to use Logging. + // A single request may contain log entries for a maximum of 1000 + // different resources (projects, organizations, billing accounts or + // folders) + rpc WriteLogEntries(WriteLogEntriesRequest) returns (WriteLogEntriesResponse) { + option (google.api.http) = { + post: "/v2/entries:write" + body: "*" + }; + option (google.api.method_signature) = "log_name,resource,labels,entries"; + } + + // Lists log entries. Use this method to retrieve log entries that originated + // from a project/folder/organization/billing account. For ways to export log + // entries, see [Exporting + // Logs](https://cloud.google.com/logging/docs/export). + rpc ListLogEntries(ListLogEntriesRequest) returns (ListLogEntriesResponse) { + option (google.api.http) = { + post: "/v2/entries:list" + body: "*" + }; + option (google.api.method_signature) = "resource_names,filter,order_by"; + } + + // Lists the descriptors for monitored resource types used by Logging. + rpc ListMonitoredResourceDescriptors(ListMonitoredResourceDescriptorsRequest) returns (ListMonitoredResourceDescriptorsResponse) { + option (google.api.http) = { + get: "/v2/monitoredResourceDescriptors" + }; + } + + // Lists the logs in projects, organizations, folders, or billing accounts. + // Only logs that have entries are listed. + rpc ListLogs(ListLogsRequest) returns (ListLogsResponse) { + option (google.api.http) = { + get: "/v2/{parent=*/*}/logs" + additional_bindings { + get: "/v2/{parent=projects/*}/logs" + } + additional_bindings { + get: "/v2/{parent=organizations/*}/logs" + } + additional_bindings { + get: "/v2/{parent=folders/*}/logs" + } + additional_bindings { + get: "/v2/{parent=billingAccounts/*}/logs" + } + }; + option (google.api.method_signature) = "parent"; + } + + // Streaming read of log entries as they are ingested. Until the stream is + // terminated, it will continue reading logs. + rpc TailLogEntries(stream TailLogEntriesRequest) returns (stream TailLogEntriesResponse) { + option (google.api.http) = { + post: "/v2/entries:tail" + body: "*" + }; + } +} + +// The parameters to DeleteLog. +message DeleteLogRequest { + // Required. The resource name of the log to delete: + // + // * `projects/[PROJECT_ID]/logs/[LOG_ID]` + // * `organizations/[ORGANIZATION_ID]/logs/[LOG_ID]` + // * `billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]` + // * `folders/[FOLDER_ID]/logs/[LOG_ID]` + // + // `[LOG_ID]` must be URL-encoded. For example, + // `"projects/my-project-id/logs/syslog"`, + // `"organizations/123/logs/cloudaudit.googleapis.com%2Factivity"`. + // + // For more information about log names, see + // [LogEntry][google.logging.v2.LogEntry]. 
+ string log_name = 1 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ type: "logging.googleapis.com/Log"
+ }
+ ];
+}
+
+// The parameters to WriteLogEntries.
+message WriteLogEntriesRequest {
+ // Optional. A default log resource name that is assigned to all log entries
+ // in `entries` that do not specify a value for `log_name`:
+ //
+ // * `projects/[PROJECT_ID]/logs/[LOG_ID]`
+ // * `organizations/[ORGANIZATION_ID]/logs/[LOG_ID]`
+ // * `billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]`
+ // * `folders/[FOLDER_ID]/logs/[LOG_ID]`
+ //
+ // `[LOG_ID]` must be URL-encoded. For example:
+ //
+ // "projects/my-project-id/logs/syslog"
+ // "organizations/123/logs/cloudaudit.googleapis.com%2Factivity"
+ //
+ // The permission `logging.logEntries.create` is needed on each project,
+ // organization, billing account, or folder that is receiving new log
+ // entries, whether the resource is specified in `logName` or in an
+ // individual log entry.
+ string log_name = 1 [
+ (google.api.field_behavior) = OPTIONAL,
+ (google.api.resource_reference) = {
+ type: "logging.googleapis.com/Log"
+ }
+ ];
+
+ // Optional. A default monitored resource object that is assigned to all log
+ // entries in `entries` that do not specify a value for `resource`. Example:
+ //
+ // { "type": "gce_instance",
+ // "labels": {
+ // "zone": "us-central1-a", "instance_id": "00000000000000000000" }}
+ //
+ // See [LogEntry][google.logging.v2.LogEntry].
+ google.api.MonitoredResource resource = 2 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. Default labels that are added to the `labels` field of all log
+ // entries in `entries`. If a log entry already has a label with the same key
+ // as a label in this parameter, then the log entry's label is not changed.
+ // See [LogEntry][google.logging.v2.LogEntry].
+ map<string, string> labels = 3 [(google.api.field_behavior) = OPTIONAL];
+
+ // Required. The log entries to send to Logging. The order of log
+ // entries in this list does not matter. Values supplied in this method's
+ // `log_name`, `resource`, and `labels` fields are copied into those log
+ // entries in this list that do not include values for their corresponding
+ // fields. For more information, see the
+ // [LogEntry][google.logging.v2.LogEntry] type.
+ //
+ // If the `timestamp` or `insert_id` fields are missing in log entries, then
+ // this method supplies the current time or a unique identifier, respectively.
+ // The supplied values are chosen so that, among the log entries that did not
+ // supply their own values, the entries earlier in the list will sort before
+ // the entries later in the list. See the `entries.list` method.
+ //
+ // Log entries with timestamps that are more than the
+ // [logs retention period](https://cloud.google.com/logging/quotas) in
+ // the past or more than 24 hours in the future will not be available when
+ // calling `entries.list`. However, those log entries can still be [exported
+ // with
+ // LogSinks](https://cloud.google.com/logging/docs/api/tasks/exporting-logs).
+ //
+ // To improve throughput and to avoid exceeding the
+ // [quota limit](https://cloud.google.com/logging/quotas) for calls to
+ // `entries.write`, you should try to include several log entries in this
+ // list, rather than calling this method for each individual log entry.
+ repeated LogEntry entries = 4 [(google.api.field_behavior) = REQUIRED];
+
+ // Optional.
Whether valid entries should be written even if some other
+ // entries fail due to INVALID_ARGUMENT or PERMISSION_DENIED errors. If any
+ // entry is not written, then the response status is the error associated
+ // with one of the failed entries and the response includes error details
+ // keyed by the entries' zero-based index in the `entries.write` method.
+ bool partial_success = 5 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. If true, the request should expect a normal response, but the
+ // entries won't be persisted or exported. Useful for checking whether the
+ // logging API endpoints are working properly before sending valuable data.
+ bool dry_run = 6 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Result returned from WriteLogEntries.
+message WriteLogEntriesResponse {
+
+}
+
+// Error details for WriteLogEntries with partial success.
+message WriteLogEntriesPartialErrors {
+ // When `WriteLogEntriesRequest.partial_success` is true, records the error
+ // status for entries that were not written due to a permanent error, keyed
+ // by the entry's zero-based index in `WriteLogEntriesRequest.entries`.
+ //
+ // Failed requests for which no entries are written will not include
+ // per-entry errors.
+ map<string, google.rpc.Status> log_entry_errors = 1;
+}
+
+// The parameters to `ListLogEntries`.
+message ListLogEntriesRequest {
+ // Required. Names of one or more parent resources from which to
+ // retrieve log entries:
+ //
+ // * `projects/[PROJECT_ID]`
+ // * `organizations/[ORGANIZATION_ID]`
+ // * `billingAccounts/[BILLING_ACCOUNT_ID]`
+ // * `folders/[FOLDER_ID]`
+ //
+ // May alternatively be one or more views:
+ //
+ // * `projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]`
+ // * `organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]`
+ // * `billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]`
+ // * `folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]`
+ //
+ // Projects listed in the `project_ids` field are added to this list.
+ repeated string resource_names = 8 [
+ (google.api.field_behavior) = REQUIRED,
+ (google.api.resource_reference) = {
+ child_type: "logging.googleapis.com/Log"
+ }
+ ];
+
+ // Optional. A filter that chooses which log entries to return. See [Advanced
+ // Logs Queries](https://cloud.google.com/logging/docs/view/advanced-queries).
+ // Only log entries that match the filter are returned. An empty filter
+ // matches all log entries in the resources listed in `resource_names`.
+ // Referencing a parent resource that is not listed in `resource_names` will
+ // cause the filter to return no results. The maximum length of the filter is
+ // 20000 characters.
+ string filter = 2 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. How the results should be sorted. Presently, the only permitted
+ // values are `"timestamp asc"` (default) and `"timestamp desc"`. The first
+ // option returns entries in order of increasing values of
+ // `LogEntry.timestamp` (oldest first), and the second option returns entries
+ // in order of decreasing timestamps (newest first). Entries with equal
+ // timestamps are returned in order of their `insert_id` values.
+ string order_by = 3 [(google.api.field_behavior) = OPTIONAL];
+
+ // Optional. The maximum number of results to return from this request. Default is 50.
+ // If the value is negative or exceeds 1000, the request is rejected.
The + // presence of `next_page_token` in the response indicates that more results + // might be available. + int32 page_size = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If present, then retrieve the next batch of results from the + // preceding call to this method. `page_token` must be the value of + // `next_page_token` from the previous response. The values of other method + // parameters should be identical to those in the previous call. + string page_token = 5 [(google.api.field_behavior) = OPTIONAL]; +} + +// Result returned from `ListLogEntries`. +message ListLogEntriesResponse { + // A list of log entries. If `entries` is empty, `nextPageToken` may still be + // returned, indicating that more entries may exist. See `nextPageToken` for + // more information. + repeated LogEntry entries = 1; + + // If there might be more results than those appearing in this response, then + // `nextPageToken` is included. To get the next set of results, call this + // method again using the value of `nextPageToken` as `pageToken`. + // + // If a value for `next_page_token` appears and the `entries` field is empty, + // it means that the search found no log entries so far but it did not have + // time to search all the possible log entries. Retry the method with this + // value for `page_token` to continue the search. Alternatively, consider + // speeding up the search by changing your filter to specify a single log name + // or resource type, or to narrow the time range of the search. + string next_page_token = 2; +} + +// The parameters to ListMonitoredResourceDescriptors +message ListMonitoredResourceDescriptorsRequest { + // Optional. The maximum number of results to return from this request. + // Non-positive values are ignored. The presence of `nextPageToken` in the + // response indicates that more results might be available. + int32 page_size = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If present, then retrieve the next batch of results from the + // preceding call to this method. `pageToken` must be the value of + // `nextPageToken` from the previous response. The values of other method + // parameters should be identical to those in the previous call. + string page_token = 2 [(google.api.field_behavior) = OPTIONAL]; +} + +// Result returned from ListMonitoredResourceDescriptors. +message ListMonitoredResourceDescriptorsResponse { + // A list of resource descriptors. + repeated google.api.MonitoredResourceDescriptor resource_descriptors = 1; + + // If there might be more results than those appearing in this response, then + // `nextPageToken` is included. To get the next set of results, call this + // method again using the value of `nextPageToken` as `pageToken`. + string next_page_token = 2; +} + +// The parameters to ListLogs. +message ListLogsRequest { + // Required. The resource name that owns the logs: + // + // * `projects/[PROJECT_ID]` + // * `organizations/[ORGANIZATION_ID]` + // * `billingAccounts/[BILLING_ACCOUNT_ID]` + // * `folders/[FOLDER_ID]` + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + child_type: "logging.googleapis.com/Log" + } + ]; + + // Optional. The maximum number of results to return from this request. + // Non-positive values are ignored. The presence of `nextPageToken` in the + // response indicates that more results might be available. + int32 page_size = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. 
If present, then retrieve the next batch of results from the + // preceding call to this method. `pageToken` must be the value of + // `nextPageToken` from the previous response. The values of other method + // parameters should be identical to those in the previous call. + string page_token = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The resource name that owns the logs: + // + // * `projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` + // * `organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` + // * `billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` + // * `folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` + // + // To support legacy queries, it could also be: + // + // * `projects/[PROJECT_ID]` + // * `organizations/[ORGANIZATION_ID]` + // * `billingAccounts/[BILLING_ACCOUNT_ID]` + // * `folders/[FOLDER_ID]` + repeated string resource_names = 8 [ + (google.api.field_behavior) = OPTIONAL, + (google.api.resource_reference) = { + child_type: "logging.googleapis.com/Log" + } + ]; +} + +// Result returned from ListLogs. +message ListLogsResponse { + // A list of log names. For example, + // `"projects/my-project/logs/syslog"` or + // `"organizations/123/logs/cloudresourcemanager.googleapis.com%2Factivity"`. + repeated string log_names = 3; + + // If there might be more results than those appearing in this response, then + // `nextPageToken` is included. To get the next set of results, call this + // method again using the value of `nextPageToken` as `pageToken`. + string next_page_token = 2; +} + +// The parameters to `TailLogEntries`. +message TailLogEntriesRequest { + // Required. Name of a parent resource from which to retrieve log entries: + // + // * `projects/[PROJECT_ID]` + // * `organizations/[ORGANIZATION_ID]` + // * `billingAccounts/[BILLING_ACCOUNT_ID]` + // * `folders/[FOLDER_ID]` + // + // May alternatively be one or more views: + // + // * `projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` + // * `organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` + // * `billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` + // * `folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` + repeated string resource_names = 1 [(google.api.field_behavior) = REQUIRED]; + + // Optional. A filter that chooses which log entries to return. See [Advanced + // Logs Filters](https://cloud.google.com/logging/docs/view/advanced_filters). + // Only log entries that match the filter are returned. An empty filter + // matches all log entries in the resources listed in `resource_names`. + // Referencing a parent resource that is not in `resource_names` will cause + // the filter to return no results. The maximum length of the filter is 20000 + // characters. + string filter = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The amount of time to buffer log entries at the server before + // being returned to prevent out of order results due to late arriving log + // entries. Valid values are between 0-60000 milliseconds. Defaults to 2000 + // milliseconds. + google.protobuf.Duration buffer_window = 3 [(google.api.field_behavior) = OPTIONAL]; +} + +// Result returned from `TailLogEntries`. +message TailLogEntriesResponse { + // Information about entries that were omitted from the session. 
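+ //
+ // Illustrative example (editorial addition; values are hypothetical): a
+ // response carrying `suppression_info { reason: RATE_LIMIT suppressed_count: 154 }`
+ // means at least 154 entries were dropped for exceeding rate limits since
+ // the previous response.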
+ message SuppressionInfo { + // An indicator of why entries were omitted. + enum Reason { + // Unexpected default. + REASON_UNSPECIFIED = 0; + + // Indicates suppression occurred due to relevant entries being + // received in excess of rate limits. For quotas and limits, see + // [Logging API quotas and + // limits](https://cloud.google.com/logging/quotas#api-limits). + RATE_LIMIT = 1; + + // Indicates suppression occurred due to the client not consuming + // responses quickly enough. + NOT_CONSUMED = 2; + } + + // The reason that entries were omitted from the session. + Reason reason = 1; + + // A lower bound on the count of entries omitted due to `reason`. + int32 suppressed_count = 2; + } + + // A list of log entries. Each response in the stream will order entries with + // increasing values of `LogEntry.timestamp`. Ordering is not guaranteed + // between separate responses. + repeated LogEntry entries = 1; + + // If entries that otherwise would have been included in the session were not + // sent back to the client, counts of relevant entries omitted from the + // session with the reason that they were not included. There will be at most + // one of each reason per response. The counts represent the number of + // suppressed entries since the last streamed response. + repeated SuppressionInfo suppression_info = 2; +} diff --git a/opentelemetry-stackdriver/proto/google/logging/v2/logging_config.proto b/opentelemetry-stackdriver/proto/google/logging/v2/logging_config.proto new file mode 100644 index 00000000..ef002406 --- /dev/null +++ b/opentelemetry-stackdriver/proto/google/logging/v2/logging_config.proto @@ -0,0 +1,1957 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package google.logging.v2; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/field_mask.proto"; +import "google/protobuf/timestamp.proto"; + +option cc_enable_arenas = true; +option csharp_namespace = "Google.Cloud.Logging.V2"; +option go_package = "google.golang.org/genproto/googleapis/logging/v2;logging"; +option java_multiple_files = true; +option java_outer_classname = "LoggingConfigProto"; +option java_package = "com.google.logging.v2"; +option php_namespace = "Google\\Cloud\\Logging\\V2"; +option ruby_package = "Google::Cloud::Logging::V2"; +option (google.api.resource_definition) = { + type: "logging.googleapis.com/OrganizationLocation" + pattern: "organizations/{organization}/locations/{location}" +}; +option (google.api.resource_definition) = { + type: "logging.googleapis.com/FolderLocation" + pattern: "folders/{folder}/locations/{location}" +}; +option (google.api.resource_definition) = { + type: "logging.googleapis.com/BillingAccountLocation" + pattern: "billingAccounts/{billing_account}/locations/{location}" +}; + +// Service for configuring sinks used to route log entries. +service ConfigServiceV2 { + option (google.api.default_host) = "logging.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform," + "https://www.googleapis.com/auth/cloud-platform.read-only," + "https://www.googleapis.com/auth/logging.admin," + "https://www.googleapis.com/auth/logging.read"; + + // Lists log buckets. + rpc ListBuckets(ListBucketsRequest) returns (ListBucketsResponse) { + option (google.api.http) = { + get: "/v2/{parent=*/*/locations/*}/buckets" + additional_bindings { + get: "/v2/{parent=projects/*/locations/*}/buckets" + } + additional_bindings { + get: "/v2/{parent=organizations/*/locations/*}/buckets" + } + additional_bindings { + get: "/v2/{parent=folders/*/locations/*}/buckets" + } + additional_bindings { + get: "/v2/{parent=billingAccounts/*/locations/*}/buckets" + } + }; + option (google.api.method_signature) = "parent"; + } + + // Gets a log bucket. + rpc GetBucket(GetBucketRequest) returns (LogBucket) { + option (google.api.http) = { + get: "/v2/{name=*/*/locations/*/buckets/*}" + additional_bindings { + get: "/v2/{name=projects/*/locations/*/buckets/*}" + } + additional_bindings { + get: "/v2/{name=organizations/*/locations/*/buckets/*}" + } + additional_bindings { + get: "/v2/{name=folders/*/locations/*/buckets/*}" + } + additional_bindings { + get: "/v2/{name=billingAccounts/*/buckets/*}" + } + }; + } + + // Creates a log bucket that can be used to store log entries. After a bucket + // has been created, the bucket's location cannot be changed. + rpc CreateBucket(CreateBucketRequest) returns (LogBucket) { + option (google.api.http) = { + post: "/v2/{parent=*/*/locations/*}/buckets" + body: "bucket" + additional_bindings { + post: "/v2/{parent=projects/*/locations/*}/buckets" + body: "bucket" + } + additional_bindings { + post: "/v2/{parent=organizations/*/locations/*}/buckets" + body: "bucket" + } + additional_bindings { + post: "/v2/{parent=folders/*/locations/*}/buckets" + body: "bucket" + } + additional_bindings { + post: "/v2/{parent=billingAccounts/*/locations/*}/buckets" + body: "bucket" + } + }; + } + + // Updates a log bucket. 
This method replaces the following fields in the
+  // existing bucket with values from the new bucket: `retention_period`
+  //
+  // If the retention period is decreased and the bucket is locked,
+  // `FAILED_PRECONDITION` will be returned.
+  //
+  // If the bucket has a `lifecycle_state` of `DELETE_REQUESTED`, then
+  // `FAILED_PRECONDITION` will be returned.
+  //
+  // After a bucket has been created, the bucket's location cannot be changed.
+  rpc UpdateBucket(UpdateBucketRequest) returns (LogBucket) {
+    option (google.api.http) = {
+      patch: "/v2/{name=*/*/locations/*/buckets/*}"
+      body: "bucket"
+      additional_bindings {
+        patch: "/v2/{name=projects/*/locations/*/buckets/*}"
+        body: "bucket"
+      }
+      additional_bindings {
+        patch: "/v2/{name=organizations/*/locations/*/buckets/*}"
+        body: "bucket"
+      }
+      additional_bindings {
+        patch: "/v2/{name=folders/*/locations/*/buckets/*}"
+        body: "bucket"
+      }
+      additional_bindings {
+        patch: "/v2/{name=billingAccounts/*/locations/*/buckets/*}"
+        body: "bucket"
+      }
+    };
+  }
+
+  // Deletes a log bucket.
+  //
+  // Changes the bucket's `lifecycle_state` to the `DELETE_REQUESTED` state.
+  // After 7 days, the bucket will be purged and all log entries in the bucket
+  // will be permanently deleted.
+  rpc DeleteBucket(DeleteBucketRequest) returns (google.protobuf.Empty) {
+    option (google.api.http) = {
+      delete: "/v2/{name=*/*/locations/*/buckets/*}"
+      additional_bindings {
+        delete: "/v2/{name=projects/*/locations/*/buckets/*}"
+      }
+      additional_bindings {
+        delete: "/v2/{name=organizations/*/locations/*/buckets/*}"
+      }
+      additional_bindings {
+        delete: "/v2/{name=folders/*/locations/*/buckets/*}"
+      }
+      additional_bindings {
+        delete: "/v2/{name=billingAccounts/*/locations/*/buckets/*}"
+      }
+    };
+  }
+
+  // Undeletes a log bucket. A bucket that has been deleted can be undeleted
+  // within the grace period of 7 days.
+  rpc UndeleteBucket(UndeleteBucketRequest) returns (google.protobuf.Empty) {
+    option (google.api.http) = {
+      post: "/v2/{name=*/*/locations/*/buckets/*}:undelete"
+      body: "*"
+      additional_bindings {
+        post: "/v2/{name=projects/*/locations/*/buckets/*}:undelete"
+        body: "*"
+      }
+      additional_bindings {
+        post: "/v2/{name=organizations/*/locations/*/buckets/*}:undelete"
+        body: "*"
+      }
+      additional_bindings {
+        post: "/v2/{name=folders/*/locations/*/buckets/*}:undelete"
+        body: "*"
+      }
+      additional_bindings {
+        post: "/v2/{name=billingAccounts/*/locations/*/buckets/*}:undelete"
+        body: "*"
+      }
+    };
+  }
+
+  // Lists views on a log bucket.
+  rpc ListViews(ListViewsRequest) returns (ListViewsResponse) {
+    option (google.api.http) = {
+      get: "/v2/{parent=*/*/locations/*/buckets/*}/views"
+      additional_bindings {
+        get: "/v2/{parent=projects/*/locations/*/buckets/*}/views"
+      }
+      additional_bindings {
+        get: "/v2/{parent=organizations/*/locations/*/buckets/*}/views"
+      }
+      additional_bindings {
+        get: "/v2/{parent=folders/*/locations/*/buckets/*}/views"
+      }
+      additional_bindings {
+        get: "/v2/{parent=billingAccounts/*/locations/*/buckets/*}/views"
+      }
+    };
+    option (google.api.method_signature) = "parent";
+  }
+
+  // Gets a view on a log bucket.
+  rpc GetView(GetViewRequest) returns (LogView) {
+    option (google.api.http) = {
+      get: "/v2/{name=*/*/locations/*/buckets/*/views/*}"
+      additional_bindings {
+        get: "/v2/{name=projects/*/locations/*/buckets/*/views/*}"
+      }
+      additional_bindings {
+        get: "/v2/{name=organizations/*/locations/*/buckets/*/views/*}"
+      }
+      additional_bindings {
+        get: "/v2/{name=folders/*/locations/*/buckets/*/views/*}"
+      }
+      additional_bindings {
+        get: "/v2/{name=billingAccounts/*/buckets/*/views/*}"
+      }
+    };
+  }
+
+  // Creates a view over log entries in a log bucket. A bucket may contain a
+  // maximum of 30 views.
+  rpc CreateView(CreateViewRequest) returns (LogView) {
+    option (google.api.http) = {
+      post: "/v2/{parent=*/*/locations/*/buckets/*}/views"
+      body: "view"
+      additional_bindings {
+        post: "/v2/{parent=projects/*/locations/*/buckets/*}/views"
+        body: "view"
+      }
+      additional_bindings {
+        post: "/v2/{parent=organizations/*/locations/*/buckets/*}/views"
+        body: "view"
+      }
+      additional_bindings {
+        post: "/v2/{parent=folders/*/locations/*/buckets/*}/views"
+        body: "view"
+      }
+      additional_bindings {
+        post: "/v2/{parent=billingAccounts/*/locations/*/buckets/*}/views"
+        body: "view"
+      }
+    };
+  }
+
+  // Updates a view on a log bucket. This method replaces the following fields
+  // in the existing view with values from the new view: `filter`.
+  // If an `UNAVAILABLE` error is returned, this indicates that the system is
+  // not in a state where it can update the view. If this occurs, please try
+  // again in a few minutes.
+  rpc UpdateView(UpdateViewRequest) returns (LogView) {
+    option (google.api.http) = {
+      patch: "/v2/{name=*/*/locations/*/buckets/*/views/*}"
+      body: "view"
+      additional_bindings {
+        patch: "/v2/{name=projects/*/locations/*/buckets/*/views/*}"
+        body: "view"
+      }
+      additional_bindings {
+        patch: "/v2/{name=organizations/*/locations/*/buckets/*/views/*}"
+        body: "view"
+      }
+      additional_bindings {
+        patch: "/v2/{name=folders/*/locations/*/buckets/*/views/*}"
+        body: "view"
+      }
+      additional_bindings {
+        patch: "/v2/{name=billingAccounts/*/locations/*/buckets/*/views/*}"
+        body: "view"
+      }
+    };
+  }
+
+  // Deletes a view on a log bucket.
+  // If an `UNAVAILABLE` error is returned, this indicates that the system is
+  // not in a state where it can delete the view. If this occurs, please try
+  // again in a few minutes.
+  rpc DeleteView(DeleteViewRequest) returns (google.protobuf.Empty) {
+    option (google.api.http) = {
+      delete: "/v2/{name=*/*/locations/*/buckets/*/views/*}"
+      additional_bindings {
+        delete: "/v2/{name=projects/*/locations/*/buckets/*/views/*}"
+      }
+      additional_bindings {
+        delete: "/v2/{name=organizations/*/locations/*/buckets/*/views/*}"
+      }
+      additional_bindings {
+        delete: "/v2/{name=folders/*/locations/*/buckets/*/views/*}"
+      }
+      additional_bindings {
+        delete: "/v2/{name=billingAccounts/*/locations/*/buckets/*/views/*}"
+      }
+    };
+  }
+
+  // Lists sinks.
+  rpc ListSinks(ListSinksRequest) returns (ListSinksResponse) {
+    option (google.api.http) = {
+      get: "/v2/{parent=*/*}/sinks"
+      additional_bindings {
+        get: "/v2/{parent=projects/*}/sinks"
+      }
+      additional_bindings {
+        get: "/v2/{parent=organizations/*}/sinks"
+      }
+      additional_bindings {
+        get: "/v2/{parent=folders/*}/sinks"
+      }
+      additional_bindings {
+        get: "/v2/{parent=billingAccounts/*}/sinks"
+      }
+    };
+    option (google.api.method_signature) = "parent";
+  }
+
+  // Gets a sink.
+ rpc GetSink(GetSinkRequest) returns (LogSink) { + option (google.api.http) = { + get: "/v2/{sink_name=*/*/sinks/*}" + additional_bindings { + get: "/v2/{sink_name=projects/*/sinks/*}" + } + additional_bindings { + get: "/v2/{sink_name=organizations/*/sinks/*}" + } + additional_bindings { + get: "/v2/{sink_name=folders/*/sinks/*}" + } + additional_bindings { + get: "/v2/{sink_name=billingAccounts/*/sinks/*}" + } + }; + option (google.api.method_signature) = "sink_name"; + } + + // Creates a sink that exports specified log entries to a destination. The + // export of newly-ingested log entries begins immediately, unless the sink's + // `writer_identity` is not permitted to write to the destination. A sink can + // export log entries only from the resource owning the sink. + rpc CreateSink(CreateSinkRequest) returns (LogSink) { + option (google.api.http) = { + post: "/v2/{parent=*/*}/sinks" + body: "sink" + additional_bindings { + post: "/v2/{parent=projects/*}/sinks" + body: "sink" + } + additional_bindings { + post: "/v2/{parent=organizations/*}/sinks" + body: "sink" + } + additional_bindings { + post: "/v2/{parent=folders/*}/sinks" + body: "sink" + } + additional_bindings { + post: "/v2/{parent=billingAccounts/*}/sinks" + body: "sink" + } + }; + option (google.api.method_signature) = "parent,sink"; + } + + // Updates a sink. This method replaces the following fields in the existing + // sink with values from the new sink: `destination`, and `filter`. + // + // The updated sink might also have a new `writer_identity`; see the + // `unique_writer_identity` field. + rpc UpdateSink(UpdateSinkRequest) returns (LogSink) { + option (google.api.http) = { + put: "/v2/{sink_name=*/*/sinks/*}" + body: "sink" + additional_bindings { + put: "/v2/{sink_name=projects/*/sinks/*}" + body: "sink" + } + additional_bindings { + put: "/v2/{sink_name=organizations/*/sinks/*}" + body: "sink" + } + additional_bindings { + put: "/v2/{sink_name=folders/*/sinks/*}" + body: "sink" + } + additional_bindings { + put: "/v2/{sink_name=billingAccounts/*/sinks/*}" + body: "sink" + } + additional_bindings { + patch: "/v2/{sink_name=projects/*/sinks/*}" + body: "sink" + } + additional_bindings { + patch: "/v2/{sink_name=organizations/*/sinks/*}" + body: "sink" + } + additional_bindings { + patch: "/v2/{sink_name=folders/*/sinks/*}" + body: "sink" + } + additional_bindings { + patch: "/v2/{sink_name=billingAccounts/*/sinks/*}" + body: "sink" + } + }; + option (google.api.method_signature) = "sink_name,sink,update_mask"; + option (google.api.method_signature) = "sink_name,sink"; + } + + // Deletes a sink. If the sink has a unique `writer_identity`, then that + // service account is also deleted. + rpc DeleteSink(DeleteSinkRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v2/{sink_name=*/*/sinks/*}" + additional_bindings { + delete: "/v2/{sink_name=projects/*/sinks/*}" + } + additional_bindings { + delete: "/v2/{sink_name=organizations/*/sinks/*}" + } + additional_bindings { + delete: "/v2/{sink_name=folders/*/sinks/*}" + } + additional_bindings { + delete: "/v2/{sink_name=billingAccounts/*/sinks/*}" + } + }; + option (google.api.method_signature) = "sink_name"; + } + + // Lists all the exclusions on the _Default sink in a parent resource. 
+ rpc ListExclusions(ListExclusionsRequest) returns (ListExclusionsResponse) { + option (google.api.http) = { + get: "/v2/{parent=*/*}/exclusions" + additional_bindings { + get: "/v2/{parent=projects/*}/exclusions" + } + additional_bindings { + get: "/v2/{parent=organizations/*}/exclusions" + } + additional_bindings { + get: "/v2/{parent=folders/*}/exclusions" + } + additional_bindings { + get: "/v2/{parent=billingAccounts/*}/exclusions" + } + }; + option (google.api.method_signature) = "parent"; + } + + // Gets the description of an exclusion in the _Default sink. + rpc GetExclusion(GetExclusionRequest) returns (LogExclusion) { + option (google.api.http) = { + get: "/v2/{name=*/*/exclusions/*}" + additional_bindings { + get: "/v2/{name=projects/*/exclusions/*}" + } + additional_bindings { + get: "/v2/{name=organizations/*/exclusions/*}" + } + additional_bindings { + get: "/v2/{name=folders/*/exclusions/*}" + } + additional_bindings { + get: "/v2/{name=billingAccounts/*/exclusions/*}" + } + }; + option (google.api.method_signature) = "name"; + } + + // Creates a new exclusion in the _Default sink in a specified parent + // resource. Only log entries belonging to that resource can be excluded. You + // can have up to 10 exclusions in a resource. + rpc CreateExclusion(CreateExclusionRequest) returns (LogExclusion) { + option (google.api.http) = { + post: "/v2/{parent=*/*}/exclusions" + body: "exclusion" + additional_bindings { + post: "/v2/{parent=projects/*}/exclusions" + body: "exclusion" + } + additional_bindings { + post: "/v2/{parent=organizations/*}/exclusions" + body: "exclusion" + } + additional_bindings { + post: "/v2/{parent=folders/*}/exclusions" + body: "exclusion" + } + additional_bindings { + post: "/v2/{parent=billingAccounts/*}/exclusions" + body: "exclusion" + } + }; + option (google.api.method_signature) = "parent,exclusion"; + } + + // Changes one or more properties of an existing exclusion in the _Default + // sink. + rpc UpdateExclusion(UpdateExclusionRequest) returns (LogExclusion) { + option (google.api.http) = { + patch: "/v2/{name=*/*/exclusions/*}" + body: "exclusion" + additional_bindings { + patch: "/v2/{name=projects/*/exclusions/*}" + body: "exclusion" + } + additional_bindings { + patch: "/v2/{name=organizations/*/exclusions/*}" + body: "exclusion" + } + additional_bindings { + patch: "/v2/{name=folders/*/exclusions/*}" + body: "exclusion" + } + additional_bindings { + patch: "/v2/{name=billingAccounts/*/exclusions/*}" + body: "exclusion" + } + }; + option (google.api.method_signature) = "name,exclusion,update_mask"; + } + + // Deletes an exclusion in the _Default sink. + rpc DeleteExclusion(DeleteExclusionRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v2/{name=*/*/exclusions/*}" + additional_bindings { + delete: "/v2/{name=projects/*/exclusions/*}" + } + additional_bindings { + delete: "/v2/{name=organizations/*/exclusions/*}" + } + additional_bindings { + delete: "/v2/{name=folders/*/exclusions/*}" + } + additional_bindings { + delete: "/v2/{name=billingAccounts/*/exclusions/*}" + } + }; + option (google.api.method_signature) = "name"; + } + + // Gets the Logging CMEK settings for the given resource. + // + // Note: CMEK for the Log Router can be configured for Google Cloud projects, + // folders, organizations and billing accounts. Once configured for an + // organization, it applies to all projects and folders in the Google Cloud + // organization. 
+  //
+  // See [Enabling CMEK for Log
+  // Router](https://cloud.google.com/logging/docs/routing/managed-encryption)
+  // for more information.
+  rpc GetCmekSettings(GetCmekSettingsRequest) returns (CmekSettings) {
+    option (google.api.http) = {
+      get: "/v2/{name=*/*}/cmekSettings"
+      additional_bindings {
+        get: "/v2/{name=projects/*}/cmekSettings"
+      }
+      additional_bindings {
+        get: "/v2/{name=organizations/*}/cmekSettings"
+      }
+      additional_bindings {
+        get: "/v2/{name=folders/*}/cmekSettings"
+      }
+      additional_bindings {
+        get: "/v2/{name=billingAccounts/*}/cmekSettings"
+      }
+    };
+  }
+
+  // Updates the Log Router CMEK settings for the given resource.
+  //
+  // Note: CMEK for the Log Router can currently only be configured for Google
+  // Cloud organizations. Once configured, it applies to all projects and
+  // folders in the Google Cloud organization.
+  //
+  // [UpdateCmekSettings][google.logging.v2.ConfigServiceV2.UpdateCmekSettings]
+  // will fail if 1) `kms_key_name` is invalid, or 2) the associated service
+  // account does not have the required
+  // `roles/cloudkms.cryptoKeyEncrypterDecrypter` role assigned for the key, or
+  // 3) access to the key is disabled.
+  //
+  // See [Enabling CMEK for Log
+  // Router](https://cloud.google.com/logging/docs/routing/managed-encryption)
+  // for more information.
+  rpc UpdateCmekSettings(UpdateCmekSettingsRequest) returns (CmekSettings) {
+    option (google.api.http) = {
+      patch: "/v2/{name=*/*}/cmekSettings"
+      body: "cmek_settings"
+      additional_bindings {
+        patch: "/v2/{name=organizations/*}/cmekSettings"
+        body: "cmek_settings"
+      }
+    };
+  }
+
+  // Gets the Log Router settings for the given resource.
+  //
+  // Note: Settings for the Log Router can be retrieved for Google Cloud
+  // projects, folders, organizations, and billing accounts. Currently it can
+  // only be configured for organizations. Once configured for an organization,
+  // it applies to all projects and folders in the Google Cloud organization.
+  //
+  // See [Enabling CMEK for Log
+  // Router](https://cloud.google.com/logging/docs/routing/managed-encryption)
+  // for more information.
+  rpc GetSettings(GetSettingsRequest) returns (Settings) {
+    option (google.api.http) = {
+      get: "/v2/{name=*/*}/settings"
+      additional_bindings {
+        get: "/v2/{name=projects/*}/settings"
+      }
+      additional_bindings {
+        get: "/v2/{name=organizations/*}/settings"
+      }
+      additional_bindings {
+        get: "/v2/{name=folders/*}/settings"
+      }
+      additional_bindings {
+        get: "/v2/{name=billingAccounts/*}/settings"
+      }
+    };
+    option (google.api.method_signature) = "name";
+  }
+
+  // Updates the Log Router settings for the given resource.
+  //
+  // Note: Settings for the Log Router can currently only be configured for
+  // Google Cloud organizations. Once configured, it applies to all projects and
+  // folders in the Google Cloud organization.
+  //
+  // [UpdateSettings][google.logging.v2.ConfigServiceV2.UpdateSettings]
+  // will fail if 1) `kms_key_name` is invalid, 2) the associated service
+  // account does not have the required
+  // `roles/cloudkms.cryptoKeyEncrypterDecrypter` role assigned for the key,
+  // 3) access to the key is disabled, 4) `location_id` is not supported by
+  // Logging, or 5) `location_id` violates OrgPolicy.
+  //
+  // See [Enabling CMEK for Log
+  // Router](https://cloud.google.com/logging/docs/routing/managed-encryption)
+  // for more information.
+ rpc UpdateSettings(UpdateSettingsRequest) returns (Settings) { + option (google.api.http) = { + patch: "/v2/{name=*/*}/settings" + body: "settings" + additional_bindings { + patch: "/v2/{name=organizations/*}/settings" + body: "settings" + } + additional_bindings { + patch: "/v2/{name=folders/*}/settings" + body: "settings" + } + }; + option (google.api.method_signature) = "settings,update_mask"; + } + + // Copies a set of log entries from a log bucket to a Cloud Storage bucket. + rpc CopyLogEntries(CopyLogEntriesRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v2/entries:copy" + body: "*" + }; + option (google.longrunning.operation_info) = { + response_type: "CopyLogEntriesResponse" + metadata_type: "CopyLogEntriesMetadata" + }; + } +} + +// Describes a repository in which log entries are stored. +message LogBucket { + option (google.api.resource) = { + type: "logging.googleapis.com/LogBucket" + pattern: "projects/{project}/locations/{location}/buckets/{bucket}" + pattern: "organizations/{organization}/locations/{location}/buckets/{bucket}" + pattern: "folders/{folder}/locations/{location}/buckets/{bucket}" + pattern: "billingAccounts/{billing_account}/locations/{location}/buckets/{bucket}" + }; + + // Output only. The resource name of the bucket. + // + // For example: + // + // `projects/my-project/locations/global/buckets/my-bucket` + // + // For a list of supported locations, see [Supported + // Regions](https://cloud.google.com/logging/docs/region-support) + // + // For the location of `global` it is unspecified where log entries are + // actually stored. + // + // After a bucket has been created, the location cannot be changed. + string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Describes this bucket. + string description = 3; + + // Output only. The creation timestamp of the bucket. This is not set for any of the + // default buckets. + google.protobuf.Timestamp create_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The last update timestamp of the bucket. + google.protobuf.Timestamp update_time = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Logs will be retained by default for this amount of time, after which they + // will automatically be deleted. The minimum retention period is 1 day. If + // this value is set to zero at bucket creation time, the default time of 30 + // days will be used. + int32 retention_days = 11; + + // Whether the bucket is locked. + // + // The retention period on a locked bucket cannot be changed. Locked buckets + // may only be deleted if they are empty. + bool locked = 9; + + // Output only. The bucket lifecycle state. + LifecycleState lifecycle_state = 12 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Log entry field paths that are denied access in this bucket. + // + // The following fields and their children are eligible: `textPayload`, + // `jsonPayload`, `protoPayload`, `httpRequest`, `labels`, `sourceLocation`. + // + // Restricting a repeated field will restrict all values. Adding a parent will + // block all child fields. (e.g. `foo.bar` will block `foo.bar.baz`) + repeated string restricted_fields = 15; + + // The CMEK settings of the log bucket. If present, new log entries written to + // this log bucket are encrypted using the CMEK key provided in this + // configuration. If a log bucket has CMEK settings, the CMEK settings cannot + // be disabled later by updating the log bucket. Changing the KMS key is + // allowed. 
+ CmekSettings cmek_settings = 19; +} + +// Describes a view over log entries in a bucket. +message LogView { + option (google.api.resource) = { + type: "logging.googleapis.com/LogView" + pattern: "projects/{project}/locations/{location}/buckets/{bucket}/views/{view}" + pattern: "organizations/{organization}/locations/{location}/buckets/{bucket}/views/{view}" + pattern: "folders/{folder}/locations/{location}/buckets/{bucket}/views/{view}" + pattern: "billingAccounts/{billing_account}/locations/{location}/buckets/{bucket}/views/{view}" + }; + + // The resource name of the view. + // + // For example: + // + // `projects/my-project/locations/global/buckets/my-bucket/views/my-view` + string name = 1; + + // Describes this view. + string description = 3; + + // Output only. The creation timestamp of the view. + google.protobuf.Timestamp create_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The last update timestamp of the view. + google.protobuf.Timestamp update_time = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Filter that restricts which log entries in a bucket are visible in this + // view. + // + // Filters are restricted to be a logical AND of ==/!= of any of the + // following: + // + // - originating project/folder/organization/billing account. + // - resource type + // - log id + // + // For example: + // + // SOURCE("projects/myproject") AND resource.type = "gce_instance" + // AND LOG_ID("stdout") + string filter = 7; +} + +// Describes a sink used to export log entries to one of the following +// destinations in any project: a Cloud Storage bucket, a BigQuery dataset, a +// Pub/Sub topic or a Cloud Logging log bucket. A logs filter controls which log +// entries are exported. The sink must be created within a project, +// organization, billing account, or folder. +message LogSink { + option (google.api.resource) = { + type: "logging.googleapis.com/LogSink" + pattern: "projects/{project}/sinks/{sink}" + pattern: "organizations/{organization}/sinks/{sink}" + pattern: "folders/{folder}/sinks/{sink}" + pattern: "billingAccounts/{billing_account}/sinks/{sink}" + }; + + // Deprecated. This is unused. + enum VersionFormat { + // An unspecified format version that will default to V2. + VERSION_FORMAT_UNSPECIFIED = 0; + + // `LogEntry` version 2 format. + V2 = 1; + + // `LogEntry` version 1 format. + V1 = 2; + } + + // Required. The client-assigned sink identifier, unique within the project. + // + // For example: `"my-syslog-errors-to-pubsub"`. Sink identifiers are limited + // to 100 characters and can include only the following characters: upper and + // lower-case alphanumeric characters, underscores, hyphens, and periods. + // First character has to be alphanumeric. + string name = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The export destination: + // + // "storage.googleapis.com/[GCS_BUCKET]" + // "bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET]" + // "pubsub.googleapis.com/projects/[PROJECT_ID]/topics/[TOPIC_ID]" + // + // The sink's `writer_identity`, set when the sink is created, must have + // permission to write to the destination or else the log entries are not + // exported. For more information, see + // [Exporting Logs with + // Sinks](https://cloud.google.com/logging/docs/api/tasks/exporting-logs). + string destination = 3 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "*" + } + ]; + + // Optional. 
An [advanced logs + // filter](https://cloud.google.com/logging/docs/view/advanced-queries). The + // only exported log entries are those that are in the resource owning the + // sink and that match the filter. + // + // For example: + // + // `logName="projects/[PROJECT_ID]/logs/[LOG_ID]" AND severity>=ERROR` + string filter = 5 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A description of this sink. + // + // The maximum length of the description is 8000 characters. + string description = 18 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If set to true, then this sink is disabled and it does not export any log + // entries. + bool disabled = 19 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Log entries that match any of these exclusion filters will not be exported. + // + // If a log entry is matched by both `filter` and one of `exclusion_filters` + // it will not be exported. + repeated LogExclusion exclusions = 16 [(google.api.field_behavior) = OPTIONAL]; + + // Deprecated. This field is unused. + VersionFormat output_version_format = 6 [deprecated = true]; + + // Output only. An IAM identity—a service account or group—under which Cloud + // Logging writes the exported log entries to the sink's destination. This + // field is set by + // [sinks.create][google.logging.v2.ConfigServiceV2.CreateSink] and + // [sinks.update][google.logging.v2.ConfigServiceV2.UpdateSink] based on the + // value of `unique_writer_identity` in those methods. + // + // Until you grant this identity write-access to the destination, log entry + // exports from this sink will fail. For more information, see [Granting + // Access for a + // Resource](https://cloud.google.com/iam/docs/granting-roles-to-service-accounts#granting_access_to_a_service_account_for_a_resource). + // Consult the destination service's documentation to determine the + // appropriate IAM roles to assign to the identity. + // + // Sinks that have a destination that is a log bucket in the same project as + // the sink do not have a writer_identity and no additional permissions are + // required. + string writer_identity = 8 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Optional. This field applies only to sinks owned by organizations and folders. If the + // field is false, the default, only the logs owned by the sink's parent + // resource are available for export. If the field is true, then log entries + // from all the projects, folders, and billing accounts contained in the + // sink's parent resource are also available for export. Whether a particular + // log entry from the children is exported depends on the sink's filter + // expression. + // + // For example, if this field is true, then the filter + // `resource.type=gce_instance` would export all Compute Engine VM instance + // log entries from all projects in the sink's parent. + // + // To only export entries from certain child projects, filter on the project + // part of the log name: + // + // logName:("projects/test-project1/" OR "projects/test-project2/") AND + // resource.type=gce_instance + bool include_children = 9 [(google.api.field_behavior) = OPTIONAL]; + + // Destination dependent options. + oneof options { + // Optional. Options that affect sinks exporting data to BigQuery. + BigQueryOptions bigquery_options = 12 [(google.api.field_behavior) = OPTIONAL]; + } + + // Output only. The creation timestamp of the sink. + // + // This field may not be present for older sinks. 
+ google.protobuf.Timestamp create_time = 13 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The last update timestamp of the sink. + // + // This field may not be present for older sinks. + google.protobuf.Timestamp update_time = 14 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Options that change functionality of a sink exporting data to BigQuery. +message BigQueryOptions { + // Optional. Whether to use [BigQuery's partition + // tables](https://cloud.google.com/bigquery/docs/partitioned-tables). By + // default, Cloud Logging creates dated tables based on the log entries' + // timestamps, e.g. syslog_20170523. With partitioned tables the date suffix + // is no longer present and [special query + // syntax](https://cloud.google.com/bigquery/docs/querying-partitioned-tables) + // has to be used instead. In both cases, tables are sharded based on UTC + // timezone. + bool use_partitioned_tables = 1 [(google.api.field_behavior) = OPTIONAL]; + + // Output only. True if new timestamp column based partitioning is in use, false if legacy + // ingestion-time partitioning is in use. + // + // All new sinks will have this field set true and will use timestamp column + // based partitioning. If use_partitioned_tables is false, this value has no + // meaning and will be false. Legacy sinks using partitioned tables will have + // this field set to false. + bool uses_timestamp_column_partitioning = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// The parameters to `ListBuckets`. +message ListBucketsRequest { + // Required. The parent resource whose buckets are to be listed: + // + // "projects/[PROJECT_ID]/locations/[LOCATION_ID]" + // "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]" + // "folders/[FOLDER_ID]/locations/[LOCATION_ID]" + // + // Note: The locations portion of the resource must be specified, but + // supplying the character `-` in place of [LOCATION_ID] will return all + // buckets. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + child_type: "logging.googleapis.com/LogBucket" + } + ]; + + // Optional. If present, then retrieve the next batch of results from the preceding call + // to this method. `pageToken` must be the value of `nextPageToken` from the + // previous response. The values of other method parameters should be + // identical to those in the previous call. + string page_token = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The maximum number of results to return from this request. Non-positive + // values are ignored. The presence of `nextPageToken` in the response + // indicates that more results might be available. + int32 page_size = 3 [(google.api.field_behavior) = OPTIONAL]; +} + +// The response from ListBuckets. +message ListBucketsResponse { + // A list of buckets. + repeated LogBucket buckets = 1; + + // If there might be more results than appear in this response, then + // `nextPageToken` is included. To get the next set of results, call the same + // method again using the value of `nextPageToken` as `pageToken`. + string next_page_token = 2; +} + +// The parameters to `CreateBucket`. +message CreateBucketRequest { + // Required. 
The resource in which to create the log bucket: + // + // "projects/[PROJECT_ID]/locations/[LOCATION_ID]" + // + // For example: + // + // `"projects/my-project/locations/global"` + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + child_type: "logging.googleapis.com/LogBucket" + } + ]; + + // Required. A client-assigned identifier such as `"my-bucket"`. Identifiers are limited + // to 100 characters and can include only letters, digits, underscores, + // hyphens, and periods. + string bucket_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The new bucket. The region specified in the new bucket must be compliant + // with any Location Restriction Org Policy. The name field in the bucket is + // ignored. + LogBucket bucket = 3 [(google.api.field_behavior) = REQUIRED]; +} + +// The parameters to `UpdateBucket`. +message UpdateBucketRequest { + // Required. The full resource name of the bucket to update. + // + // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + // "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + // "folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + // + // For example: + // + // `"projects/my-project/locations/global/buckets/my-bucket"` + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "logging.googleapis.com/LogBucket" + } + ]; + + // Required. The updated bucket. + LogBucket bucket = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. Field mask that specifies the fields in `bucket` that need an update. A + // bucket field will be overwritten if, and only if, it is in the update mask. + // `name` and output only fields cannot be updated. + // + // For a detailed `FieldMask` definition, see: + // https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMask + // + // For example: `updateMask=retention_days` + google.protobuf.FieldMask update_mask = 4 [(google.api.field_behavior) = REQUIRED]; +} + +// The parameters to `GetBucket`. +message GetBucketRequest { + // Required. The resource name of the bucket: + // + // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + // "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + // "folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + // + // For example: + // + // `"projects/my-project/locations/global/buckets/my-bucket"` + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "logging.googleapis.com/LogBucket" + } + ]; +} + +// The parameters to `DeleteBucket`. +message DeleteBucketRequest { + // Required. The full resource name of the bucket to delete. 
+ // + // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + // "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + // "folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + // + // For example: + // + // `"projects/my-project/locations/global/buckets/my-bucket"` + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "logging.googleapis.com/LogBucket" + } + ]; +} + +// The parameters to `UndeleteBucket`. +message UndeleteBucketRequest { + // Required. The full resource name of the bucket to undelete. + // + // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + // "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + // "folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + // + // For example: + // + // `"projects/my-project/locations/global/buckets/my-bucket"` + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "logging.googleapis.com/LogBucket" + } + ]; +} + +// The parameters to `ListViews`. +message ListViewsRequest { + // Required. The bucket whose views are to be listed: + // + // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + string parent = 1 [(google.api.field_behavior) = REQUIRED]; + + // Optional. If present, then retrieve the next batch of results from the preceding call + // to this method. `pageToken` must be the value of `nextPageToken` from the + // previous response. The values of other method parameters should be + // identical to those in the previous call. + string page_token = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The maximum number of results to return from this request. + // + // Non-positive values are ignored. The presence of `nextPageToken` in the + // response indicates that more results might be available. + int32 page_size = 3 [(google.api.field_behavior) = OPTIONAL]; +} + +// The response from ListViews. +message ListViewsResponse { + // A list of views. + repeated LogView views = 1; + + // If there might be more results than appear in this response, then + // `nextPageToken` is included. To get the next set of results, call the same + // method again using the value of `nextPageToken` as `pageToken`. + string next_page_token = 2; +} + +// The parameters to `CreateView`. +message CreateViewRequest { + // Required. The bucket in which to create the view + // + // `"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]"` + // + // For example: + // + // `"projects/my-project/locations/global/buckets/my-bucket"` + string parent = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The id to use for this view. + string view_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The new view. + LogView view = 3 [(google.api.field_behavior) = REQUIRED]; +} + +// The parameters to `UpdateView`. +message UpdateViewRequest { + // Required. The full resource name of the view to update + // + // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]" + // + // For example: + // + // `"projects/my-project/locations/global/buckets/my-bucket/views/my-view"` + string name = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The updated view. 
+  LogView view = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. Field mask that specifies the fields in `view` that need
+  // an update. A field will be overwritten if, and only if, it is
+  // in the update mask. `name` and output only fields cannot be updated.
+  //
+  // For a detailed `FieldMask` definition, see
+  // https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMask
+  //
+  // For example: `updateMask=filter`
+  google.protobuf.FieldMask update_mask = 4 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// The parameters to `GetView`.
+message GetViewRequest {
+  // Required. The resource name of the view:
+  //
+  //     "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]"
+  //
+  // For example:
+  //
+  //   `"projects/my-project/locations/global/buckets/my-bucket/views/my-view"`
+  string name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "logging.googleapis.com/LogView"
+    }
+  ];
+}
+
+// The parameters to `DeleteView`.
+message DeleteViewRequest {
+  // Required. The full resource name of the view to delete:
+  //
+  //     "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]"
+  //
+  // For example:
+  //
+  //   `"projects/my-project/locations/global/buckets/my-bucket/views/my-view"`
+  string name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "logging.googleapis.com/LogView"
+    }
+  ];
+}
+
+// The parameters to `ListSinks`.
+message ListSinksRequest {
+  // Required. The parent resource whose sinks are to be listed:
+  //
+  //     "projects/[PROJECT_ID]"
+  //     "organizations/[ORGANIZATION_ID]"
+  //     "billingAccounts/[BILLING_ACCOUNT_ID]"
+  //     "folders/[FOLDER_ID]"
+  string parent = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      child_type: "logging.googleapis.com/LogSink"
+    }
+  ];
+
+  // Optional. If present, then retrieve the next batch of results from the
+  // preceding call to this method. `pageToken` must be the value of
+  // `nextPageToken` from the previous response. The values of other method
+  // parameters should be identical to those in the previous call.
+  string page_token = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The maximum number of results to return from this request.
+  // Non-positive values are ignored. The presence of `nextPageToken` in the
+  // response indicates that more results might be available.
+  int32 page_size = 3 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Result returned from `ListSinks`.
+message ListSinksResponse {
+  // A list of sinks.
+  repeated LogSink sinks = 1;
+
+  // If there might be more results than appear in this response, then
+  // `nextPageToken` is included. To get the next set of results, call the same
+  // method again using the value of `nextPageToken` as `pageToken`.
+  string next_page_token = 2;
+}
+
+// The parameters to `GetSink`.
+message GetSinkRequest {
+  // Required. The resource name of the sink:
+  //
+  //     "projects/[PROJECT_ID]/sinks/[SINK_ID]"
+  //     "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]"
+  //     "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]"
+  //     "folders/[FOLDER_ID]/sinks/[SINK_ID]"
+  //
+  // For example:
+  //
+  //   `"projects/my-project/sinks/my-sink"`
+  string sink_name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "logging.googleapis.com/LogSink"
+    }
+  ];
+}
+
+// The parameters to `CreateSink`.
+message CreateSinkRequest {
+  // Required. The resource in which to create the sink:
+  //
+  //     "projects/[PROJECT_ID]"
+  //     "organizations/[ORGANIZATION_ID]"
+  //     "billingAccounts/[BILLING_ACCOUNT_ID]"
+  //     "folders/[FOLDER_ID]"
+  //
+  // For examples:
+  //
+  //   `"projects/my-project"`
+  //   `"organizations/123456789"`
+  string parent = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      child_type: "logging.googleapis.com/LogSink"
+    }
+  ];
+
+  // Required. The new sink, whose `name` parameter is a sink identifier that
+  // is not already in use.
+  LogSink sink = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. Determines the kind of IAM identity returned as `writer_identity`
+  // in the new sink. If this value is omitted or set to false, and if the
+  // sink's parent is a project, then the value returned as `writer_identity` is
+  // the same group or service account used by Cloud Logging before the addition
+  // of writer identities to this API. The sink's destination must be in the
+  // same project as the sink itself.
+  //
+  // If this field is set to true, or if the sink is owned by a non-project
+  // resource such as an organization, then the value of `writer_identity` will
+  // be a unique service account used only for exports from the new sink. For
+  // more information, see `writer_identity` in [LogSink][google.logging.v2.LogSink].
+  bool unique_writer_identity = 3 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// The parameters to `UpdateSink`.
+message UpdateSinkRequest {
+  // Required. The full resource name of the sink to update, including the parent
+  // resource and the sink identifier:
+  //
+  //     "projects/[PROJECT_ID]/sinks/[SINK_ID]"
+  //     "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]"
+  //     "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]"
+  //     "folders/[FOLDER_ID]/sinks/[SINK_ID]"
+  //
+  // For example:
+  //
+  //   `"projects/my-project/sinks/my-sink"`
+  string sink_name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "logging.googleapis.com/LogSink"
+    }
+  ];
+
+  // Required. The updated sink, whose name is the same identifier that appears as part
+  // of `sink_name`.
+  LogSink sink = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. See [sinks.create][google.logging.v2.ConfigServiceV2.CreateSink]
+  // for a description of this field. When updating a sink, the effect of this
+  // field on the value of `writer_identity` in the updated sink depends on both
+  // the old and new values of this field:
+  //
+  //   + If the old and new values of this field are both false or both true,
+  //     then there is no change to the sink's `writer_identity`.
+  //   + If the old value is false and the new value is true, then
+  //     `writer_identity` is changed to a unique service account.
+  //   + It is an error if the old value is true and the new value is
+  //     set to false or defaulted to false.
+  bool unique_writer_identity = 3 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Field mask that specifies the fields in `sink` that need
+  // an update. A sink field will be overwritten if, and only if, it is
+  // in the update mask. `name` and output only fields cannot be updated.
+  //
+  // An empty `updateMask` is temporarily treated as using the following mask
+  // for backwards compatibility purposes:
+  //
+  //   `destination,filter,includeChildren`
+  //
+  // At some point in the future, this behavior will be removed and specifying an
+  // empty `updateMask` will be an error.
+  //
+  // For a detailed `FieldMask` definition, see
+  // https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.FieldMask
+  //
+  // For example: `updateMask=filter`
+  google.protobuf.FieldMask update_mask = 4 [(google.api.field_behavior) = OPTIONAL];
+}
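+
+// Illustrative use of `update_mask` (an editor's sketch, not from the upstream
+// file): with the `patch` binding for `UpdateSink` above, updating only a
+// sink's filter might look like
+//
+//     PATCH /v2/projects/my-project/sinks/my-sink?updateMask=filter
+//     { "filter": "severity>=ERROR" }
+//
+// leaving `destination` and all other fields outside the mask unchanged.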
+
+// The parameters to `DeleteSink`.
+message DeleteSinkRequest {
+  // Required. The full resource name of the sink to delete, including the parent
+  // resource and the sink identifier:
+  //
+  //     "projects/[PROJECT_ID]/sinks/[SINK_ID]"
+  //     "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]"
+  //     "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]"
+  //     "folders/[FOLDER_ID]/sinks/[SINK_ID]"
+  //
+  // For example:
+  //
+  //   `"projects/my-project/sinks/my-sink"`
+  string sink_name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "logging.googleapis.com/LogSink"
+    }
+  ];
+}
+
+// Specifies a set of log entries that are filtered out by a sink. If
+// your Google Cloud resource receives a large volume of log entries, you can
+// use exclusions to reduce your chargeable logs. Note that exclusions on
+// organization-level and folder-level sinks don't apply to child resources.
+// Note also that you cannot modify the _Required sink or exclude logs from it.
+message LogExclusion {
+  option (google.api.resource) = {
+    type: "logging.googleapis.com/LogExclusion"
+    pattern: "projects/{project}/exclusions/{exclusion}"
+    pattern: "organizations/{organization}/exclusions/{exclusion}"
+    pattern: "folders/{folder}/exclusions/{exclusion}"
+    pattern: "billingAccounts/{billing_account}/exclusions/{exclusion}"
+  };
+
+  // Required. A client-assigned identifier, such as `"load-balancer-exclusion"`.
+  // Identifiers are limited to 100 characters and can include only letters,
+  // digits, underscores, hyphens, and periods. First character has to be
+  // alphanumeric.
+  string name = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. A description of this exclusion.
+  string description = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Required. An [advanced logs
+  // filter](https://cloud.google.com/logging/docs/view/advanced-queries) that
+  // matches the log entries to be excluded. By using the [sample
+  // function](https://cloud.google.com/logging/docs/view/advanced-queries#sample),
+  // you can exclude less than 100% of the matching log entries.
+  //
+  // For example, the following query matches 99% of low-severity log entries
+  // from Google Cloud Storage buckets:
+  //
+  //   `"resource.type=gcs_bucket severity<ERROR sample(insertId, 0.99)"`
+  string filter = 3 [(google.api.field_behavior) = REQUIRED];
+
+pub struct StackDriverExporter {
+    tx: futures_channel::mpsc::Sender<Vec<SpanData>>,
+    pending_count: Arc<AtomicUsize>,
+    maximum_shutdown_duration: Duration,
+}
+
+impl StackDriverExporter {
+    pub fn builder() -> Builder {
+        Builder::default()
+    }
+
+    pub fn pending_count(&self) -> usize {
+        self.pending_count.load(Ordering::Relaxed)
+    }
+}
+
+impl SpanExporter for StackDriverExporter {
+    fn export(&mut self, batch: Vec<SpanData>) -> BoxFuture<'static, ExportResult> {
+        match self.tx.try_send(batch) {
+            Err(e) => Box::pin(std::future::ready(Err(TraceError::Other(Box::new(
+                e.into_send_error(),
+            ))))),
+            Ok(()) => {
+                self.pending_count.fetch_add(1, Ordering::Relaxed);
+                Box::pin(std::future::ready(Ok(())))
+            }
+        }
+    }
+
+    fn shutdown(&mut self) {
+        let start = Instant::now();
+        // Spin for a bit and give the inner export some time to upload, with a timeout.
+        while (Instant::now() - start) < self.maximum_shutdown_duration && self.pending_count() > 0
+        {
+            std::thread::yield_now();
+        }
+    }
+}
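+
+// Note on the shutdown strategy above: `SpanExporter::shutdown` is
+// synchronous, while the actual uploads happen on the driver future returned
+// by `Builder::build`. The exporter therefore cannot await its in-flight
+// batches here; it spins cooperatively, bounded by `maximum_shutdown_duration`,
+// until `pending_count` drops to zero. Batches still pending when the deadline
+// expires may never be uploaded.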
+
+impl fmt::Debug for StackDriverExporter {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        #[allow(clippy::unneeded_field_pattern)]
+        let Self {
+            tx: _,
+            pending_count,
+            maximum_shutdown_duration,
+        } = self;
+        f.debug_struct("StackDriverExporter")
+            .field("tx", &"(elided)")
+            .field("pending_count", pending_count)
+            .field("maximum_shutdown_duration", maximum_shutdown_duration)
+            .finish()
+    }
+}
+
+/// Helper type to build a `StackDriverExporter`.
+#[derive(Clone, Default)]
+pub struct Builder {
+    maximum_shutdown_duration: Option<Duration>,
+    num_concurrent_requests: Option<usize>,
+    log_context: Option<LogContext>,
+}
+
+impl Builder {
+    /// Set the maximum time to wait for pending exports when shutting down.
+    pub fn maximum_shutdown_duration(mut self, duration: Duration) -> Self {
+        self.maximum_shutdown_duration = Some(duration);
+        self
+    }
+
+    /// Set the number of concurrent requests.
+    ///
+    /// If `num_concurrent_requests` is set to `0` or `None` then no limit is enforced.
+    pub fn num_concurrent_requests(mut self, num_concurrent_requests: usize) -> Self {
+        self.num_concurrent_requests = Some(num_concurrent_requests);
+        self
+    }
+
+    /// Enable writing log entries with the given `log_context`.
+    pub fn log_context(mut self, log_context: LogContext) -> Self {
+        self.log_context = Some(log_context);
+        self
+    }
+
+    pub async fn build<A: Authorizer>(
+        self,
+        authenticator: A,
+    ) -> Result<(StackDriverExporter, impl Future<Output = ()>), Error>
+    where
+        Error: From<A::Error>,
+    {
+        let Self {
+            maximum_shutdown_duration,
+            num_concurrent_requests,
+            log_context,
+        } = self;
+        let uri = http::uri::Uri::from_static("https://cloudtrace.googleapis.com:443");
+
+        let trace_channel = Channel::builder(uri)
+            .tls_config(ClientTlsConfig::new())
+            .map_err(|e| Error::Transport(e.into()))?
+            .connect()
+            .await
+            .map_err(|e| Error::Transport(e.into()))?;
+
+        let log_client = match log_context {
+            Some(log_context) => {
+                let log_channel = Channel::builder(http::uri::Uri::from_static(
+                    "https://logging.googleapis.com:443",
+                ))
+                .tls_config(ClientTlsConfig::new())
+                .map_err(|e| Error::Transport(e.into()))?
+                .connect()
+                .await
+                .map_err(|e| Error::Transport(e.into()))?;
+
+                Some(LogClient {
+                    client: LoggingServiceV2Client::new(log_channel),
+                    context: Arc::new(InternalLogContext::from(log_context)),
+                })
+            }
+            None => None,
+        };
+
+        let (tx, rx) = futures_channel::mpsc::channel(64);
+        let pending_count = Arc::new(AtomicUsize::new(0));
+        let scopes = Arc::new(match log_client {
+            Some(_) => vec![TRACE_APPEND, LOGGING_WRITE],
+            None => vec![TRACE_APPEND],
+        });
+
+        let count_clone = pending_count.clone();
+        let future = async move {
+            let trace_client = TraceServiceClient::new(trace_channel);
+            let authorizer = &authenticator;
+            let log_client = log_client.clone();
+            rx.for_each_concurrent(num_concurrent_requests, move |batch| {
+                let trace_client = trace_client.clone();
+                let log_client = log_client.clone();
+                let pending_count = count_clone.clone();
+                let scopes = scopes.clone();
+                ExporterContext {
+                    trace_client,
+                    log_client,
+                    authorizer,
+                    pending_count,
+                    scopes,
+                }
+                .export(batch)
+            })
+            .await
+        };
+
+        let exporter = StackDriverExporter {
+            tx,
+            pending_count,
+            maximum_shutdown_duration: maximum_shutdown_duration
+                .unwrap_or_else(|| Duration::from_secs(5)),
+        };
+
+        Ok((exporter, future))
+    }
+}
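+
+// A minimal usage sketch for the builder above (an illustration, not part of
+// this change): build the exporter, spawn the returned driver future on a
+// Tokio runtime, then register the exporter with a tracer provider. This
+// assumes the `gcp_auth` feature is enabled; the `opentelemetry_sdk` names
+// below are assumptions based on current releases.
+//
+//     let authorizer = GcpAuthorizer::new().await?;
+//     let (exporter, driver) = StackDriverExporter::builder()
+//         .num_concurrent_requests(8)
+//         .build(authorizer)
+//         .await?;
+//     tokio::spawn(driver);
+//     let provider = opentelemetry_sdk::trace::TracerProvider::builder()
+//         .with_batch_exporter(exporter, opentelemetry_sdk::runtime::Tokio)
+//         .build();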
+
+struct ExporterContext<'a, A> {
+    trace_client: TraceServiceClient<Channel>,
+    log_client: Option<LogClient>,
+    authorizer: &'a A,
+    pending_count: Arc<AtomicUsize>,
+    scopes: Arc<Vec<&'static str>>,
+}
+
+impl<A: Authorizer> ExporterContext<'_, A>
+where
+    Error: From<A::Error>,
+{
+    async fn export(mut self, batch: Vec<SpanData>) {
+        use proto::devtools::cloudtrace::v2::span::time_event::Value;
+
+        let mut entries = Vec::new();
+        let mut spans = Vec::with_capacity(batch.len());
+        for span in batch {
+            let trace_id = hex::encode(span.span_context.trace_id().to_bytes());
+            let span_id = hex::encode(span.span_context.span_id().to_bytes());
+            let time_event = match &self.log_client {
+                None => span
+                    .events
+                    .into_iter()
+                    .map(|event| TimeEvent {
+                        time: Some(event.timestamp.into()),
+                        value: Some(Value::Annotation(Annotation {
+                            description: Some(to_truncate(event.name.into_owned())),
+                            ..Default::default()
+                        })),
+                    })
+                    .collect(),
+                Some(client) => {
+                    entries.extend(span.events.into_iter().map(|event| {
+                        let (mut level, mut target, mut labels) =
+                            (LogSeverity::Default, None, HashMap::default());
+                        for kv in event.attributes {
+                            match kv.key.as_str() {
+                                "level" => {
+                                    level = match kv.value.as_str().as_ref() {
+                                        "DEBUG" | "TRACE" => LogSeverity::Debug,
+                                        "INFO" => LogSeverity::Info,
+                                        "WARN" => LogSeverity::Warning,
+                                        "ERROR" => LogSeverity::Error,
+                                        _ => LogSeverity::Default, // tracing::Level is limited to the above 5
+                                    }
+                                }
+                                "target" => target = Some(kv.value.as_str().into_owned()),
+                                key => {
+                                    labels.insert(key.to_owned(), kv.value.as_str().into_owned());
+                                }
+                            }
+                        }
+                        let project_id = self.authorizer.project_id();
+                        let log_id = &client.context.log_id;
+                        LogEntry {
+                            log_name: format!("projects/{project_id}/logs/{log_id}"),
+                            resource: Some(client.context.resource.clone()),
+                            severity: level as i32,
+                            timestamp: Some(event.timestamp.into()),
+                            labels,
+                            trace: format!("projects/{project_id}/traces/{trace_id}"),
+                            span_id: span_id.clone(),
+                            source_location: target.map(|target| LogEntrySourceLocation {
+                                file: String::new(),
+                                line: 0,
+                                function: target,
+                            }),
+                            payload: Some(Payload::TextPayload(event.name.into_owned())),
+                            // severity, source_location, text_payload
+                            ..Default::default()
+                        }
+                    }));
+
+                    vec![]
+                }
+            };
+
+            spans.push(Span {
+                name: format!(
+                    "projects/{}/traces/{}/spans/{}",
+                    self.authorizer.project_id(),
+                    hex::encode(span.span_context.trace_id().to_bytes()),
+                    hex::encode(span.span_context.span_id().to_bytes())
+                ),
+                display_name: Some(to_truncate(span.name.into_owned())),
+                span_id: hex::encode(span.span_context.span_id().to_bytes()),
+                // From the API docs: If this is a root span,
+                // then this field must be empty.
+                parent_span_id: match span.parent_span_id {
+                    SpanId::INVALID => "".to_owned(),
+                    _ => hex::encode(span.parent_span_id.to_bytes()),
+                },
+                start_time: Some(span.start_time.into()),
+                end_time: Some(span.end_time.into()),
+                attributes: Some((span.attributes, span.resource.as_ref()).into()),
+                time_events: Some(TimeEvents {
+                    time_event,
+                    ..Default::default()
+                }),
+                links: transform_links(&span.links),
+                status: status(span.status),
+                span_kind: SpanKind::from(span.span_kind) as i32,
+                ..Default::default()
+            });
+        }
+
+        let mut req = Request::new(BatchWriteSpansRequest {
+            name: format!("projects/{}", self.authorizer.project_id()),
+            spans,
+        });
+
+        self.pending_count.fetch_sub(1, Ordering::Relaxed);
+        if let Err(e) = self.authorizer.authorize(&mut req, &self.scopes).await {
+            handle_error(TraceError::from(Error::Authorizer(e.into())));
+        } else if let Err(e) = self.trace_client.batch_write_spans(req).await {
+            handle_error(TraceError::from(Error::Transport(e.into())));
+        }
+
+        let client = match &mut self.log_client {
+            Some(client) => client,
+            None => return,
+        };
+
+        let mut req = Request::new(WriteLogEntriesRequest {
+            log_name: format!(
+                "projects/{}/logs/{}",
+                self.authorizer.project_id(),
+                client.context.log_id,
+            ),
+            entries,
+            dry_run: false,
+            labels: HashMap::default(),
+            partial_success: true,
+            resource: None,
+        });
+
+        if let Err(e) = self.authorizer.authorize(&mut req, &self.scopes).await {
+            handle_error(TraceError::from(Error::from(e)));
+        } else if let Err(e) = client.client.write_log_entries(req).await {
+            handle_error(TraceError::from(Error::Transport(e.into())));
+        }
+    }
+}
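+
+// Summary of the event-to-log mapping above: when a `LogContext` is
+// configured, each span event is written as a `LogEntry` whose `trace` and
+// `span_id` fields point back at the exported span, so logs and traces can be
+// correlated. An event attribute named "level" selects the entry's severity,
+// "target" becomes the `source_location` function, and all other attributes
+// are carried over as entry labels.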
+
+#[cfg(feature = "yup-authorizer")]
+pub struct YupAuthorizer {
+    authenticator: Authenticator<HttpsConnector<HttpConnector>>,
+    project_id: String,
+}
+
+#[cfg(feature = "yup-authorizer")]
+impl YupAuthorizer {
+    pub async fn new(
+        credentials_path: impl AsRef<Path>,
+        persistent_token_file: impl Into<Option<PathBuf>>,
+    ) -> Result<Self, Error> {
+        let service_account_key = yup_oauth2::read_service_account_key(&credentials_path).await?;
+        let project_id = service_account_key
+            .project_id
+            .as_ref()
+            .ok_or_else(|| Error::Other("project_id is missing".into()))?
+            .clone();
+        let mut authenticator =
+            yup_oauth2::ServiceAccountAuthenticator::builder(service_account_key);
+        if let Some(persistent_token_file) = persistent_token_file.into() {
+            authenticator = authenticator.persist_tokens_to_disk(persistent_token_file);
+        }
+
+        Ok(Self {
+            authenticator: authenticator.build().await?,
+            project_id,
+        })
+    }
+}
+
+#[cfg(feature = "yup-authorizer")]
+#[async_trait]
+impl Authorizer for YupAuthorizer {
+    type Error = Error;
+
+    fn project_id(&self) -> &str {
+        &self.project_id
+    }
+
+    async fn authorize<T: Send + Sync>(
+        &self,
+        req: &mut Request<T>,
+        scopes: &[&str],
+    ) -> Result<(), Self::Error> {
+        let token = self
+            .authenticator
+            .token(scopes)
+            .await
+            .map_err(|e| Error::Authorizer(e.into()))?;
+
+        let token = match token.token() {
+            Some(token) => token,
+            None => return Err(Error::Other("unable to access token contents".into())),
+        };
+
+        req.metadata_mut().insert(
+            "authorization",
+            MetadataValue::try_from(format!("Bearer {}", token)).unwrap(),
+        );
+        Ok(())
+    }
+}
+
+#[cfg(feature = "gcp_auth")]
+pub struct GcpAuthorizer {
+    manager: gcp_auth::AuthenticationManager,
+    project_id: String,
+}
+
+#[cfg(feature = "gcp_auth")]
+impl GcpAuthorizer {
+    pub async fn new() -> Result<Self, Error> {
+        let manager = gcp_auth::AuthenticationManager::new()
+            .await
+            .map_err(|e| Error::Authorizer(e.into()))?;
+
+        let project_id = manager
+            .project_id()
+            .await
+            .map_err(|e| Error::Authorizer(e.into()))?;
+
+        Ok(Self {
+            manager,
+            project_id,
+        })
+    }
+
+    pub fn from_gcp_auth(manager: gcp_auth::AuthenticationManager, project_id: String) -> Self {
+        Self {
+            manager,
+            project_id,
+        }
+    }
+}
+
+#[cfg(feature = "gcp_auth")]
+#[async_trait]
+impl Authorizer for GcpAuthorizer {
+    type Error = Error;
+
+    fn project_id(&self) -> &str {
+        &self.project_id
+    }
+
+    async fn authorize<T: Send + Sync>(
+        &self,
+        req: &mut Request<T>,
+        scopes: &[&str],
+    ) -> Result<(), Self::Error> {
+        let token = self
+            .manager
+            .get_token(scopes)
+            .await
+            .map_err(|e| Error::Authorizer(e.into()))?;
+
+        req.metadata_mut().insert(
+            "authorization",
+            MetadataValue::try_from(format!("Bearer {}", token.as_str())).unwrap(),
+        );
+
+        Ok(())
+    }
+}
+
+#[async_trait]
+pub trait Authorizer: Sync + Send + 'static {
+    type Error: std::error::Error + fmt::Debug + Send + Sync;
+
+    fn project_id(&self) -> &str;
+
+    async fn authorize<T: Send + Sync>(
+        &self,
+        request: &mut Request<T>,
+        scopes: &[&str],
+    ) -> Result<(), Self::Error>;
+}
+
+impl From<Value> for AttributeValue {
+    fn from(v: Value) -> AttributeValue {
+        use proto::devtools::cloudtrace::v2::attribute_value;
+        let new_value = match v {
+            Value::Bool(v) => attribute_value::Value::BoolValue(v),
+            Value::F64(v) => attribute_value::Value::StringValue(to_truncate(v.to_string())),
+            Value::I64(v) => attribute_value::Value::IntValue(v),
+            Value::String(v) => attribute_value::Value::StringValue(to_truncate(v.to_string())),
+            Value::Array(_) => attribute_value::Value::StringValue(to_truncate(v.to_string())),
+        };
+        AttributeValue {
+            value: Some(new_value),
+        }
+    }
+}
+
+fn to_truncate(s: String) -> TruncatableString {
+    TruncatableString {
+        value: s,
+        ..Default::default()
+    }
+}
+
+#[derive(Debug, Error)]
+pub enum Error {
+    #[error("authorizer error: {0}")]
+    Authorizer(#[source] Box<dyn std::error::Error + Send + Sync>),
+    #[error("I/O error: {0}")]
+    Io(#[from] std::io::Error),
+    #[error("{0}")]
+    Other(#[from] Box<dyn std::error::Error + Send + Sync>),
+    #[error("tonic error: {0}")]
+    Transport(#[source] Box<dyn std::error::Error + Send + Sync>),
+}
+
+impl ExportError for Error {
+    fn exporter_name(&self) -> &'static str {
+        "stackdriver"
+    }
+}
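+
+// Export failures are not returned to the caller: authorization and transport
+// errors are wrapped in `TraceError` and reported through `handle_error`, and
+// the `ExportError` impl above labels them with the "stackdriver" exporter
+// name.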
+/// As defined in
+/// https://cloud.google.com/logging/docs/reference/v2/rpc/google.logging.type#google.logging.type.LogSeverity.
+enum LogSeverity {
+    Default = 0,
+    Debug = 100,
+    Info = 200,
+    Warning = 400,
+    Error = 500,
+}
+
+#[derive(Clone)]
+struct LogClient {
+    client: LoggingServiceV2Client<Channel>,
+    context: Arc<InternalLogContext>,
+}
+
+struct InternalLogContext {
+    log_id: String,
+    resource: proto::api::MonitoredResource,
+}
+
+#[derive(Clone)]
+pub struct LogContext {
+    pub log_id: String,
+    pub resource: MonitoredResource,
+}
+
+impl From<LogContext> for InternalLogContext {
+    fn from(cx: LogContext) -> Self {
+        let mut labels = HashMap::default();
+        let resource = match cx.resource {
+            MonitoredResource::CloudRunRevision {
+                project_id,
+                service_name,
+                revision_name,
+                location,
+                configuration_name,
+            } => {
+                labels.insert("project_id".to_string(), project_id);
+                if let Some(service_name) = service_name {
+                    labels.insert("service_name".to_string(), service_name);
+                }
+                if let Some(revision_name) = revision_name {
+                    labels.insert("revision_name".to_string(), revision_name);
+                }
+                if let Some(location) = location {
+                    labels.insert("location".to_string(), location);
+                }
+                if let Some(configuration_name) = configuration_name {
+                    labels.insert("configuration_name".to_string(), configuration_name);
+                }
+
+                proto::api::MonitoredResource {
+                    r#type: "cloud_run_revision".to_owned(),
+                    labels,
+                }
+            }
+            MonitoredResource::GenericNode {
+                project_id,
+                location,
+                namespace,
+                node_id,
+            } => {
+                labels.insert("project_id".to_string(), project_id);
+                if let Some(location) = location {
+                    labels.insert("location".to_string(), location);
+                }
+                if let Some(namespace) = namespace {
+                    labels.insert("namespace".to_string(), namespace);
+                }
+                if let Some(node_id) = node_id {
+                    labels.insert("node_id".to_string(), node_id);
+                }
+
+                proto::api::MonitoredResource {
+                    r#type: "generic_node".to_owned(),
+                    labels,
+                }
+            }
+            MonitoredResource::GenericTask {
+                project_id,
+                location,
+                namespace,
+                job,
+                task_id,
+            } => {
+                labels.insert("project_id".to_owned(), project_id);
+                if let Some(location) = location {
+                    labels.insert("location".to_owned(), location);
+                }
+                if let Some(namespace) = namespace {
+                    labels.insert("namespace".to_owned(), namespace);
+                }
+                if let Some(job) = job {
+                    labels.insert("job".to_owned(), job);
+                }
+                if let Some(task_id) = task_id {
+                    labels.insert("task_id".to_owned(), task_id);
+                }
+
+                proto::api::MonitoredResource {
+                    r#type: "generic_task".to_owned(),
+                    labels,
+                }
+            }
+            MonitoredResource::Global { project_id } => {
+                labels.insert("project_id".to_owned(), project_id);
+                proto::api::MonitoredResource {
+                    r#type: "global".to_owned(),
+                    labels,
+                }
+            }
+        };
+
+        Self {
+            log_id: cx.log_id,
+            resource,
+        }
+    }
+}
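For reference, a short sketch of constructing the `LogContext` defined above; the identifiers are placeholder values, and the context is consumed through the conversion shown here (and, presumably, a log-output option on the exporter builder).

```rust
// Placeholder values for illustration only.
let log_context = LogContext {
    log_id: "my-service".to_owned(),
    resource: MonitoredResource::GenericTask {
        project_id: "my-gcp-project".to_owned(),
        location: Some("us-central1".to_owned()),
        namespace: Some("prod".to_owned()),
        job: Some("api".to_owned()),
        task_id: None,
    },
};

// The `From<LogContext> for InternalLogContext` impl above turns this into
// a `google.api.MonitoredResource` with r#type = "generic_task"; `None`
// fields are simply omitted from the label map.
let internal: InternalLogContext = log_context.into();
assert_eq!(internal.resource.r#type, "generic_task");
```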
+/// A description of a `MonitoredResource`.
+///
+/// Possible values are listed in the [API documentation](https://cloud.google.com/logging/docs/api/v2/resource-list).
+/// Please submit an issue or pull request if you want to use a resource type not listed here.
+#[derive(Clone)]
+pub enum MonitoredResource {
+    Global {
+        project_id: String,
+    },
+    GenericNode {
+        project_id: String,
+        location: Option<String>,
+        namespace: Option<String>,
+        node_id: Option<String>,
+    },
+    GenericTask {
+        project_id: String,
+        location: Option<String>,
+        namespace: Option<String>,
+        job: Option<String>,
+        task_id: Option<String>,
+    },
+    CloudRunRevision {
+        project_id: String,
+        service_name: Option<String>,
+        revision_name: Option<String>,
+        location: Option<String>,
+        configuration_name: Option<String>,
+    },
+}
+
+impl From<(Vec<KeyValue>, &Resource)> for Attributes {
+    /// Combines `EvictedHashMap` and `Resource` attributes into a maximum of 32.
+    ///
+    /// The `Resource` takes precedence over the `EvictedHashMap` attributes.
+    fn from((attributes, resource): (Vec<KeyValue>, &Resource)) -> Self {
+        let mut dropped_attributes_count: i32 = 0;
+        let num_resource_attributes = resource.len();
+        let num_attributes = attributes.len();
+
+        let attributes_as_key_value_tuples: Vec<(Key, Value)> = attributes
+            .into_iter()
+            .map(|kv| (kv.key, kv.value))
+            .collect();
+
+        let attribute_map = resource
+            .into_iter()
+            .map(|(k, v)| (k.clone(), v.clone()))
+            .chain(attributes_as_key_value_tuples)
+            .flat_map(|(k, v)| {
+                let key = k.as_str();
+                if key.len() > 128 {
+                    dropped_attributes_count += 1;
+                    return None;
+                }
+
+                if k == SERVICE_NAME {
+                    return Some((GCP_SERVICE_NAME.to_owned(), v.into()));
+                } else if key == HTTP_PATH_ATTRIBUTE {
+                    return Some((GCP_HTTP_PATH.to_owned(), v.into()));
+                }
+
+                for (otel_key, gcp_key) in KEY_MAP {
+                    if otel_key == &k {
+                        return Some((gcp_key.to_owned(), v.into()));
+                    }
+                }
+
+                Some((key.to_owned(), v.into()))
+            })
+            .take(MAX_ATTRIBUTES_PER_SPAN)
+            .collect();
+
+        Attributes {
+            attribute_map,
+            dropped_attributes_count: dropped_attributes_count
+                + (num_resource_attributes + num_attributes).saturating_sub(MAX_ATTRIBUTES_PER_SPAN)
+                    as i32,
+        }
+    }
+}
+
+fn transform_links(links: &opentelemetry_sdk::trace::SpanLinks) -> Option<Links> {
+    if links.is_empty() {
+        return None;
+    }
+
+    Some(Links {
+        dropped_links_count: links.dropped_count as i32,
+        link: links
+            .iter()
+            .map(|link| Link {
+                trace_id: hex::encode(link.span_context.trace_id().to_bytes()),
+                span_id: hex::encode(link.span_context.span_id().to_bytes()),
+                ..Default::default()
+            })
+            .collect(),
+    })
+}
+
+// Map conventional OpenTelemetry keys to their GCP counterparts.
+const KEY_MAP: [(&Key, &str); 7] = [
+    (&HTTP_HOST, "/http/host"),
+    (&HTTP_METHOD, "/http/method"),
+    (&HTTP_TARGET, "/http/path"),
+    (&HTTP_URL, "/http/url"),
+    (&HTTP_USER_AGENT, "/http/user_agent"),
+    (&HTTP_STATUS_CODE, "/http/status_code"),
+    (&HTTP_ROUTE, "/http/route"),
+];
+
+impl From<opentelemetry::trace::SpanKind> for SpanKind {
+    fn from(span_kind: opentelemetry::trace::SpanKind) -> Self {
+        match span_kind {
+            opentelemetry::trace::SpanKind::Client => SpanKind::Client,
+            opentelemetry::trace::SpanKind::Server => SpanKind::Server,
+            opentelemetry::trace::SpanKind::Producer => SpanKind::Producer,
+            opentelemetry::trace::SpanKind::Consumer => SpanKind::Consumer,
+            opentelemetry::trace::SpanKind::Internal => SpanKind::Internal,
+        }
+    }
+}
+
+fn status(value: opentelemetry::trace::Status) -> Option<Status> {
+    match value {
+        opentelemetry::trace::Status::Ok => Some(Status {
+            code: Code::Ok as i32,
+            message: "".to_owned(),
+            details: vec![],
+        }),
+        opentelemetry::trace::Status::Unset => None,
+        opentelemetry::trace::Status::Error { description } => Some(Status {
+            code: Code::Unknown as i32,
+            message: description.into(),
+            details: vec![],
+        }),
+    }
+}
+const TRACE_APPEND: &str = "https://www.googleapis.com/auth/trace.append";
+const LOGGING_WRITE: &str = "https://www.googleapis.com/auth/logging.write";
+const HTTP_PATH_ATTRIBUTE: &str = "http.path";
+const GCP_HTTP_PATH: &str = "/http/path";
+const GCP_SERVICE_NAME: &str = "g.co/gae/app/module";
+const MAX_ATTRIBUTES_PER_SPAN: usize = 32;
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use opentelemetry::{KeyValue, Value};
+    use opentelemetry_semantic_conventions as semcov;
+
+    #[test]
+    fn test_attributes_mapping() {
+        let capacity = 10;
+        let mut attributes = Vec::with_capacity(capacity);
+
+        // hostAttribute = "http.host"
+        attributes.push(HTTP_HOST.string("example.com:8080"));
+
+        // methodAttribute = "http.method"
+        attributes.push(semcov::trace::HTTP_METHOD.string("POST"));
+
+        // pathAttribute = "http.path"
+        attributes.push(KeyValue::new(
+            "http.path",
+            Value::String("/path/12314/?q=ddds#123".into()),
+        ));
+
+        // urlAttribute = "http.url"
+        attributes.push(
+            semcov::trace::HTTP_URL.string("https://example.com:8080/webshop/articles/4?s=1"),
+        );
+
+        // userAgentAttribute = "http.user_agent"
+        attributes.push(HTTP_USER_AGENT.string("CERN-LineMode/2.15 libwww/2.17b3"));
+
+        // statusCodeAttribute = "http.status_code"
+        attributes.push(semcov::trace::HTTP_STATUS_CODE.i64(200));
+
+        // statusCodeAttribute = "http.route"
+        attributes.push(semcov::trace::HTTP_ROUTE.string("/webshop/articles/:article_id"));
+
+        // serviceAttribute = "service.name"
+        let resources = Resource::new([semcov::resource::SERVICE_NAME.string("Test Service Name")]);
+
+        let actual: Attributes = (attributes, &resources).into();
+
+        assert_eq!(actual.attribute_map.len(), 8);
+        assert_eq!(actual.dropped_attributes_count, 0);
+        assert_eq!(
+            actual.attribute_map.get("/http/host"),
+            Some(&AttributeValue::from(Value::String(
+                "example.com:8080".into()
+            )))
+        );
+        assert_eq!(
+            actual.attribute_map.get("/http/method"),
+            Some(&AttributeValue::from(Value::String("POST".into()))),
+        );
+        assert_eq!(
+            actual.attribute_map.get("/http/path"),
+            Some(&AttributeValue::from(Value::String(
+                "/path/12314/?q=ddds#123".into()
+            ))),
+        );
+        assert_eq!(
+            actual.attribute_map.get("/http/route"),
+            Some(&AttributeValue::from(Value::String(
+                "/webshop/articles/:article_id".into()
+            ))),
+        );
"https://example.com:8080/webshop/articles/4?s=1".into(), + ))), + ); + assert_eq!( + actual.attribute_map.get("/http/user_agent"), + Some(&AttributeValue::from(Value::String( + "CERN-LineMode/2.15 libwww/2.17b3".into() + ))), + ); + assert_eq!( + actual.attribute_map.get("/http/status_code"), + Some(&AttributeValue::from(Value::I64(200))), + ); + assert_eq!( + actual.attribute_map.get("g.co/gae/app/module"), + Some(&AttributeValue::from(Value::String( + "Test Service Name".into() + ))), + ); + } + + #[test] + fn test_too_many() { + let resources = Resource::new([semcov::resource::SERVICE_NAME.string("Test Service Name")]); + let mut attributes = Vec::with_capacity(32); + for i in 0..32 { + attributes.push(KeyValue::new( + format!("key{}", i), + Value::String(format!("value{}", i).into()), + )); + } + + let actual: Attributes = (attributes, &resources).into(); + + assert_eq!(actual.attribute_map.len(), 32); + assert_eq!(actual.dropped_attributes_count, 1); + assert_eq!( + actual.attribute_map.get("g.co/gae/app/module"), + Some(&AttributeValue::from(Value::String( + "Test Service Name".into() + ))), + ); + } + + #[test] + fn test_attributes_mapping_http_target() { + let attributes = vec![semcov::trace::HTTP_TARGET.string("/path/12314/?q=ddds#123")]; + + // hostAttribute = "http.target" + + let resources = Resource::new([]); + let actual: Attributes = (attributes, &resources).into(); + + assert_eq!(actual.attribute_map.len(), 1); + assert_eq!(actual.dropped_attributes_count, 0); + assert_eq!( + actual.attribute_map.get("/http/path"), + Some(&AttributeValue::from(Value::String( + "/path/12314/?q=ddds#123".into() + ))), + ); + } + + #[test] + fn test_attributes_mapping_dropped_attributes_count() { + let attributes = vec![KeyValue::new("answer", Value::I64(42)),KeyValue::new("long_attribute_key_dvwmacxpeefbuemoxljmqvldjxmvvihoeqnuqdsyovwgljtnemouidabhkmvsnauwfnaihekcfwhugejboiyfthyhmkpsaxtidlsbwsmirebax", Value::String("Some value".into()))]; + + let resources = Resource::new([]); + let actual: Attributes = (attributes, &resources).into(); + assert_eq!( + actual, + Attributes { + attribute_map: HashMap::from([( + "answer".into(), + AttributeValue::from(Value::I64(42)) + ),]), + dropped_attributes_count: 1, + } + ); + assert_eq!(actual.attribute_map.len(), 1); + assert_eq!(actual.dropped_attributes_count, 1); + } +} diff --git a/opentelemetry-stackdriver/src/proto/api.rs b/opentelemetry-stackdriver/src/proto/api.rs new file mode 100644 index 00000000..eeeb2f2c --- /dev/null +++ b/opentelemetry-stackdriver/src/proto/api.rs @@ -0,0 +1,1237 @@ +/// Defines the HTTP configuration for an API service. It contains a list of +/// \[HttpRule][google.api.HttpRule\], each specifying the mapping of an RPC method +/// to one or more HTTP REST API methods. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Http { + /// A list of HTTP configuration rules that apply to individual API methods. + /// + /// **NOTE:** All service configuration rules follow "last one wins" order. + #[prost(message, repeated, tag = "1")] + pub rules: ::prost::alloc::vec::Vec, + /// When set to true, URL path parameters will be fully URI-decoded except in + /// cases of single segment matches in reserved expansion, where "%2F" will be + /// left encoded. + /// + /// The default behavior is to not decode RFC 6570 reserved characters in multi + /// segment matches. 
+ #[prost(bool, tag = "2")] + pub fully_decode_reserved_expansion: bool, +} +/// # gRPC Transcoding +/// +/// gRPC Transcoding is a feature for mapping between a gRPC method and one or +/// more HTTP REST endpoints. It allows developers to build a single API service +/// that supports both gRPC APIs and REST APIs. Many systems, including [Google +/// APIs](), +/// [Cloud Endpoints](), [gRPC +/// Gateway](), +/// and \[Envoy\]() proxy support this feature +/// and use it for large scale production services. +/// +/// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies +/// how different portions of the gRPC request message are mapped to the URL +/// path, URL query parameters, and HTTP request body. It also controls how the +/// gRPC response message is mapped to the HTTP response body. `HttpRule` is +/// typically specified as an `google.api.http` annotation on the gRPC method. +/// +/// Each mapping specifies a URL path template and an HTTP method. The path +/// template may refer to one or more fields in the gRPC request message, as long +/// as each field is a non-repeated field with a primitive (non-message) type. +/// The path template controls how fields of the request message are mapped to +/// the URL path. +/// +/// Example: +/// +/// service Messaging { +/// rpc GetMessage(GetMessageRequest) returns (Message) { +/// option (google.api.http) = { +/// get: "/v1/{name=messages/*}" +/// }; +/// } +/// } +/// message GetMessageRequest { +/// string name = 1; // Mapped to URL path. +/// } +/// message Message { +/// string text = 1; // The resource content. +/// } +/// +/// This enables an HTTP REST to gRPC mapping as below: +/// +/// HTTP | gRPC +/// -----|----- +/// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` +/// +/// Any fields in the request message which are not bound by the path template +/// automatically become HTTP query parameters if there is no HTTP request body. +/// For example: +/// +/// service Messaging { +/// rpc GetMessage(GetMessageRequest) returns (Message) { +/// option (google.api.http) = { +/// get:"/v1/messages/{message_id}" +/// }; +/// } +/// } +/// message GetMessageRequest { +/// message SubMessage { +/// string subfield = 1; +/// } +/// string message_id = 1; // Mapped to URL path. +/// int64 revision = 2; // Mapped to URL query parameter `revision`. +/// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. +/// } +/// +/// This enables a HTTP JSON to RPC mapping as below: +/// +/// HTTP | gRPC +/// -----|----- +/// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | +/// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: +/// "foo"))` +/// +/// Note that fields which are mapped to URL query parameters must have a +/// primitive type or a repeated primitive type or a non-repeated message type. +/// In the case of a repeated type, the parameter can be repeated in the URL +/// as `...?param=A¶m=B`. In the case of a message type, each field of the +/// message is mapped to a separate parameter, such as +/// `...?foo.a=A&foo.b=B&foo.c=C`. +/// +/// For HTTP methods that allow a request body, the `body` field +/// specifies the mapping. 
Consider a REST update method on the +/// message resource collection: +/// +/// service Messaging { +/// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { +/// option (google.api.http) = { +/// patch: "/v1/messages/{message_id}" +/// body: "message" +/// }; +/// } +/// } +/// message UpdateMessageRequest { +/// string message_id = 1; // mapped to the URL +/// Message message = 2; // mapped to the body +/// } +/// +/// The following HTTP JSON to RPC mapping is enabled, where the +/// representation of the JSON in the request body is determined by +/// protos JSON encoding: +/// +/// HTTP | gRPC +/// -----|----- +/// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: +/// "123456" message { text: "Hi!" })` +/// +/// The special name `*` can be used in the body mapping to define that +/// every field not bound by the path template should be mapped to the +/// request body. This enables the following alternative definition of +/// the update method: +/// +/// service Messaging { +/// rpc UpdateMessage(Message) returns (Message) { +/// option (google.api.http) = { +/// patch: "/v1/messages/{message_id}" +/// body: "*" +/// }; +/// } +/// } +/// message Message { +/// string message_id = 1; +/// string text = 2; +/// } +/// +/// +/// The following HTTP JSON to RPC mapping is enabled: +/// +/// HTTP | gRPC +/// -----|----- +/// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: +/// "123456" text: "Hi!")` +/// +/// Note that when using `*` in the body mapping, it is not possible to +/// have HTTP parameters, as all fields not bound by the path end in +/// the body. This makes this option more rarely used in practice when +/// defining REST APIs. The common usage of `*` is in custom methods +/// which don't use the URL at all for transferring data. +/// +/// It is possible to define multiple HTTP methods for one RPC by using +/// the `additional_bindings` option. Example: +/// +/// service Messaging { +/// rpc GetMessage(GetMessageRequest) returns (Message) { +/// option (google.api.http) = { +/// get: "/v1/messages/{message_id}" +/// additional_bindings { +/// get: "/v1/users/{user_id}/messages/{message_id}" +/// } +/// }; +/// } +/// } +/// message GetMessageRequest { +/// string message_id = 1; +/// string user_id = 2; +/// } +/// +/// This enables the following two alternative HTTP JSON to RPC mappings: +/// +/// HTTP | gRPC +/// -----|----- +/// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` +/// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: +/// "123456")` +/// +/// ## Rules for HTTP mapping +/// +/// 1. Leaf request fields (recursive expansion nested messages in the request +/// message) are classified into three categories: +/// - Fields referred by the path template. They are passed via the URL path. +/// - Fields referred by the \[HttpRule.body][google.api.HttpRule.body\]. They are passed via the HTTP +/// request body. +/// - All other fields are passed via the URL query parameters, and the +/// parameter name is the field path in the request message. A repeated +/// field can be represented as multiple query parameters under the same +/// name. +/// 2. If \[HttpRule.body][google.api.HttpRule.body\] is "*", there is no URL query parameter, all fields +/// are passed via URL path and HTTP request body. +/// 3. If \[HttpRule.body][google.api.HttpRule.body\] is omitted, there is no HTTP request body, all +/// fields are passed via URL path and URL query parameters. 
+/// +/// ### Path template syntax +/// +/// Template = "/" Segments [ Verb ] ; +/// Segments = Segment { "/" Segment } ; +/// Segment = "*" | "**" | LITERAL | Variable ; +/// Variable = "{" FieldPath [ "=" Segments ] "}" ; +/// FieldPath = IDENT { "." IDENT } ; +/// Verb = ":" LITERAL ; +/// +/// The syntax `*` matches a single URL path segment. The syntax `**` matches +/// zero or more URL path segments, which must be the last part of the URL path +/// except the `Verb`. +/// +/// The syntax `Variable` matches part of the URL path as specified by its +/// template. A variable template must not contain other variables. If a variable +/// matches a single path segment, its template may be omitted, e.g. `{var}` +/// is equivalent to `{var=*}`. +/// +/// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` +/// contains any reserved character, such characters should be percent-encoded +/// before the matching. +/// +/// If a variable contains exactly one path segment, such as `"{var}"` or +/// `"{var=*}"`, when such a variable is expanded into a URL path on the client +/// side, all characters except `\[-_.~0-9a-zA-Z\]` are percent-encoded. The +/// server side does the reverse decoding. Such variables show up in the +/// [Discovery +/// Document]() as +/// `{var}`. +/// +/// If a variable contains multiple path segments, such as `"{var=foo/*}"` +/// or `"{var=**}"`, when such a variable is expanded into a URL path on the +/// client side, all characters except `\[-_.~/0-9a-zA-Z\]` are percent-encoded. +/// The server side does the reverse decoding, except "%2F" and "%2f" are left +/// unchanged. Such variables show up in the +/// [Discovery +/// Document]() as +/// `{+var}`. +/// +/// ## Using gRPC API Service Configuration +/// +/// gRPC API Service Configuration (service config) is a configuration language +/// for configuring a gRPC service to become a user-facing product. The +/// service config is simply the YAML representation of the `google.api.Service` +/// proto message. +/// +/// As an alternative to annotating your proto file, you can configure gRPC +/// transcoding in your service config YAML files. You do this by specifying a +/// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same +/// effect as the proto annotation. This can be particularly useful if you +/// have a proto that is reused in multiple services. Note that any transcoding +/// specified in the service config will override any matching transcoding +/// configuration in the proto. +/// +/// Example: +/// +/// http: +/// rules: +/// # Selects a gRPC method and applies HttpRule to it. +/// - selector: example.v1.Messaging.GetMessage +/// get: /v1/messages/{message_id}/{sub.subfield} +/// +/// ## Special notes +/// +/// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the +/// proto to JSON conversion must follow the [proto3 +/// specification](). +/// +/// While the single segment variable follows the semantics of +/// [RFC 6570]() Section 3.2.2 Simple String +/// Expansion, the multi segment variable **does not** follow RFC 6570 Section +/// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion +/// does not expand special characters like `?` and `#`, which would lead +/// to invalid URLs. As the result, gRPC Transcoding uses a custom encoding +/// for multi segment variables. +/// +/// The path variables **must not** refer to any repeated or mapped field, +/// because client libraries are not capable of handling such variable expansion. 
+///
+/// The path variables **must not** capture the leading "/" character. The reason
+/// is that the most common use case "{var}" does not capture the leading "/"
+/// character. For consistency, all path variables must share the same behavior.
+///
+/// Repeated message fields must not be mapped to URL query parameters, because
+/// no client library can support such complicated mapping.
+///
+/// If an API needs to use a JSON array for request or response body, it can map
+/// the request or response body to a repeated field. However, some gRPC
+/// Transcoding implementations may not support this feature.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct HttpRule {
+    /// Selects a method to which this rule applies.
+    ///
+    /// Refer to \[selector][google.api.DocumentationRule.selector\] for syntax details.
+    #[prost(string, tag = "1")]
+    pub selector: ::prost::alloc::string::String,
+    /// The name of the request field whose value is mapped to the HTTP request
+    /// body, or `*` for mapping all request fields not captured by the path
+    /// pattern to the HTTP body, or omitted for not having any HTTP request body.
+    ///
+    /// NOTE: the referred field must be present at the top-level of the request
+    /// message type.
+    #[prost(string, tag = "7")]
+    pub body: ::prost::alloc::string::String,
+    /// Optional. The name of the response field whose value is mapped to the HTTP
+    /// response body. When omitted, the entire response message will be used
+    /// as the HTTP response body.
+    ///
+    /// NOTE: The referred field must be present at the top-level of the response
+    /// message type.
+    #[prost(string, tag = "12")]
+    pub response_body: ::prost::alloc::string::String,
+    /// Additional HTTP bindings for the selector. Nested bindings must
+    /// not contain an `additional_bindings` field themselves (that is,
+    /// the nesting may only be one level deep).
+    #[prost(message, repeated, tag = "11")]
+    pub additional_bindings: ::prost::alloc::vec::Vec<HttpRule>,
+    /// Determines the URL pattern is matched by this rules. This pattern can be
+    /// used with any of the {get|put|post|delete|patch} methods. A custom method
+    /// can be defined using the 'custom' field.
+    #[prost(oneof = "http_rule::Pattern", tags = "2, 3, 4, 5, 6, 8")]
+    pub pattern: ::core::option::Option<http_rule::Pattern>,
+}
+/// Nested message and enum types in `HttpRule`.
+pub mod http_rule {
+    /// Determines the URL pattern is matched by this rules. This pattern can be
+    /// used with any of the {get|put|post|delete|patch} methods. A custom method
+    /// can be defined using the 'custom' field.
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum Pattern {
+        /// Maps to HTTP GET. Used for listing and getting information about
+        /// resources.
+        #[prost(string, tag = "2")]
+        Get(::prost::alloc::string::String),
+        /// Maps to HTTP PUT. Used for replacing a resource.
+        #[prost(string, tag = "3")]
+        Put(::prost::alloc::string::String),
+        /// Maps to HTTP POST. Used for creating a resource or performing an action.
+        #[prost(string, tag = "4")]
+        Post(::prost::alloc::string::String),
+        /// Maps to HTTP DELETE. Used for deleting a resource.
+        #[prost(string, tag = "5")]
+        Delete(::prost::alloc::string::String),
+        /// Maps to HTTP PATCH. Used for updating a resource.
+ #[prost(string, tag = "6")] + Patch(::prost::alloc::string::String), + /// The custom pattern is used for specifying an HTTP method that is not + /// included in the `pattern` field, such as HEAD, or "*" to leave the + /// HTTP method unspecified for this rule. The wild-card rule is useful + /// for services that provide content to Web (HTML) clients. + #[prost(message, tag = "8")] + Custom(super::CustomHttpPattern), + } +} +/// A custom pattern is used for defining custom HTTP verb. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CustomHttpPattern { + /// The name of this custom HTTP verb. + #[prost(string, tag = "1")] + pub kind: ::prost::alloc::string::String, + /// The path matched by this custom verb. + #[prost(string, tag = "2")] + pub path: ::prost::alloc::string::String, +} +/// The launch stage as defined by [Google Cloud Platform +/// Launch Stages](). +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum LaunchStage { + /// Do not use this default value. + Unspecified = 0, + /// The feature is not yet implemented. Users can not use it. + Unimplemented = 6, + /// Prelaunch features are hidden from users and are only visible internally. + Prelaunch = 7, + /// Early Access features are limited to a closed group of testers. To use + /// these features, you must sign up in advance and sign a Trusted Tester + /// agreement (which includes confidentiality provisions). These features may + /// be unstable, changed in backward-incompatible ways, and are not + /// guaranteed to be released. + EarlyAccess = 1, + /// Alpha is a limited availability test for releases before they are cleared + /// for widespread use. By Alpha, all significant design issues are resolved + /// and we are in the process of verifying functionality. Alpha customers + /// need to apply for access, agree to applicable terms, and have their + /// projects allowlisted. Alpha releases don't have to be feature complete, + /// no SLAs are provided, and there are no technical support obligations, but + /// they will be far enough along that customers can actually use them in + /// test environments or for limited-use tests -- just like they would in + /// normal production cases. + Alpha = 2, + /// Beta is the point at which we are ready to open a release for any + /// customer to use. There are no SLA or technical support obligations in a + /// Beta release. Products will be complete from a feature perspective, but + /// may have some open outstanding issues. Beta releases are suitable for + /// limited production use cases. + Beta = 3, + /// GA features are open to all developers and are considered stable and + /// fully qualified for production use. + Ga = 4, + /// Deprecated features are scheduled to be shut down and removed. For more + /// information, see the "Deprecation Policy" section of our [Terms of + /// Service]() + /// and the [Google Cloud Platform Subject to the Deprecation + /// Policy]() documentation. + Deprecated = 5, +} +impl LaunchStage { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
pub fn as_str_name(&self) -> &'static str {
+        match self {
+            LaunchStage::Unspecified => "LAUNCH_STAGE_UNSPECIFIED",
+            LaunchStage::Unimplemented => "UNIMPLEMENTED",
+            LaunchStage::Prelaunch => "PRELAUNCH",
+            LaunchStage::EarlyAccess => "EARLY_ACCESS",
+            LaunchStage::Alpha => "ALPHA",
+            LaunchStage::Beta => "BETA",
+            LaunchStage::Ga => "GA",
+            LaunchStage::Deprecated => "DEPRECATED",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "LAUNCH_STAGE_UNSPECIFIED" => Some(Self::Unspecified),
+            "UNIMPLEMENTED" => Some(Self::Unimplemented),
+            "PRELAUNCH" => Some(Self::Prelaunch),
+            "EARLY_ACCESS" => Some(Self::EarlyAccess),
+            "ALPHA" => Some(Self::Alpha),
+            "BETA" => Some(Self::Beta),
+            "GA" => Some(Self::Ga),
+            "DEPRECATED" => Some(Self::Deprecated),
+            _ => None,
+        }
+    }
+}
+/// Required information for every language.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CommonLanguageSettings {
+    /// Link to automatically generated reference documentation. Example:
+    ///
+    #[deprecated]
+    #[prost(string, tag = "1")]
+    pub reference_docs_uri: ::prost::alloc::string::String,
+    /// The destination where API teams want this client library to be published.
+    #[prost(enumeration = "ClientLibraryDestination", repeated, tag = "2")]
+    pub destinations: ::prost::alloc::vec::Vec<i32>,
+}
+/// Details about how and where to publish client libraries.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ClientLibrarySettings {
+    /// Version of the API to apply these settings to.
+    #[prost(string, tag = "1")]
+    pub version: ::prost::alloc::string::String,
+    /// Launch stage of this version of the API.
+    #[prost(enumeration = "LaunchStage", tag = "2")]
+    pub launch_stage: i32,
+    /// When using transport=rest, the client request will encode enums as
+    /// numbers rather than strings.
+    #[prost(bool, tag = "3")]
+    pub rest_numeric_enums: bool,
+    /// Settings for legacy Java features, supported in the Service YAML.
+    #[prost(message, optional, tag = "21")]
+    pub java_settings: ::core::option::Option<JavaSettings>,
+    /// Settings for C++ client libraries.
+    #[prost(message, optional, tag = "22")]
+    pub cpp_settings: ::core::option::Option<CppSettings>,
+    /// Settings for PHP client libraries.
+    #[prost(message, optional, tag = "23")]
+    pub php_settings: ::core::option::Option<PhpSettings>,
+    /// Settings for Python client libraries.
+    #[prost(message, optional, tag = "24")]
+    pub python_settings: ::core::option::Option<PythonSettings>,
+    /// Settings for Node client libraries.
+    #[prost(message, optional, tag = "25")]
+    pub node_settings: ::core::option::Option<NodeSettings>,
+    /// Settings for .NET client libraries.
+    #[prost(message, optional, tag = "26")]
+    pub dotnet_settings: ::core::option::Option<DotnetSettings>,
+    /// Settings for Ruby client libraries.
+    #[prost(message, optional, tag = "27")]
+    pub ruby_settings: ::core::option::Option<RubySettings>,
+    /// Settings for Go client libraries.
+    #[prost(message, optional, tag = "28")]
+    pub go_settings: ::core::option::Option<GoSettings>,
+}
+/// This message configures the settings for publishing [Google Cloud Client
+/// libraries]()
+/// generated from the service config.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Publishing {
+    /// A list of API method settings, e.g. the behavior for methods that use the
+    /// long-running operation pattern.
+    #[prost(message, repeated, tag = "2")]
+    pub method_settings: ::prost::alloc::vec::Vec<MethodSettings>,
+    /// Link to a place that API users can report issues. Example:
+    ///
+    #[prost(string, tag = "101")]
+    pub new_issue_uri: ::prost::alloc::string::String,
+    /// Link to product home page. Example:
+    ///
+    #[prost(string, tag = "102")]
+    pub documentation_uri: ::prost::alloc::string::String,
+    /// Used as a tracking tag when collecting data about the APIs developer
+    /// relations artifacts like docs, packages delivered to package managers,
+    /// etc. Example: "speech".
+    #[prost(string, tag = "103")]
+    pub api_short_name: ::prost::alloc::string::String,
+    /// GitHub label to apply to issues and pull requests opened for this API.
+    #[prost(string, tag = "104")]
+    pub github_label: ::prost::alloc::string::String,
+    /// GitHub teams to be added to CODEOWNERS in the directory in GitHub
+    /// containing source code for the client libraries for this API.
+    #[prost(string, repeated, tag = "105")]
+    pub codeowner_github_teams: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
+    /// A prefix used in sample code when demarking regions to be included in
+    /// documentation.
+    #[prost(string, tag = "106")]
+    pub doc_tag_prefix: ::prost::alloc::string::String,
+    /// For whom the client library is being published.
+    #[prost(enumeration = "ClientLibraryOrganization", tag = "107")]
+    pub organization: i32,
+    /// Client library settings. If the same version string appears multiple
+    /// times in this list, then the last one wins. Settings from earlier
+    /// settings with the same version string are discarded.
+    #[prost(message, repeated, tag = "109")]
+    pub library_settings: ::prost::alloc::vec::Vec<ClientLibrarySettings>,
+}
+/// Settings for Java client libraries.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct JavaSettings {
+    /// The package name to use in Java. Clobbers the java_package option
+    /// set in the protobuf. This should be used **only** by APIs
+    /// who have already set the language_settings.java.package_name" field
+    /// in gapic.yaml. API teams should use the protobuf java_package option
+    /// where possible.
+    ///
+    /// Example of a YAML configuration::
+    ///
+    ///     publishing:
+    ///       java_settings:
+    ///         library_package: com.google.cloud.pubsub.v1
+    #[prost(string, tag = "1")]
+    pub library_package: ::prost::alloc::string::String,
+    /// Configure the Java class name to use instead of the service's for its
+    /// corresponding generated GAPIC client. Keys are fully-qualified
+    /// service names as they appear in the protobuf (including the full
+    /// the language_settings.java.interface_names" field in gapic.yaml. API
+    /// teams should otherwise use the service name as it appears in the
+    /// protobuf.
+    ///
+    /// Example of a YAML configuration::
+    ///
+    ///     publishing:
+    ///       java_settings:
+    ///         service_class_names:
+    ///           - google.pubsub.v1.Publisher: TopicAdmin
+    ///           - google.pubsub.v1.Subscriber: SubscriptionAdmin
+    #[prost(map = "string, string", tag = "2")]
+    pub service_class_names:
+        ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>,
+    /// Some settings.
+    #[prost(message, optional, tag = "3")]
+    pub common: ::core::option::Option<CommonLanguageSettings>,
+}
+/// Settings for C++ client libraries.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CppSettings {
+    /// Some settings.
+    #[prost(message, optional, tag = "1")]
+    pub common: ::core::option::Option<CommonLanguageSettings>,
+}
+/// Settings for Php client libraries.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct PhpSettings {
+    /// Some settings.
+    #[prost(message, optional, tag = "1")]
+    pub common: ::core::option::Option<CommonLanguageSettings>,
+}
+/// Settings for Python client libraries.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct PythonSettings {
+    /// Some settings.
+    #[prost(message, optional, tag = "1")]
+    pub common: ::core::option::Option<CommonLanguageSettings>,
+}
+/// Settings for Node client libraries.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct NodeSettings {
+    /// Some settings.
+    #[prost(message, optional, tag = "1")]
+    pub common: ::core::option::Option<CommonLanguageSettings>,
+}
+/// Settings for Dotnet client libraries.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct DotnetSettings {
+    /// Some settings.
+    #[prost(message, optional, tag = "1")]
+    pub common: ::core::option::Option<CommonLanguageSettings>,
+}
+/// Settings for Ruby client libraries.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct RubySettings {
+    /// Some settings.
+    #[prost(message, optional, tag = "1")]
+    pub common: ::core::option::Option<CommonLanguageSettings>,
+}
+/// Settings for Go client libraries.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct GoSettings {
+    /// Some settings.
+    #[prost(message, optional, tag = "1")]
+    pub common: ::core::option::Option<CommonLanguageSettings>,
+}
+/// Describes the generator configuration for a method.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct MethodSettings {
+    /// The fully qualified name of the method, for which the options below apply.
+    /// This is used to find the method to apply the options.
+    #[prost(string, tag = "1")]
+    pub selector: ::prost::alloc::string::String,
+    /// Describes settings to use for long-running operations when generating
+    /// API methods for RPCs. Complements RPCs that use the annotations in
+    /// google/longrunning/operations.proto.
+    ///
+    /// Example of a YAML configuration::
+    ///
+    ///     publishing:
+    ///       method_behavior:
+    ///         - selector: CreateAdDomain
+    ///           long_running:
+    ///             initial_poll_delay:
+    ///               seconds: 60 # 1 minute
+    ///             poll_delay_multiplier: 1.5
+    ///             max_poll_delay:
+    ///               seconds: 360 # 6 minutes
+    ///             total_poll_timeout:
+    ///               seconds: 54000 # 90 minutes
+    #[prost(message, optional, tag = "2")]
+    pub long_running: ::core::option::Option<method_settings::LongRunning>,
+}
+/// Nested message and enum types in `MethodSettings`.
+pub mod method_settings {
+    /// Describes settings to use when generating API methods that use the
+    /// long-running operation pattern.
+    /// All default values below are from those used in the client library
+    /// generators (e.g.
+    /// \[Java\]()).
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Message)]
+    pub struct LongRunning {
+        /// Initial delay after which the first poll request will be made.
+        /// Default value: 5 seconds.
+        #[prost(message, optional, tag = "1")]
+        pub initial_poll_delay: ::core::option::Option<::prost_types::Duration>,
+        /// Multiplier to gradually increase delay between subsequent polls until it
+        /// reaches max_poll_delay.
+        /// Default value: 1.5.
+ #[prost(float, tag = "2")] + pub poll_delay_multiplier: f32, + /// Maximum time between two subsequent poll requests. + /// Default value: 45 seconds. + #[prost(message, optional, tag = "3")] + pub max_poll_delay: ::core::option::Option<::prost_types::Duration>, + /// Total polling timeout. + /// Default value: 5 minutes. + #[prost(message, optional, tag = "4")] + pub total_poll_timeout: ::core::option::Option<::prost_types::Duration>, + } +} +/// The organization for which the client libraries are being published. +/// Affects the url where generated docs are published, etc. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum ClientLibraryOrganization { + /// Not useful. + Unspecified = 0, + /// Google Cloud Platform Org. + Cloud = 1, + /// Ads (Advertising) Org. + Ads = 2, + /// Photos Org. + Photos = 3, + /// Street View Org. + StreetView = 4, +} +impl ClientLibraryOrganization { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + ClientLibraryOrganization::Unspecified => "CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED", + ClientLibraryOrganization::Cloud => "CLOUD", + ClientLibraryOrganization::Ads => "ADS", + ClientLibraryOrganization::Photos => "PHOTOS", + ClientLibraryOrganization::StreetView => "STREET_VIEW", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED" => Some(Self::Unspecified), + "CLOUD" => Some(Self::Cloud), + "ADS" => Some(Self::Ads), + "PHOTOS" => Some(Self::Photos), + "STREET_VIEW" => Some(Self::StreetView), + _ => None, + } + } +} +/// To where should client libraries be published? +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum ClientLibraryDestination { + /// Client libraries will neither be generated nor published to package + /// managers. + Unspecified = 0, + /// Generate the client library in a repo under github.com/googleapis, + /// but don't publish it to package managers. + Github = 10, + /// Publish the library to package managers like nuget.org and npmjs.com. + PackageManager = 20, +} +impl ClientLibraryDestination { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + ClientLibraryDestination::Unspecified => "CLIENT_LIBRARY_DESTINATION_UNSPECIFIED", + ClientLibraryDestination::Github => "GITHUB", + ClientLibraryDestination::PackageManager => "PACKAGE_MANAGER", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "CLIENT_LIBRARY_DESTINATION_UNSPECIFIED" => Some(Self::Unspecified), + "GITHUB" => Some(Self::Github), + "PACKAGE_MANAGER" => Some(Self::PackageManager), + _ => None, + } + } +} +/// An indicator of the behavior of a given field (for example, that a field +/// is required in requests, or given as output but ignored as input). 
+/// This **does not** change the behavior in protocol buffers itself; it only
+/// denotes the behavior and may affect how API tooling handles the field.
+///
+/// Note: This enum **may** receive new values in the future.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum FieldBehavior {
+    /// Conventional default for enums. Do not use this.
+    Unspecified = 0,
+    /// Specifically denotes a field as optional.
+    /// While all fields in protocol buffers are optional, this may be specified
+    /// for emphasis if appropriate.
+    Optional = 1,
+    /// Denotes a field as required.
+    /// This indicates that the field **must** be provided as part of the request,
+    /// and failure to do so will cause an error (usually `INVALID_ARGUMENT`).
+    Required = 2,
+    /// Denotes a field as output only.
+    /// This indicates that the field is provided in responses, but including the
+    /// field in a request does nothing (the server *must* ignore it and
+    /// *must not* throw an error as a result of the field's presence).
+    OutputOnly = 3,
+    /// Denotes a field as input only.
+    /// This indicates that the field is provided in requests, and the
+    /// corresponding field is not included in output.
+    InputOnly = 4,
+    /// Denotes a field as immutable.
+    /// This indicates that the field may be set once in a request to create a
+    /// resource, but may not be changed thereafter.
+    Immutable = 5,
+    /// Denotes that a (repeated) field is an unordered list.
+    /// This indicates that the service may provide the elements of the list
+    /// in any arbitrary order, rather than the order the user originally
+    /// provided. Additionally, the list's order may or may not be stable.
+    UnorderedList = 6,
+    /// Denotes that this field returns a non-empty default value if not set.
+    /// This indicates that if the user provides the empty value in a request,
+    /// a non-empty value will be returned. The user will not be aware of what
+    /// non-empty value to expect.
+    NonEmptyDefault = 7,
+}
+impl FieldBehavior {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            FieldBehavior::Unspecified => "FIELD_BEHAVIOR_UNSPECIFIED",
+            FieldBehavior::Optional => "OPTIONAL",
+            FieldBehavior::Required => "REQUIRED",
+            FieldBehavior::OutputOnly => "OUTPUT_ONLY",
+            FieldBehavior::InputOnly => "INPUT_ONLY",
+            FieldBehavior::Immutable => "IMMUTABLE",
+            FieldBehavior::UnorderedList => "UNORDERED_LIST",
+            FieldBehavior::NonEmptyDefault => "NON_EMPTY_DEFAULT",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "FIELD_BEHAVIOR_UNSPECIFIED" => Some(Self::Unspecified),
+            "OPTIONAL" => Some(Self::Optional),
+            "REQUIRED" => Some(Self::Required),
+            "OUTPUT_ONLY" => Some(Self::OutputOnly),
+            "INPUT_ONLY" => Some(Self::InputOnly),
+            "IMMUTABLE" => Some(Self::Immutable),
+            "UNORDERED_LIST" => Some(Self::UnorderedList),
+            "NON_EMPTY_DEFAULT" => Some(Self::NonEmptyDefault),
+            _ => None,
+        }
+    }
+}
+/// A simple descriptor of a resource type.
+/// +/// ResourceDescriptor annotates a resource message (either by means of a +/// protobuf annotation or use in the service config), and associates the +/// resource's schema, the resource type, and the pattern of the resource name. +/// +/// Example: +/// +/// message Topic { +/// // Indicates this message defines a resource schema. +/// // Declares the resource type in the format of {service}/{kind}. +/// // For Kubernetes resources, the format is {api group}/{kind}. +/// option (google.api.resource) = { +/// type: "pubsub.googleapis.com/Topic" +/// pattern: "projects/{project}/topics/{topic}" +/// }; +/// } +/// +/// The ResourceDescriptor Yaml config will look like: +/// +/// resources: +/// - type: "pubsub.googleapis.com/Topic" +/// pattern: "projects/{project}/topics/{topic}" +/// +/// Sometimes, resources have multiple patterns, typically because they can +/// live under multiple parents. +/// +/// Example: +/// +/// message LogEntry { +/// option (google.api.resource) = { +/// type: "logging.googleapis.com/LogEntry" +/// pattern: "projects/{project}/logs/{log}" +/// pattern: "folders/{folder}/logs/{log}" +/// pattern: "organizations/{organization}/logs/{log}" +/// pattern: "billingAccounts/{billing_account}/logs/{log}" +/// }; +/// } +/// +/// The ResourceDescriptor Yaml config will look like: +/// +/// resources: +/// - type: 'logging.googleapis.com/LogEntry' +/// pattern: "projects/{project}/logs/{log}" +/// pattern: "folders/{folder}/logs/{log}" +/// pattern: "organizations/{organization}/logs/{log}" +/// pattern: "billingAccounts/{billing_account}/logs/{log}" +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ResourceDescriptor { + /// The resource type. It must be in the format of + /// {service_name}/{resource_type_kind}. The `resource_type_kind` must be + /// singular and must not include version numbers. + /// + /// Example: `storage.googleapis.com/Bucket` + /// + /// The value of the resource_type_kind must follow the regular expression + /// /\[A-Za-z][a-zA-Z0-9\]+/. It should start with an upper case character and + /// should use PascalCase (UpperCamelCase). The maximum number of + /// characters allowed for the `resource_type_kind` is 100. + #[prost(string, tag = "1")] + pub r#type: ::prost::alloc::string::String, + /// Optional. The relative resource name pattern associated with this resource + /// type. The DNS prefix of the full resource name shouldn't be specified here. + /// + /// The path pattern must follow the syntax, which aligns with HTTP binding + /// syntax: + /// + /// Template = Segment { "/" Segment } ; + /// Segment = LITERAL | Variable ; + /// Variable = "{" LITERAL "}" ; + /// + /// Examples: + /// + /// - "projects/{project}/topics/{topic}" + /// - "projects/{project}/knowledgeBases/{knowledge_base}" + /// + /// The components in braces correspond to the IDs for each resource in the + /// hierarchy. It is expected that, if multiple patterns are provided, + /// the same component name (e.g. "project") refers to IDs of the same + /// type of resource. + #[prost(string, repeated, tag = "2")] + pub pattern: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// Optional. The field on the resource that designates the resource name + /// field. If omitted, this is assumed to be "name". + #[prost(string, tag = "3")] + pub name_field: ::prost::alloc::string::String, + /// Optional. The historical or future-looking state of the resource pattern. 
+    ///
+    /// Example:
+    ///
+    ///     // The InspectTemplate message originally only supported resource
+    ///     // names with organization, and project was added later.
+    ///     message InspectTemplate {
+    ///       option (google.api.resource) = {
+    ///         type: "dlp.googleapis.com/InspectTemplate"
+    ///         pattern:
+    ///         "organizations/{organization}/inspectTemplates/{inspect_template}"
+    ///         pattern: "projects/{project}/inspectTemplates/{inspect_template}"
+    ///         history: ORIGINALLY_SINGLE_PATTERN
+    ///       };
+    ///     }
+    #[prost(enumeration = "resource_descriptor::History", tag = "4")]
+    pub history: i32,
+    /// The plural name used in the resource name and permission names, such as
+    /// 'projects' for the resource name of 'projects/{project}' and the permission
+    /// name of 'cloudresourcemanager.googleapis.com/projects.get'. It is the same
+    /// concept of the `plural` field in k8s CRD spec
+    ///
+    ///
+    /// Note: The plural form is required even for singleton resources. See
+    ///
+    #[prost(string, tag = "5")]
+    pub plural: ::prost::alloc::string::String,
+    /// The same concept of the `singular` field in k8s CRD spec
+    ///
+    /// Such as "project" for the `resourcemanager.googleapis.com/Project` type.
+    #[prost(string, tag = "6")]
+    pub singular: ::prost::alloc::string::String,
+    /// Style flag(s) for this resource.
+    /// These indicate that a resource is expected to conform to a given
+    /// style. See the specific style flags for additional information.
+    #[prost(enumeration = "resource_descriptor::Style", repeated, tag = "10")]
+    pub style: ::prost::alloc::vec::Vec<i32>,
+}
+/// Nested message and enum types in `ResourceDescriptor`.
+pub mod resource_descriptor {
+    /// A description of the historical or future-looking state of the
+    /// resource pattern.
+    #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+    #[repr(i32)]
+    pub enum History {
+        /// The "unset" value.
+        Unspecified = 0,
+        /// The resource originally had one pattern and launched as such, and
+        /// additional patterns were added later.
+        OriginallySinglePattern = 1,
+        /// The resource has one pattern, but the API owner expects to add more
+        /// later. (This is the inverse of ORIGINALLY_SINGLE_PATTERN, and prevents
+        /// that from being necessary once there are multiple patterns.)
+        FutureMultiPattern = 2,
+    }
+    impl History {
+        /// String value of the enum field names used in the ProtoBuf definition.
+        ///
+        /// The values are not transformed in any way and thus are considered stable
+        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+        pub fn as_str_name(&self) -> &'static str {
+            match self {
+                History::Unspecified => "HISTORY_UNSPECIFIED",
+                History::OriginallySinglePattern => "ORIGINALLY_SINGLE_PATTERN",
+                History::FutureMultiPattern => "FUTURE_MULTI_PATTERN",
+            }
+        }
+        /// Creates an enum from field names used in the ProtoBuf definition.
+        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+            match value {
+                "HISTORY_UNSPECIFIED" => Some(Self::Unspecified),
+                "ORIGINALLY_SINGLE_PATTERN" => Some(Self::OriginallySinglePattern),
+                "FUTURE_MULTI_PATTERN" => Some(Self::FutureMultiPattern),
+                _ => None,
+            }
+        }
+    }
+    /// A flag representing a specific style that a resource claims to conform to.
+    #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+    #[repr(i32)]
+    pub enum Style {
+        /// The unspecified value. Do not use.
+        Unspecified = 0,
+        /// This resource is intended to be "declarative-friendly".
+        ///
+        /// Declarative-friendly resources must be more strictly consistent, and
+        /// setting this to true communicates to tools that this resource should
+        /// adhere to declarative-friendly expectations.
+        ///
+        /// Note: This is used by the API linter (linter.aip.dev) to enable
+        /// additional checks.
+        DeclarativeFriendly = 1,
+    }
+    impl Style {
+        /// String value of the enum field names used in the ProtoBuf definition.
+        ///
+        /// The values are not transformed in any way and thus are considered stable
+        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+        pub fn as_str_name(&self) -> &'static str {
+            match self {
+                Style::Unspecified => "STYLE_UNSPECIFIED",
+                Style::DeclarativeFriendly => "DECLARATIVE_FRIENDLY",
+            }
+        }
+        /// Creates an enum from field names used in the ProtoBuf definition.
+        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+            match value {
+                "STYLE_UNSPECIFIED" => Some(Self::Unspecified),
+                "DECLARATIVE_FRIENDLY" => Some(Self::DeclarativeFriendly),
+                _ => None,
+            }
+        }
+    }
+}
+/// Defines a proto annotation that describes a string field that refers to
+/// an API resource.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ResourceReference {
+    /// The resource type that the annotated field references.
+    ///
+    /// Example:
+    ///
+    ///     message Subscription {
+    ///       string topic = 2 [(google.api.resource_reference) = {
+    ///         type: "pubsub.googleapis.com/Topic"
+    ///       }];
+    ///     }
+    ///
+    /// Occasionally, a field may reference an arbitrary resource. In this case,
+    /// APIs use the special value * in their resource reference.
+    ///
+    /// Example:
+    ///
+    ///     message GetIamPolicyRequest {
+    ///       string resource = 2 [(google.api.resource_reference) = {
+    ///         type: "*"
+    ///       }];
+    ///     }
+    #[prost(string, tag = "1")]
+    pub r#type: ::prost::alloc::string::String,
+    /// The resource type of a child collection that the annotated field
+    /// references. This is useful for annotating the `parent` field that
+    /// doesn't have a fixed resource type.
+    ///
+    /// Example:
+    ///
+    ///     message ListLogEntriesRequest {
+    ///       string parent = 1 [(google.api.resource_reference) = {
+    ///         child_type: "logging.googleapis.com/LogEntry"
+    ///       };
+    ///     }
+    #[prost(string, tag = "2")]
+    pub child_type: ::prost::alloc::string::String,
+}
+/// A description of a label.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct LabelDescriptor {
+    /// The label key.
+    #[prost(string, tag = "1")]
+    pub key: ::prost::alloc::string::String,
+    /// The type of data that can be assigned to the label.
+    #[prost(enumeration = "label_descriptor::ValueType", tag = "2")]
+    pub value_type: i32,
+    /// A human-readable description for the label.
+    #[prost(string, tag = "3")]
+    pub description: ::prost::alloc::string::String,
+}
+/// Nested message and enum types in `LabelDescriptor`.
+pub mod label_descriptor {
+    /// Value types that can be used as label values.
+    #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+    #[repr(i32)]
+    pub enum ValueType {
+        /// A variable-length string. This is the default.
+        String = 0,
+        /// Boolean; true or false.
+        Bool = 1,
+        /// A 64-bit signed integer.
+        Int64 = 2,
+    }
+    impl ValueType {
+        /// String value of the enum field names used in the ProtoBuf definition.
+ ///
+ /// The values are not transformed in any way and thus are considered stable
+ /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+ pub fn as_str_name(&self) -> &'static str {
+ match self {
+ ValueType::String => "STRING",
+ ValueType::Bool => "BOOL",
+ ValueType::Int64 => "INT64",
+ }
+ }
+ /// Creates an enum from field names used in the ProtoBuf definition.
+ pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+ match value {
+ "STRING" => Some(Self::String),
+ "BOOL" => Some(Self::Bool),
+ "INT64" => Some(Self::Int64),
+ _ => None,
+ }
+ }
+ }
+}
+/// An object that describes the schema of a \[MonitoredResource][google.api.MonitoredResource\] object using a
+/// type name and a set of labels. For example, the monitored resource
+/// descriptor for Google Compute Engine VM instances has a type of
+/// `"gce_instance"` and specifies the use of the labels `"instance_id"` and
+/// `"zone"` to identify particular VM instances.
+///
+/// Different APIs can support different monitored resource types. APIs generally
+/// provide a `list` method that returns the monitored resource descriptors used
+/// by the API.
+///
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct MonitoredResourceDescriptor {
+ /// Optional. The resource name of the monitored resource descriptor:
+ /// `"projects/{project_id}/monitoredResourceDescriptors/{type}"` where
+ /// {type} is the value of the `type` field in this object and
+ /// {project_id} is a project ID that provides API-specific context for
+ /// accessing the type. APIs that do not use project information can use the
+ /// resource name format `"monitoredResourceDescriptors/{type}"`.
+ #[prost(string, tag = "5")]
+ pub name: ::prost::alloc::string::String,
+ /// Required. The monitored resource type. For example, the type
+ /// `"cloudsql_database"` represents databases in Google Cloud SQL.
+ /// For a list of types, see [Monitoring resource
+ /// types]()
+ /// and [Logging resource
+ /// types]().
+ #[prost(string, tag = "1")]
+ pub r#type: ::prost::alloc::string::String,
+ /// Optional. A concise name for the monitored resource type that might be
+ /// displayed in user interfaces. It should be a Title Cased Noun Phrase,
+ /// without any article or other determiners. For example,
+ /// `"Google Cloud SQL Database"`.
+ #[prost(string, tag = "2")]
+ pub display_name: ::prost::alloc::string::String,
+ /// Optional. A detailed description of the monitored resource type that might
+ /// be used in documentation.
+ #[prost(string, tag = "3")]
+ pub description: ::prost::alloc::string::String,
+ /// Required. A set of labels used to describe instances of this monitored
+ /// resource type. For example, an individual Google Cloud SQL database is
+ /// identified by values for the labels `"database_id"` and `"zone"`.
+ #[prost(message, repeated, tag = "4")]
+ pub labels: ::prost::alloc::vec::Vec<LabelDescriptor>,
+ /// Optional. The launch stage of the monitored resource definition.
+ #[prost(enumeration = "LaunchStage", tag = "7")]
+ pub launch_stage: i32,
+}
+/// An object representing a resource that can be used for monitoring, logging,
+/// billing, or other purposes. Examples include virtual machine instances,
+/// databases, and storage devices such as disks. The `type` field identifies a
+/// \[MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor\] object that describes the resource's
+/// schema.
Information in the `labels` field identifies the actual resource and +/// its attributes according to the schema. For example, a particular Compute +/// Engine VM instance could be represented by the following object, because the +/// \[MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor\] for `"gce_instance"` has labels +/// `"project_id"`, `"instance_id"` and `"zone"`: +/// +/// { "type": "gce_instance", +/// "labels": { "project_id": "my-project", +/// "instance_id": "12345678901234", +/// "zone": "us-central1-a" }} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MonitoredResource { + /// Required. The monitored resource type. This field must match + /// the `type` field of a \[MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor\] object. For + /// example, the type of a Compute Engine VM instance is `gce_instance`. + /// Some descriptors include the service name in the type; for example, + /// the type of a Datastream stream is `datastream.googleapis.com/Stream`. + #[prost(string, tag = "1")] + pub r#type: ::prost::alloc::string::String, + /// Required. Values for all of the labels listed in the associated monitored + /// resource descriptor. For example, Compute Engine VM instances use the + /// labels `"project_id"`, `"instance_id"`, and `"zone"`. + #[prost(map = "string, string", tag = "2")] + pub labels: + ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, +} +/// Auxiliary metadata for a \[MonitoredResource][google.api.MonitoredResource\] object. +/// \[MonitoredResource][google.api.MonitoredResource\] objects contain the minimum set of information to +/// uniquely identify a monitored resource instance. There is some other useful +/// auxiliary metadata. Monitoring and Logging use an ingestion +/// pipeline to extract metadata for cloud resources of all types, and store +/// the metadata in this message. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MonitoredResourceMetadata { + /// Output only. Values for predefined system metadata labels. + /// System labels are a kind of metadata extracted by Google, including + /// "machine_image", "vpc", "subnet_id", + /// "security_group", "name", etc. + /// System label values can be only strings, Boolean values, or a list of + /// strings. For example: + /// + /// { "name": "my-test-instance", + /// "security_group": ["a", "b", "c"], + /// "spot_instance": false } + #[prost(message, optional, tag = "1")] + pub system_labels: ::core::option::Option<::prost_types::Struct>, + /// Output only. A map of user-defined metadata labels. + #[prost(map = "string, string", tag = "2")] + pub user_labels: + ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, +} diff --git a/opentelemetry-stackdriver/src/proto/devtools/cloudtrace/v2.rs b/opentelemetry-stackdriver/src/proto/devtools/cloudtrace/v2.rs new file mode 100644 index 00000000..aef8a024 --- /dev/null +++ b/opentelemetry-stackdriver/src/proto/devtools/cloudtrace/v2.rs @@ -0,0 +1,626 @@ +/// A span represents a single operation within a trace. Spans can be +/// nested to form a trace tree. Often, a trace contains a root span +/// that describes the end-to-end latency, and one or more subspans for +/// its sub-operations. +/// +/// A trace can also contain multiple root spans, or none at all. +/// Spans do not need to be contiguous. 
There might be
+/// gaps or overlaps between spans in a trace.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Span {
+ /// Required. The resource name of the span in the following format:
+ ///
+ /// * `projects/\[PROJECT_ID]/traces/[TRACE_ID]/spans/[SPAN_ID\]`
+ ///
+ /// `\[TRACE_ID\]` is a unique identifier for a trace within a project;
+ /// it is a 32-character hexadecimal encoding of a 16-byte array. It should
+ /// not be zero.
+ ///
+ /// `\[SPAN_ID\]` is a unique identifier for a span within a trace; it
+ /// is a 16-character hexadecimal encoding of an 8-byte array. It should not
+ /// be zero.
+ #[prost(string, tag = "1")]
+ pub name: ::prost::alloc::string::String,
+ /// Required. The `\[SPAN_ID\]` portion of the span's resource name.
+ #[prost(string, tag = "2")]
+ pub span_id: ::prost::alloc::string::String,
+ /// The `\[SPAN_ID\]` of this span's parent span. If this is a root span,
+ /// then this field must be empty.
+ #[prost(string, tag = "3")]
+ pub parent_span_id: ::prost::alloc::string::String,
+ /// Required. A description of the span's operation (up to 128 bytes).
+ /// Cloud Trace displays the description in the
+ /// Cloud console.
+ /// For example, the display name can be a qualified method name or a file name
+ /// and a line number where the operation is called. A best practice is to use
+ /// the same display name within an application and at the same call point.
+ /// This makes it easier to correlate spans in different traces.
+ #[prost(message, optional, tag = "4")]
+ pub display_name: ::core::option::Option<TruncatableString>,
+ /// Required. The start time of the span. On the client side, this is the time
+ /// kept by the local machine where the span execution starts. On the server
+ /// side, this is the time when the server's application handler starts
+ /// running.
+ #[prost(message, optional, tag = "5")]
+ pub start_time: ::core::option::Option<::prost_types::Timestamp>,
+ /// Required. The end time of the span. On the client side, this is the time
+ /// kept by the local machine where the span execution ends. On the server
+ /// side, this is the time when the server application handler stops running.
+ #[prost(message, optional, tag = "6")]
+ pub end_time: ::core::option::Option<::prost_types::Timestamp>,
+ /// A set of attributes on the span. You can have up to 32 attributes per
+ /// span.
+ #[prost(message, optional, tag = "7")]
+ pub attributes: ::core::option::Option<span::Attributes>,
+ /// Stack trace captured at the start of the span.
+ #[prost(message, optional, tag = "8")]
+ pub stack_trace: ::core::option::Option<StackTrace>,
+ /// A set of time events. You can have up to 32 annotations and 128 message
+ /// events per span.
+ #[prost(message, optional, tag = "9")]
+ pub time_events: ::core::option::Option<span::TimeEvents>,
+ /// Links associated with the span. You can have up to 128 links per Span.
+ #[prost(message, optional, tag = "10")]
+ pub links: ::core::option::Option<span::Links>,
+ /// Optional. The final status for this span.
+ #[prost(message, optional, tag = "11")]
+ pub status: ::core::option::Option<super::super::super::rpc::Status>,
+ /// Optional. Set this parameter to indicate whether this span is in
+ /// the same process as its parent. If you do not set this parameter,
+ /// Trace is unable to take advantage of this helpful information.
+ #[prost(message, optional, tag = "12")]
+ pub same_process_as_parent_span: ::core::option::Option<bool>,
+ /// Optional. The number of child spans that were generated while this span
+ /// was active.
If set, this allows implementations to detect missing child spans.
+ #[prost(message, optional, tag = "13")]
+ pub child_span_count: ::core::option::Option<i32>,
+ /// Optional. Distinguishes between spans generated in a particular context.
+ /// For example, two spans with the same name may be distinguished using
+ /// `CLIENT` (caller) and `SERVER` (callee) to identify an RPC call.
+ #[prost(enumeration = "span::SpanKind", tag = "14")]
+ pub span_kind: i32,
+}
+/// Nested message and enum types in `Span`.
+pub mod span {
+ /// A set of attributes as key-value pairs.
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Message)]
+ pub struct Attributes {
+ /// A set of attributes. Each attribute's key can be up to 128 bytes
+ /// long. The value can be a string up to 256 bytes, a signed 64-bit integer,
+ /// or the boolean values `true` or `false`. For example:
+ ///
+ /// "/instance_id": { "string_value": { "value": "my-instance" } }
+ /// "/http/request_bytes": { "int_value": 300 }
+ /// "abc.com/myattribute": { "bool_value": false }
+ #[prost(map = "string, message", tag = "1")]
+ pub attribute_map:
+ ::std::collections::HashMap<::prost::alloc::string::String, super::AttributeValue>,
+ /// The number of attributes that were discarded. Attributes can be discarded
+ /// because their keys are too long or because there are too many attributes.
+ /// If this value is 0 then all attributes are valid.
+ #[prost(int32, tag = "2")]
+ pub dropped_attributes_count: i32,
+ }
+ /// A time-stamped annotation or message event in the Span.
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Message)]
+ pub struct TimeEvent {
+ /// The timestamp indicating the time the event occurred.
+ #[prost(message, optional, tag = "1")]
+ pub time: ::core::option::Option<::prost_types::Timestamp>,
+ /// A `TimeEvent` can contain either an `Annotation` object or a
+ /// `MessageEvent` object, but not both.
+ #[prost(oneof = "time_event::Value", tags = "2, 3")]
+ pub value: ::core::option::Option<time_event::Value>,
+ }
+ /// Nested message and enum types in `TimeEvent`.
+ pub mod time_event {
+ /// Text annotation with a set of attributes.
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Message)]
+ pub struct Annotation {
+ /// A user-supplied message describing the event. The maximum length for
+ /// the description is 256 bytes.
+ #[prost(message, optional, tag = "1")]
+ pub description: ::core::option::Option<super::super::TruncatableString>,
+ /// A set of attributes on the annotation. You can have up to 4 attributes
+ /// per Annotation.
+ #[prost(message, optional, tag = "2")]
+ pub attributes: ::core::option::Option<super::Attributes>,
+ }
+ /// An event describing a message sent/received between Spans.
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Message)]
+ pub struct MessageEvent {
+ /// Type of MessageEvent. Indicates whether the message was sent or
+ /// received.
+ #[prost(enumeration = "message_event::Type", tag = "1")]
+ pub r#type: i32,
+ /// An identifier for the MessageEvent's message that can be used to match
+ /// `SENT` and `RECEIVED` MessageEvents.
+ #[prost(int64, tag = "2")]
+ pub id: i64,
+ /// The number of uncompressed bytes sent or received.
+ #[prost(int64, tag = "3")]
+ pub uncompressed_size_bytes: i64,
+ /// The number of compressed bytes sent or received. If missing, the
+ /// compressed size is assumed to be the same size as the uncompressed
+ /// size.
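+ ///
+ /// A construction sketch (illustrative only, not part of the generated
+ /// file); the values are assumptions, and enum fields are carried as
+ /// `i32` by prost:
+ ///
+ /// ```ignore
+ /// let event = MessageEvent {
+ ///     // `SENT`, cast to the i32 representation prost uses for enums.
+ ///     r#type: message_event::Type::Sent as i32,
+ ///     id: 1,
+ ///     uncompressed_size_bytes: 1024,
+ ///     compressed_size_bytes: 512,
+ /// };
+ /// ```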
+ #[prost(int64, tag = "4")]
+ pub compressed_size_bytes: i64,
+ }
+ /// Nested message and enum types in `MessageEvent`.
+ pub mod message_event {
+ /// Indicates whether the message was sent or received.
+ #[derive(
+ Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration,
+ )]
+ #[repr(i32)]
+ pub enum Type {
+ /// Unknown event type.
+ Unspecified = 0,
+ /// Indicates a sent message.
+ Sent = 1,
+ /// Indicates a received message.
+ Received = 2,
+ }
+ impl Type {
+ /// String value of the enum field names used in the ProtoBuf definition.
+ ///
+ /// The values are not transformed in any way and thus are considered stable
+ /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+ pub fn as_str_name(&self) -> &'static str {
+ match self {
+ Type::Unspecified => "TYPE_UNSPECIFIED",
+ Type::Sent => "SENT",
+ Type::Received => "RECEIVED",
+ }
+ }
+ /// Creates an enum from field names used in the ProtoBuf definition.
+ pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+ match value {
+ "TYPE_UNSPECIFIED" => Some(Self::Unspecified),
+ "SENT" => Some(Self::Sent),
+ "RECEIVED" => Some(Self::Received),
+ _ => None,
+ }
+ }
+ }
+ }
+ /// A `TimeEvent` can contain either an `Annotation` object or a
+ /// `MessageEvent` object, but not both.
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Oneof)]
+ pub enum Value {
+ /// Text annotation with a set of attributes.
+ #[prost(message, tag = "2")]
+ Annotation(Annotation),
+ /// An event describing a message sent/received between Spans.
+ #[prost(message, tag = "3")]
+ MessageEvent(MessageEvent),
+ }
+ }
+ /// A collection of `TimeEvent`s. A `TimeEvent` is a time-stamped annotation
+ /// on the span, consisting of either user-supplied key:value pairs, or
+ /// details of a message sent/received between Spans.
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Message)]
+ pub struct TimeEvents {
+ /// A collection of `TimeEvent`s.
+ #[prost(message, repeated, tag = "1")]
+ pub time_event: ::prost::alloc::vec::Vec<TimeEvent>,
+ /// The number of dropped annotations in all the included time events.
+ /// If the value is 0, then no annotations were dropped.
+ #[prost(int32, tag = "2")]
+ pub dropped_annotations_count: i32,
+ /// The number of dropped message events in all the included time events.
+ /// If the value is 0, then no message events were dropped.
+ #[prost(int32, tag = "3")]
+ pub dropped_message_events_count: i32,
+ }
+ /// A pointer from the current span to another span in the same trace or in a
+ /// different trace. For example, this can be used in batching operations,
+ /// where a single batch handler processes multiple requests from different
+ /// traces or when the handler receives a request from a different project.
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Message)]
+ pub struct Link {
+ /// The `\[TRACE_ID\]` for a trace within a project.
+ #[prost(string, tag = "1")]
+ pub trace_id: ::prost::alloc::string::String,
+ /// The `\[SPAN_ID\]` for a span within a trace.
+ #[prost(string, tag = "2")]
+ pub span_id: ::prost::alloc::string::String,
+ /// The relationship of the current span relative to the linked span.
+ #[prost(enumeration = "link::Type", tag = "3")]
+ pub r#type: i32,
+ /// A set of attributes on the link. Up to 32 attributes can be
+ /// specified per link.
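+ ///
+ /// A hedged sketch of building a `Link` (the hex IDs are illustrative,
+ /// not real trace data):
+ ///
+ /// ```ignore
+ /// let link = Link {
+ ///     trace_id: "4bf92f3577b34da6a3ce929d0e0e4736".to_owned(),
+ ///     span_id: "00f067aa0ba902b7".to_owned(),
+ ///     r#type: link::Type::ParentLinkedSpan as i32,
+ ///     // No attributes attached in this sketch.
+ ///     attributes: None,
+ /// };
+ /// ```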
+ #[prost(message, optional, tag = "4")]
+ pub attributes: ::core::option::Option<Attributes>,
+ }
+ /// Nested message and enum types in `Link`.
+ pub mod link {
+ /// The relationship of the current span relative to the linked span: child,
+ /// parent, or unspecified.
+ #[derive(
+ Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration,
+ )]
+ #[repr(i32)]
+ pub enum Type {
+ /// The relationship of the two spans is unknown.
+ Unspecified = 0,
+ /// The linked span is a child of the current span.
+ ChildLinkedSpan = 1,
+ /// The linked span is a parent of the current span.
+ ParentLinkedSpan = 2,
+ }
+ impl Type {
+ /// String value of the enum field names used in the ProtoBuf definition.
+ ///
+ /// The values are not transformed in any way and thus are considered stable
+ /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+ pub fn as_str_name(&self) -> &'static str {
+ match self {
+ Type::Unspecified => "TYPE_UNSPECIFIED",
+ Type::ChildLinkedSpan => "CHILD_LINKED_SPAN",
+ Type::ParentLinkedSpan => "PARENT_LINKED_SPAN",
+ }
+ }
+ /// Creates an enum from field names used in the ProtoBuf definition.
+ pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+ match value {
+ "TYPE_UNSPECIFIED" => Some(Self::Unspecified),
+ "CHILD_LINKED_SPAN" => Some(Self::ChildLinkedSpan),
+ "PARENT_LINKED_SPAN" => Some(Self::ParentLinkedSpan),
+ _ => None,
+ }
+ }
+ }
+ }
+ /// A collection of links, which are references from this span to a span
+ /// in the same or different trace.
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Message)]
+ pub struct Links {
+ /// A collection of links.
+ #[prost(message, repeated, tag = "1")]
+ pub link: ::prost::alloc::vec::Vec<Link>,
+ /// The number of dropped links after the maximum size was enforced. If
+ /// this value is 0, then no links were dropped.
+ #[prost(int32, tag = "2")]
+ pub dropped_links_count: i32,
+ }
+ /// Type of span. Can be used to specify additional relationships between spans
+ /// in addition to a parent/child relationship.
+ #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+ #[repr(i32)]
+ pub enum SpanKind {
+ /// Unspecified. Do NOT use as default.
+ /// Implementations MAY assume SpanKind.INTERNAL to be default.
+ Unspecified = 0,
+ /// Indicates that the span is used internally. Default value.
+ Internal = 1,
+ /// Indicates that the span covers server-side handling of an RPC or other
+ /// remote network request.
+ Server = 2,
+ /// Indicates that the span covers the client-side wrapper around an RPC or
+ /// other remote request.
+ Client = 3,
+ /// Indicates that the span describes producer sending a message to a broker.
+ /// Unlike client and server, there is no direct critical path latency
+ /// relationship between producer and consumer spans (e.g. publishing a
+ /// message to a pubsub service).
+ Producer = 4,
+ /// Indicates that the span describes consumer receiving a message from a
+ /// broker. Unlike client and server, there is no direct critical path
+ /// latency relationship between producer and consumer spans (e.g. receiving
+ /// a message from a pubsub service subscription).
+ Consumer = 5,
+ }
+ impl SpanKind {
+ /// String value of the enum field names used in the ProtoBuf definition.
+ ///
+ /// The values are not transformed in any way and thus are considered stable
+ /// (if the ProtoBuf definition does not change) and safe for programmatic use.
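+ ///
+ /// For example, `as_str_name` and `from_str_name` round-trip (an
+ /// illustrative sketch):
+ ///
+ /// ```ignore
+ /// assert_eq!(SpanKind::Server.as_str_name(), "SERVER");
+ /// assert_eq!(SpanKind::from_str_name("SERVER"), Some(SpanKind::Server));
+ /// // Anything that is not an exact proto name yields `None`.
+ /// assert_eq!(SpanKind::from_str_name("server"), None);
+ /// ```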
+ pub fn as_str_name(&self) -> &'static str {
+ match self {
+ SpanKind::Unspecified => "SPAN_KIND_UNSPECIFIED",
+ SpanKind::Internal => "INTERNAL",
+ SpanKind::Server => "SERVER",
+ SpanKind::Client => "CLIENT",
+ SpanKind::Producer => "PRODUCER",
+ SpanKind::Consumer => "CONSUMER",
+ }
+ }
+ /// Creates an enum from field names used in the ProtoBuf definition.
+ pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+ match value {
+ "SPAN_KIND_UNSPECIFIED" => Some(Self::Unspecified),
+ "INTERNAL" => Some(Self::Internal),
+ "SERVER" => Some(Self::Server),
+ "CLIENT" => Some(Self::Client),
+ "PRODUCER" => Some(Self::Producer),
+ "CONSUMER" => Some(Self::Consumer),
+ _ => None,
+ }
+ }
+ }
+}
+/// The allowed types for `\[VALUE\]` in a `\[KEY]:[VALUE\]` attribute.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AttributeValue {
+ /// The type of the value.
+ #[prost(oneof = "attribute_value::Value", tags = "1, 2, 3")]
+ pub value: ::core::option::Option<attribute_value::Value>,
+}
+/// Nested message and enum types in `AttributeValue`.
+pub mod attribute_value {
+ /// The type of the value.
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Oneof)]
+ pub enum Value {
+ /// A string up to 256 bytes long.
+ #[prost(message, tag = "1")]
+ StringValue(super::TruncatableString),
+ /// A 64-bit signed integer.
+ #[prost(int64, tag = "2")]
+ IntValue(i64),
+ /// A Boolean value represented by `true` or `false`.
+ #[prost(bool, tag = "3")]
+ BoolValue(bool),
+ }
+}
+/// A call stack appearing in a trace.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct StackTrace {
+ /// Stack frames in this stack trace. A maximum of 128 frames are allowed.
+ #[prost(message, optional, tag = "1")]
+ pub stack_frames: ::core::option::Option<stack_trace::StackFrames>,
+ /// The hash ID is used to conserve network bandwidth for duplicate
+ /// stack traces within a single trace.
+ ///
+ /// Often multiple spans will have identical stack traces.
+ /// The first occurrence of a stack trace should contain both the
+ /// `stackFrame` content and a value in `stackTraceHashId`.
+ ///
+ /// Subsequent spans within the same request can refer
+ /// to that stack trace by only setting `stackTraceHashId`.
+ #[prost(int64, tag = "2")]
+ pub stack_trace_hash_id: i64,
+}
+/// Nested message and enum types in `StackTrace`.
+pub mod stack_trace {
+ /// Represents a single stack frame in a stack trace.
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Message)]
+ pub struct StackFrame {
+ /// The fully-qualified name that uniquely identifies the function or
+ /// method that is active in this frame (up to 1024 bytes).
+ #[prost(message, optional, tag = "1")]
+ pub function_name: ::core::option::Option<super::TruncatableString>,
+ /// An un-mangled function name, if `function_name` is mangled.
+ /// To get information about name mangling, run
+ /// [this search]().
+ /// The name can be fully-qualified (up to 1024 bytes).
+ #[prost(message, optional, tag = "2")]
+ pub original_function_name: ::core::option::Option<super::TruncatableString>,
+ /// The name of the source file where the function call appears (up to 256
+ /// bytes).
+ #[prost(message, optional, tag = "3")]
+ pub file_name: ::core::option::Option<super::TruncatableString>,
+ /// The line number in `file_name` where the function call appears.
+ #[prost(int64, tag = "4")]
+ pub line_number: i64,
+ /// The column number where the function call appears, if available.
+ /// This is important in JavaScript because of its anonymous functions.
+ #[prost(int64, tag = "5")]
+ pub column_number: i64,
+ /// The binary module from where the code was loaded.
+ #[prost(message, optional, tag = "6")]
+ pub load_module: ::core::option::Option<super::Module>,
+ /// The version of the deployed source code (up to 128 bytes).
+ #[prost(message, optional, tag = "7")]
+ pub source_version: ::core::option::Option<super::TruncatableString>,
+ }
+ /// A collection of stack frames, which can be truncated.
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Message)]
+ pub struct StackFrames {
+ /// Stack frames in this call stack.
+ #[prost(message, repeated, tag = "1")]
+ pub frame: ::prost::alloc::vec::Vec<StackFrame>,
+ /// The number of stack frames that were dropped because there
+ /// were too many stack frames.
+ /// If this value is 0, then no stack frames were dropped.
+ #[prost(int32, tag = "2")]
+ pub dropped_frames_count: i32,
+ }
+}
+/// Binary module.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Module {
+ /// For example: main binary, kernel modules, and dynamic libraries
+ /// such as libc.so, sharedlib.so (up to 256 bytes).
+ #[prost(message, optional, tag = "1")]
+ pub module: ::core::option::Option<TruncatableString>,
+ /// A unique identifier for the module, usually a hash of its
+ /// contents (up to 128 bytes).
+ #[prost(message, optional, tag = "2")]
+ pub build_id: ::core::option::Option<TruncatableString>,
+}
+/// Represents a string that might be shortened to a specified length.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TruncatableString {
+ /// The shortened string. For example, if the original string is 500
+ /// bytes long and the limit of the string is 128 bytes, then
+ /// `value` contains the first 128 bytes of the 500-byte string.
+ ///
+ /// Truncation always happens on a UTF8 character boundary. If there
+ /// are multi-byte characters in the string, then the length of the
+ /// shortened string might be less than the size limit.
+ #[prost(string, tag = "1")]
+ pub value: ::prost::alloc::string::String,
+ /// The number of bytes removed from the original string. If this
+ /// value is 0, then the string was not shortened.
+ #[prost(int32, tag = "2")]
+ pub truncated_byte_count: i32,
+}
+/// The request message for the `BatchWriteSpans` method.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BatchWriteSpansRequest {
+ /// Required. The name of the project where the spans belong. The format is
+ /// `projects/\[PROJECT_ID\]`.
+ #[prost(string, tag = "1")]
+ pub name: ::prost::alloc::string::String,
+ /// Required. A list of new spans. The span names must not match existing
+ /// spans, otherwise the results are undefined.
+ #[prost(message, repeated, tag = "2")]
+ pub spans: ::prost::alloc::vec::Vec<Span>,
+}
+/// Generated client implementations.
+pub mod trace_service_client {
+ #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]
+ use tonic::codegen::http::Uri;
+ use tonic::codegen::*;
+ /// Service for collecting and viewing traces and spans within a trace.
+ ///
+ /// A trace is a collection of spans corresponding to a single
+ /// operation or a set of operations in an application.
+ ///
+ /// A span is an individual timed event which forms a node of the trace tree.
+ /// A single trace can contain spans from multiple services.
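+ ///
+ /// A minimal usage sketch (illustrative; the endpoint and project ID are
+ /// assumptions, and authentication setup is out of scope here):
+ ///
+ /// ```ignore
+ /// let mut client =
+ ///     TraceServiceClient::connect("https://cloudtrace.googleapis.com").await?;
+ /// let request = BatchWriteSpansRequest {
+ ///     name: "projects/my-project".to_owned(),
+ ///     spans: vec![/* spans to export */],
+ /// };
+ /// client.batch_write_spans(request).await?;
+ /// ```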
+ #[derive(Debug, Clone)]
+ pub struct TraceServiceClient<T> {
+ inner: tonic::client::Grpc<T>,
+ }
+ impl TraceServiceClient<tonic::transport::Channel> {
+ /// Attempt to create a new client by connecting to a given endpoint.
+ pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
+ where
+ D: TryInto<tonic::transport::Endpoint>,
+ D::Error: Into<StdError>,
+ {
+ let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
+ Ok(Self::new(conn))
+ }
+ }
+ impl<T> TraceServiceClient<T>
+ where
+ T: tonic::client::GrpcService<tonic::body::BoxBody>,
+ T::Error: Into<StdError>,
+ T::ResponseBody: Body<Data = Bytes> + Send + 'static,
+ <T::ResponseBody as Body>::Error: Into<StdError> + Send,
+ {
+ pub fn new(inner: T) -> Self {
+ let inner = tonic::client::Grpc::new(inner);
+ Self { inner }
+ }
+ pub fn with_origin(inner: T, origin: Uri) -> Self {
+ let inner = tonic::client::Grpc::with_origin(inner, origin);
+ Self { inner }
+ }
+ pub fn with_interceptor<F>(
+ inner: T,
+ interceptor: F,
+ ) -> TraceServiceClient<InterceptedService<T, F>>
+ where
+ F: tonic::service::Interceptor,
+ T::ResponseBody: Default,
+ T: tonic::codegen::Service<
+ http::Request<tonic::body::BoxBody>,
+ Response = http::Response<
+ <T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,
+ >,
+ >,
+ <T as tonic::codegen::Service<http::Request<tonic::body::BoxBody>>>::Error:
+ Into<StdError> + Send + Sync,
+ {
+ TraceServiceClient::new(InterceptedService::new(inner, interceptor))
+ }
+ /// Compress requests with the given encoding.
+ ///
+ /// This requires the server to support it otherwise it might respond with an
+ /// error.
+ #[must_use]
+ pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
+ self.inner = self.inner.send_compressed(encoding);
+ self
+ }
+ /// Enable decompressing responses.
+ #[must_use]
+ pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
+ self.inner = self.inner.accept_compressed(encoding);
+ self
+ }
+ /// Limits the maximum size of a decoded message.
+ ///
+ /// Default: `4MB`
+ #[must_use]
+ pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
+ self.inner = self.inner.max_decoding_message_size(limit);
+ self
+ }
+ /// Limits the maximum size of an encoded message.
+ ///
+ /// Default: `usize::MAX`
+ #[must_use]
+ pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
+ self.inner = self.inner.max_encoding_message_size(limit);
+ self
+ }
+ /// Batch writes new spans to new or existing traces. You cannot update
+ /// existing spans.
+ pub async fn batch_write_spans(
+ &mut self,
+ request: impl tonic::IntoRequest<super::BatchWriteSpansRequest>,
+ ) -> std::result::Result<tonic::Response<()>, tonic::Status> {
+ self.inner.ready().await.map_err(|e| {
+ tonic::Status::new(
+ tonic::Code::Unknown,
+ format!("Service was not ready: {}", e.into()),
+ )
+ })?;
+ let codec = tonic::codec::ProstCodec::default();
+ let path = http::uri::PathAndQuery::from_static(
+ "/google.devtools.cloudtrace.v2.TraceService/BatchWriteSpans",
+ );
+ let mut req = request.into_request();
+ req.extensions_mut().insert(GrpcMethod::new(
+ "google.devtools.cloudtrace.v2.TraceService",
+ "BatchWriteSpans",
+ ));
+ self.inner.unary(req, path, codec).await
+ }
+ /// Creates a new span.
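+ ///
+ /// Sketch (illustrative), assuming `span` is a fully populated `Span`
+ /// and `client` a connected `TraceServiceClient`:
+ ///
+ /// ```ignore
+ /// // The service echoes back the span it created.
+ /// let created = client.create_span(span).await?.into_inner();
+ /// ```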
+ pub async fn create_span(
+ &mut self,
+ request: impl tonic::IntoRequest<super::Span>,
+ ) -> std::result::Result<tonic::Response<super::Span>, tonic::Status> {
+ self.inner.ready().await.map_err(|e| {
+ tonic::Status::new(
+ tonic::Code::Unknown,
+ format!("Service was not ready: {}", e.into()),
+ )
+ })?;
+ let codec = tonic::codec::ProstCodec::default();
+ let path = http::uri::PathAndQuery::from_static(
+ "/google.devtools.cloudtrace.v2.TraceService/CreateSpan",
+ );
+ let mut req = request.into_request();
+ req.extensions_mut().insert(GrpcMethod::new(
+ "google.devtools.cloudtrace.v2.TraceService",
+ "CreateSpan",
+ ));
+ self.inner.unary(req, path, codec).await
+ }
+ }
+}
diff --git a/opentelemetry-stackdriver/src/proto/logging/type.rs b/opentelemetry-stackdriver/src/proto/logging/type.rs
new file mode 100644
index 00000000..042bc23c
--- /dev/null
+++ b/opentelemetry-stackdriver/src/proto/logging/type.rs
@@ -0,0 +1,142 @@
+/// A common proto for logging HTTP requests. Only contains semantics
+/// defined by the HTTP specification. Product-specific logging
+/// information MUST be defined in a separate message.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct HttpRequest {
+ /// The request method. Examples: `"GET"`, `"HEAD"`, `"PUT"`, `"POST"`.
+ #[prost(string, tag = "1")]
+ pub request_method: ::prost::alloc::string::String,
+ /// The scheme (http, https), the host name, the path and the query
+ /// portion of the URL that was requested.
+ /// Example: `"
+ #[prost(string, tag = "2")]
+ pub request_url: ::prost::alloc::string::String,
+ /// The size of the HTTP request message in bytes, including the request
+ /// headers and the request body.
+ #[prost(int64, tag = "3")]
+ pub request_size: i64,
+ /// The response code indicating the status of response.
+ /// Examples: 200, 404.
+ #[prost(int32, tag = "4")]
+ pub status: i32,
+ /// The size of the HTTP response message sent back to the client, in bytes,
+ /// including the response headers and the response body.
+ #[prost(int64, tag = "5")]
+ pub response_size: i64,
+ /// The user agent sent by the client. Example:
+ /// `"Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; Q312461; .NET
+ /// CLR 1.0.3705)"`.
+ #[prost(string, tag = "6")]
+ pub user_agent: ::prost::alloc::string::String,
+ /// The IP address (IPv4 or IPv6) of the client that issued the HTTP
+ /// request. This field can include port information. Examples:
+ /// `"192.168.1.1"`, `"10.0.0.1:80"`, `"FE80::0202:B3FF:FE1E:8329"`.
+ #[prost(string, tag = "7")]
+ pub remote_ip: ::prost::alloc::string::String,
+ /// The IP address (IPv4 or IPv6) of the origin server that the request was
+ /// sent to. This field can include port information. Examples:
+ /// `"192.168.1.1"`, `"10.0.0.1:80"`, `"FE80::0202:B3FF:FE1E:8329"`.
+ #[prost(string, tag = "13")]
+ pub server_ip: ::prost::alloc::string::String,
+ /// The referer URL of the request, as defined in
+ /// [HTTP/1.1 Header Field
+ /// Definitions]().
+ #[prost(string, tag = "8")]
+ pub referer: ::prost::alloc::string::String,
+ /// The request processing latency on the server, from the time the request was
+ /// received until the response was sent.
+ #[prost(message, optional, tag = "14")]
+ pub latency: ::core::option::Option<::prost_types::Duration>,
+ /// Whether or not a cache lookup was attempted.
+ #[prost(bool, tag = "11")]
+ pub cache_lookup: bool,
+ /// Whether or not an entity was served from cache
+ /// (with or without validation).
+ #[prost(bool, tag = "9")] + pub cache_hit: bool, + /// Whether or not the response was validated with the origin server before + /// being served from cache. This field is only meaningful if `cache_hit` is + /// True. + #[prost(bool, tag = "10")] + pub cache_validated_with_origin_server: bool, + /// The number of HTTP response bytes inserted into cache. Set only when a + /// cache fill was attempted. + #[prost(int64, tag = "12")] + pub cache_fill_bytes: i64, + /// Protocol used for the request. Examples: "HTTP/1.1", "HTTP/2", "websocket" + #[prost(string, tag = "15")] + pub protocol: ::prost::alloc::string::String, +} +/// The severity of the event described in a log entry, expressed as one of the +/// standard severity levels listed below. For your reference, the levels are +/// assigned the listed numeric values. The effect of using numeric values other +/// than those listed is undefined. +/// +/// You can filter for log entries by severity. For example, the following +/// filter expression will match log entries with severities `INFO`, `NOTICE`, +/// and `WARNING`: +/// +/// severity > DEBUG AND severity <= WARNING +/// +/// If you are writing log entries, you should map other severity encodings to +/// one of these standard levels. For example, you might map all of Java's FINE, +/// FINER, and FINEST levels to `LogSeverity.DEBUG`. You can preserve the +/// original severity level in the log entry payload if you wish. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum LogSeverity { + /// (0) The log entry has no assigned severity level. + Default = 0, + /// (100) Debug or trace information. + Debug = 100, + /// (200) Routine information, such as ongoing status or performance. + Info = 200, + /// (300) Normal but significant events, such as start up, shut down, or + /// a configuration change. + Notice = 300, + /// (400) Warning events might cause problems. + Warning = 400, + /// (500) Error events are likely to cause problems. + Error = 500, + /// (600) Critical events cause more severe problems or outages. + Critical = 600, + /// (700) A person must take an action immediately. + Alert = 700, + /// (800) One or more systems are unusable. + Emergency = 800, +} +impl LogSeverity { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + LogSeverity::Default => "DEFAULT", + LogSeverity::Debug => "DEBUG", + LogSeverity::Info => "INFO", + LogSeverity::Notice => "NOTICE", + LogSeverity::Warning => "WARNING", + LogSeverity::Error => "ERROR", + LogSeverity::Critical => "CRITICAL", + LogSeverity::Alert => "ALERT", + LogSeverity::Emergency => "EMERGENCY", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
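+ ///
+ /// For example (an illustrative sketch):
+ ///
+ /// ```ignore
+ /// assert_eq!(LogSeverity::from_str_name("ERROR"), Some(LogSeverity::Error));
+ /// // Names must match the proto definition exactly; anything else is `None`.
+ /// assert_eq!(LogSeverity::from_str_name("error"), None);
+ /// ```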
+ pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+ match value {
+ "DEFAULT" => Some(Self::Default),
+ "DEBUG" => Some(Self::Debug),
+ "INFO" => Some(Self::Info),
+ "NOTICE" => Some(Self::Notice),
+ "WARNING" => Some(Self::Warning),
+ "ERROR" => Some(Self::Error),
+ "CRITICAL" => Some(Self::Critical),
+ "ALERT" => Some(Self::Alert),
+ "EMERGENCY" => Some(Self::Emergency),
+ _ => None,
+ }
+ }
+}
diff --git a/opentelemetry-stackdriver/src/proto/logging/v2.rs b/opentelemetry-stackdriver/src/proto/logging/v2.rs
new file mode 100644
index 00000000..390c4c75
--- /dev/null
+++ b/opentelemetry-stackdriver/src/proto/logging/v2.rs
@@ -0,0 +1,837 @@
+/// An individual entry in a log.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct LogEntry {
+ /// Required. The resource name of the log to which this log entry belongs:
+ ///
+ /// "projects/\[PROJECT_ID]/logs/[LOG_ID\]"
+ /// "organizations/\[ORGANIZATION_ID]/logs/[LOG_ID\]"
+ /// "billingAccounts/\[BILLING_ACCOUNT_ID]/logs/[LOG_ID\]"
+ /// "folders/\[FOLDER_ID]/logs/[LOG_ID\]"
+ ///
+ /// A project number may be used in place of PROJECT_ID. The project number is
+ /// translated to its corresponding PROJECT_ID internally and the `log_name`
+ /// field will contain PROJECT_ID in queries and exports.
+ ///
+ /// `\[LOG_ID\]` must be URL-encoded within `log_name`. Example:
+ /// `"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity"`.
+ ///
+ /// `\[LOG_ID\]` must be less than 512 characters long and can only include the
+ /// following characters: upper and lower case alphanumeric characters,
+ /// forward-slash, underscore, hyphen, and period.
+ ///
+ /// For backward compatibility, if `log_name` begins with a forward-slash, such
+ /// as `/projects/...`, then the log entry is ingested as usual, but the
+ /// forward-slash is removed. Listing the log entry will not show the leading
+ /// slash and filtering for a log name with a leading slash will never return
+ /// any results.
+ #[prost(string, tag = "12")]
+ pub log_name: ::prost::alloc::string::String,
+ /// Required. The monitored resource that produced this log entry.
+ ///
+ /// Example: a log entry that reports a database error would be associated with
+ /// the monitored resource designating the particular database that reported
+ /// the error.
+ #[prost(message, optional, tag = "8")]
+ pub resource: ::core::option::Option<super::super::api::MonitoredResource>,
+ /// Optional. The time the event described by the log entry occurred. This time is used
+ /// to compute the log entry's age and to enforce the logs retention period.
+ /// If this field is omitted in a new log entry, then Logging assigns it the
+ /// current time. Timestamps have nanosecond accuracy, but trailing zeros in
+ /// the fractional seconds might be omitted when the timestamp is displayed.
+ ///
+ /// Incoming log entries must have timestamps that don't exceed the
+ /// [logs retention
+ /// period]() in
+ /// the past, and that don't exceed 24 hours in the future. Log entries outside
+ /// those time boundaries aren't ingested by Logging.
+ #[prost(message, optional, tag = "9")]
+ pub timestamp: ::core::option::Option<::prost_types::Timestamp>,
+ /// Output only. The time the log entry was received by Logging.
+ #[prost(message, optional, tag = "24")]
+ pub receive_timestamp: ::core::option::Option<::prost_types::Timestamp>,
+ /// Optional. The severity of the log entry. The default value is `LogSeverity.DEFAULT`.
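+ ///
+ /// Because prost stores enum fields as `i32`, a severity is set by
+ /// casting. A sketch (`entry` is an assumed `LogEntry` value; the path
+ /// is the in-module one used by the prost attribute below):
+ ///
+ /// ```ignore
+ /// entry.severity = super::r#type::LogSeverity::Error as i32;
+ /// ```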
+ #[prost(enumeration = "super::r#type::LogSeverity", tag = "10")]
+ pub severity: i32,
+ /// Optional. A unique identifier for the log entry. If you provide a value, then
+ /// Logging considers other log entries in the same project, with the same
+ /// `timestamp`, and with the same `insert_id` to be duplicates which are
+ /// removed in a single query result. However, there are no guarantees of
+ /// de-duplication in the export of logs.
+ ///
+ /// If the `insert_id` is omitted when writing a log entry, the Logging API
+ /// assigns its own unique identifier in this field.
+ ///
+ /// In queries, the `insert_id` is also used to order log entries that have
+ /// the same `log_name` and `timestamp` values.
+ #[prost(string, tag = "4")]
+ pub insert_id: ::prost::alloc::string::String,
+ /// Optional. Information about the HTTP request associated with this log entry, if
+ /// applicable.
+ #[prost(message, optional, tag = "7")]
+ pub http_request: ::core::option::Option<super::r#type::HttpRequest>,
+ /// Optional. A map of key, value pairs that provides additional information about the
+ /// log entry. The labels can be user-defined or system-defined.
+ ///
+ /// User-defined labels are arbitrary key, value pairs that you can use to
+ /// classify logs.
+ ///
+ /// System-defined labels are defined by GCP services for platform logs.
+ /// They have two components - a service namespace component and the
+ /// attribute name. For example: `compute.googleapis.com/resource_name`.
+ ///
+ /// Cloud Logging truncates label keys that exceed 512 B and label
+ /// values that exceed 64 KB upon their associated log entry being
+ /// written. The truncation is indicated by an ellipsis at the
+ /// end of the character string.
+ #[prost(map = "string, string", tag = "11")]
+ pub labels:
+ ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>,
+ /// Optional. Information about an operation associated with the log entry, if
+ /// applicable.
+ #[prost(message, optional, tag = "15")]
+ pub operation: ::core::option::Option<LogEntryOperation>,
+ /// Optional. Resource name of the trace associated with the log entry, if any. If it
+ /// contains a relative resource name, the name is assumed to be relative to
+ /// `//tracing.googleapis.com`. Example:
+ /// `projects/my-projectid/traces/06796866738c859f2f19b7cfb3214824`
+ #[prost(string, tag = "22")]
+ pub trace: ::prost::alloc::string::String,
+ /// Optional. The span ID within the trace associated with the log entry.
+ ///
+ /// For Trace spans, this is the same format that the Trace API v2 uses: a
+ /// 16-character hexadecimal encoding of an 8-byte array, such as
+ /// `000000000000004a`.
+ #[prost(string, tag = "27")]
+ pub span_id: ::prost::alloc::string::String,
+ /// Optional. The sampling decision of the trace associated with the log entry.
+ ///
+ /// True means that the trace resource name in the `trace` field was sampled
+ /// for storage in a trace backend. False means that the trace was not sampled
+ /// for storage when this log entry was written, or the sampling decision was
+ /// unknown at the time. A non-sampled `trace` value is still useful as a
+ /// request correlation identifier. The default is False.
+ #[prost(bool, tag = "30")]
+ pub trace_sampled: bool,
+ /// Optional. Source code location information associated with the log entry, if any.
+ #[prost(message, optional, tag = "23")]
+ pub source_location: ::core::option::Option<LogEntrySourceLocation>,
+ /// Optional.
Information indicating this LogEntry is part of a sequence of multiple log
+ /// entries split from a single LogEntry.
+ #[prost(message, optional, tag = "35")]
+ pub split: ::core::option::Option<LogSplit>,
+ /// The log entry payload, which can be one of multiple types.
+ #[prost(oneof = "log_entry::Payload", tags = "2, 3, 6")]
+ pub payload: ::core::option::Option<log_entry::Payload>,
+}
+/// Nested message and enum types in `LogEntry`.
+pub mod log_entry {
+ /// The log entry payload, which can be one of multiple types.
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Oneof)]
+ pub enum Payload {
+ /// The log entry payload, represented as a protocol buffer. Some Google
+ /// Cloud Platform services use this field for their log entry payloads.
+ ///
+ /// The following protocol buffer types are supported; user-defined types
+ /// are not supported:
+ ///
+ /// "type.googleapis.com/google.cloud.audit.AuditLog"
+ /// "type.googleapis.com/google.appengine.logging.v1.RequestLog"
+ #[prost(message, tag = "2")]
+ ProtoPayload(::prost_types::Any),
+ /// The log entry payload, represented as a Unicode string (UTF-8).
+ #[prost(string, tag = "3")]
+ TextPayload(::prost::alloc::string::String),
+ /// The log entry payload, represented as a structure that is
+ /// expressed as a JSON object.
+ #[prost(message, tag = "6")]
+ JsonPayload(::prost_types::Struct),
+ }
+}
+/// Additional information about a potentially long-running operation with which
+/// a log entry is associated.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct LogEntryOperation {
+ /// Optional. An arbitrary operation identifier. Log entries with the same
+ /// identifier are assumed to be part of the same operation.
+ #[prost(string, tag = "1")]
+ pub id: ::prost::alloc::string::String,
+ /// Optional. An arbitrary producer identifier. The combination of `id` and
+ /// `producer` must be globally unique. Examples for `producer`:
+ /// `"MyDivision.MyBigCompany.com"`, `"github.com/MyProject/MyApplication"`.
+ #[prost(string, tag = "2")]
+ pub producer: ::prost::alloc::string::String,
+ /// Optional. Set this to True if this is the first log entry in the operation.
+ #[prost(bool, tag = "3")]
+ pub first: bool,
+ /// Optional. Set this to True if this is the last log entry in the operation.
+ #[prost(bool, tag = "4")]
+ pub last: bool,
+}
+/// Additional information about the source code location that produced the log
+/// entry.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct LogEntrySourceLocation {
+ /// Optional. Source file name. Depending on the runtime environment, this
+ /// might be a simple name or a fully-qualified name.
+ #[prost(string, tag = "1")]
+ pub file: ::prost::alloc::string::String,
+ /// Optional. Line within the source file. 1-based; 0 indicates no line number
+ /// available.
+ #[prost(int64, tag = "2")]
+ pub line: i64,
+ /// Optional. Human-readable name of the function or method being invoked, with
+ /// optional context such as the class or package name. This information may be
+ /// used in contexts such as the logs viewer, where a file and line number are
+ /// less meaningful. The format can vary by language. For example:
+ /// `qual.if.ied.Class.method` (Java), `dir/package.func` (Go), `function`
+ /// (Python).
+ #[prost(string, tag = "3")]
+ pub function: ::prost::alloc::string::String,
+}
+/// Additional information used to correlate multiple log entries.
Used when a
+/// single LogEntry would exceed the Google Cloud Logging size limit and is
+/// split across multiple log entries.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct LogSplit {
+ /// A globally unique identifier for all log entries in a sequence of split log
+ /// entries. All log entries with the same |LogSplit.uid| are assumed to be
+ /// part of the same sequence of split log entries.
+ #[prost(string, tag = "1")]
+ pub uid: ::prost::alloc::string::String,
+ /// The index of this LogEntry in the sequence of split log entries. Log
+ /// entries are given |index| values 0, 1, ..., n-1 for a sequence of n log
+ /// entries.
+ #[prost(int32, tag = "2")]
+ pub index: i32,
+ /// The total number of log entries that the original LogEntry was split into.
+ #[prost(int32, tag = "3")]
+ pub total_splits: i32,
+}
+/// The parameters to DeleteLog.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct DeleteLogRequest {
+ /// Required. The resource name of the log to delete:
+ ///
+ /// * `projects/\[PROJECT_ID]/logs/[LOG_ID\]`
+ /// * `organizations/\[ORGANIZATION_ID]/logs/[LOG_ID\]`
+ /// * `billingAccounts/\[BILLING_ACCOUNT_ID]/logs/[LOG_ID\]`
+ /// * `folders/\[FOLDER_ID]/logs/[LOG_ID\]`
+ ///
+ /// `\[LOG_ID\]` must be URL-encoded. For example,
+ /// `"projects/my-project-id/logs/syslog"`,
+ /// `"organizations/123/logs/cloudaudit.googleapis.com%2Factivity"`.
+ ///
+ /// For more information about log names, see
+ /// \[LogEntry][google.logging.v2.LogEntry\].
+ #[prost(string, tag = "1")]
+ pub log_name: ::prost::alloc::string::String,
+}
+/// The parameters to WriteLogEntries.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct WriteLogEntriesRequest {
+ /// Optional. A default log resource name that is assigned to all log entries
+ /// in `entries` that do not specify a value for `log_name`:
+ ///
+ /// * `projects/\[PROJECT_ID]/logs/[LOG_ID\]`
+ /// * `organizations/\[ORGANIZATION_ID]/logs/[LOG_ID\]`
+ /// * `billingAccounts/\[BILLING_ACCOUNT_ID]/logs/[LOG_ID\]`
+ /// * `folders/\[FOLDER_ID]/logs/[LOG_ID\]`
+ ///
+ /// `\[LOG_ID\]` must be URL-encoded. For example:
+ ///
+ /// "projects/my-project-id/logs/syslog"
+ /// "organizations/123/logs/cloudaudit.googleapis.com%2Factivity"
+ ///
+ /// The permission `logging.logEntries.create` is needed on each project,
+ /// organization, billing account, or folder that is receiving new log
+ /// entries, whether the resource is specified in `logName` or in an
+ /// individual log entry.
+ #[prost(string, tag = "1")]
+ pub log_name: ::prost::alloc::string::String,
+ /// Optional. A default monitored resource object that is assigned to all log
+ /// entries in `entries` that do not specify a value for `resource`. Example:
+ ///
+ /// { "type": "gce_instance",
+ /// "labels": {
+ /// "zone": "us-central1-a", "instance_id": "00000000000000000000" }}
+ ///
+ /// See \[LogEntry][google.logging.v2.LogEntry\].
+ #[prost(message, optional, tag = "2")]
+ pub resource: ::core::option::Option<super::super::api::MonitoredResource>,
+ /// Optional. Default labels that are added to the `labels` field of all log
+ /// entries in `entries`. If a log entry already has a label with the same key
+ /// as a label in this parameter, then the log entry's label is not changed.
+ /// See \[LogEntry][google.logging.v2.LogEntry\].
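+ ///
+ /// A hedged sketch of supplying default labels on the request (the
+ /// names, values, and `log_entries` variable are illustrative):
+ ///
+ /// ```ignore
+ /// let mut labels = std::collections::HashMap::new();
+ /// labels.insert("env".to_owned(), "prod".to_owned());
+ /// let request = WriteLogEntriesRequest {
+ ///     log_name: "projects/my-project/logs/syslog".to_owned(),
+ ///     labels,
+ ///     entries: log_entries,
+ ///     // Remaining fields take their prost defaults.
+ ///     ..Default::default()
+ /// };
+ /// ```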
+ #[prost(map = "string, string", tag = "3")]
+ pub labels:
+ ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>,
+ /// Required. The log entries to send to Logging. The order of log
+ /// entries in this list does not matter. Values supplied in this method's
+ /// `log_name`, `resource`, and `labels` fields are copied into those log
+ /// entries in this list that do not include values for their corresponding
+ /// fields. For more information, see the
+ /// \[LogEntry][google.logging.v2.LogEntry\] type.
+ ///
+ /// If the `timestamp` or `insert_id` fields are missing in log entries, then
+ /// this method supplies the current time or a unique identifier, respectively.
+ /// The supplied values are chosen so that, among the log entries that did not
+ /// supply their own values, the entries earlier in the list will sort before
+ /// the entries later in the list. See the `entries.list` method.
+ ///
+ /// Log entries with timestamps that are more than the
+ /// [logs retention period]() in
+ /// the past or more than 24 hours in the future will not be available when
+ /// calling `entries.list`. However, those log entries can still be [exported
+ /// with
+ /// LogSinks]().
+ ///
+ /// To improve throughput and to avoid exceeding the
+ /// [quota limit]() for calls to
+ /// `entries.write`, you should try to include several log entries in this
+ /// list, rather than calling this method for each individual log entry.
+ #[prost(message, repeated, tag = "4")]
+ pub entries: ::prost::alloc::vec::Vec<LogEntry>,
+ /// Optional. Whether valid entries should be written even if some other
+ /// entries fail due to INVALID_ARGUMENT or PERMISSION_DENIED errors. If any
+ /// entry is not written, then the response status is the error associated
+ /// with one of the failed entries and the response includes error details
+ /// keyed by the entries' zero-based index in the `entries.write` method.
+ #[prost(bool, tag = "5")]
+ pub partial_success: bool,
+ /// Optional. If true, the request should expect a normal response, but the
+ /// entries won't be persisted nor exported. Useful for checking whether the
+ /// logging API endpoints are working properly before sending valuable data.
+ #[prost(bool, tag = "6")]
+ pub dry_run: bool,
+}
+/// Result returned from WriteLogEntries.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct WriteLogEntriesResponse {}
+/// Error details for WriteLogEntries with partial success.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct WriteLogEntriesPartialErrors {
+ /// When `WriteLogEntriesRequest.partial_success` is true, records the error
+ /// status for entries that were not written due to a permanent error, keyed
+ /// by the entry's zero-based index in `WriteLogEntriesRequest.entries`.
+ ///
+ /// Failed requests for which no entries are written will not include
+ /// per-entry errors.
+ #[prost(map = "int32, message", tag = "1")]
+ pub log_entry_errors: ::std::collections::HashMap<i32, super::super::rpc::Status>,
+}
+/// The parameters to `ListLogEntries`.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ListLogEntriesRequest {
+ /// Required.
Names of one or more parent resources from which to
+ /// retrieve log entries:
+ ///
+ /// * `projects/\[PROJECT_ID\]`
+ /// * `organizations/\[ORGANIZATION_ID\]`
+ /// * `billingAccounts/\[BILLING_ACCOUNT_ID\]`
+ /// * `folders/\[FOLDER_ID\]`
+ ///
+ /// May alternatively be one or more views:
+ ///
+ /// * `projects/\[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]`
+ /// * `organizations/\[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]`
+ /// * `billingAccounts/\[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]`
+ /// * `folders/\[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]`
+ ///
+ /// Projects listed in the `project_ids` field are added to this list.
+ #[prost(string, repeated, tag = "8")]
+ pub resource_names: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
+ /// Optional. A filter that chooses which log entries to return. See [Advanced
+ /// Logs Queries]().
+ /// Only log entries that match the filter are returned. An empty filter
+ /// matches all log entries in the resources listed in `resource_names`.
+ /// Referencing a parent resource that is not listed in `resource_names` will
+ /// cause the filter to return no results. The maximum length of the filter is
+ /// 20000 characters.
+ #[prost(string, tag = "2")]
+ pub filter: ::prost::alloc::string::String,
+ /// Optional. How the results should be sorted. Presently, the only permitted
+ /// values are `"timestamp asc"` (default) and `"timestamp desc"`. The first
+ /// option returns entries in order of increasing values of
+ /// `LogEntry.timestamp` (oldest first), and the second option returns entries
+ /// in order of decreasing timestamps (newest first). Entries with equal
+ /// timestamps are returned in order of their `insert_id` values.
+ #[prost(string, tag = "3")]
+ pub order_by: ::prost::alloc::string::String,
+ /// Optional. The maximum number of results to return from this request. Default is 50.
+ /// If the value is negative or exceeds 1000, the request is rejected. The
+ /// presence of `next_page_token` in the response indicates that more results
+ /// might be available.
+ #[prost(int32, tag = "4")]
+ pub page_size: i32,
+ /// Optional. If present, then retrieve the next batch of results from the
+ /// preceding call to this method. `page_token` must be the value of
+ /// `next_page_token` from the previous response. The values of other method
+ /// parameters should be identical to those in the previous call.
+ #[prost(string, tag = "5")]
+ pub page_token: ::prost::alloc::string::String,
+}
+/// Result returned from `ListLogEntries`.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ListLogEntriesResponse {
+ /// A list of log entries. If `entries` is empty, `nextPageToken` may still be
+ /// returned, indicating that more entries may exist. See `nextPageToken` for
+ /// more information.
+ #[prost(message, repeated, tag = "1")]
+ pub entries: ::prost::alloc::vec::Vec<LogEntry>,
+ /// If there might be more results than those appearing in this response, then
+ /// `nextPageToken` is included. To get the next set of results, call this
+ /// method again using the value of `nextPageToken` as `pageToken`.
+ ///
+ /// If a value for `next_page_token` appears and the `entries` field is empty,
+ /// it means that the search found no log entries so far but it did not have
+ /// time to search all the possible log entries.
Retry the method with this + /// value for `page_token` to continue the search. Alternatively, consider + /// speeding up the search by changing your filter to specify a single log name + /// or resource type, or to narrow the time range of the search. + #[prost(string, tag = "2")] + pub next_page_token: ::prost::alloc::string::String, +} +/// The parameters to ListMonitoredResourceDescriptors +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListMonitoredResourceDescriptorsRequest { + /// Optional. The maximum number of results to return from this request. + /// Non-positive values are ignored. The presence of `nextPageToken` in the + /// response indicates that more results might be available. + #[prost(int32, tag = "1")] + pub page_size: i32, + /// Optional. If present, then retrieve the next batch of results from the + /// preceding call to this method. `pageToken` must be the value of + /// `nextPageToken` from the previous response. The values of other method + /// parameters should be identical to those in the previous call. + #[prost(string, tag = "2")] + pub page_token: ::prost::alloc::string::String, +} +/// Result returned from ListMonitoredResourceDescriptors. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListMonitoredResourceDescriptorsResponse { + /// A list of resource descriptors. + #[prost(message, repeated, tag = "1")] + pub resource_descriptors: + ::prost::alloc::vec::Vec, + /// If there might be more results than those appearing in this response, then + /// `nextPageToken` is included. To get the next set of results, call this + /// method again using the value of `nextPageToken` as `pageToken`. + #[prost(string, tag = "2")] + pub next_page_token: ::prost::alloc::string::String, +} +/// The parameters to ListLogs. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListLogsRequest { + /// Required. The resource name that owns the logs: + /// + /// * `projects/\[PROJECT_ID\]` + /// * `organizations/\[ORGANIZATION_ID\]` + /// * `billingAccounts/\[BILLING_ACCOUNT_ID\]` + /// * `folders/\[FOLDER_ID\]` + #[prost(string, tag = "1")] + pub parent: ::prost::alloc::string::String, + /// Optional. The maximum number of results to return from this request. + /// Non-positive values are ignored. The presence of `nextPageToken` in the + /// response indicates that more results might be available. + #[prost(int32, tag = "2")] + pub page_size: i32, + /// Optional. If present, then retrieve the next batch of results from the + /// preceding call to this method. `pageToken` must be the value of + /// `nextPageToken` from the previous response. The values of other method + /// parameters should be identical to those in the previous call. + #[prost(string, tag = "3")] + pub page_token: ::prost::alloc::string::String, + /// Optional. 
The resource name that owns the logs: + /// + /// * `projects/\[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]` + /// * `organizations/\[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]` + /// * `billingAccounts/\[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]` + /// * `folders/\[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]` + /// + /// To support legacy queries, it could also be: + /// + /// * `projects/\[PROJECT_ID\]` + /// * `organizations/\[ORGANIZATION_ID\]` + /// * `billingAccounts/\[BILLING_ACCOUNT_ID\]` + /// * `folders/\[FOLDER_ID\]` + #[prost(string, repeated, tag = "8")] + pub resource_names: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +/// Result returned from ListLogs. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListLogsResponse { + /// A list of log names. For example, + /// `"projects/my-project/logs/syslog"` or + /// `"organizations/123/logs/cloudresourcemanager.googleapis.com%2Factivity"`. + #[prost(string, repeated, tag = "3")] + pub log_names: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// If there might be more results than those appearing in this response, then + /// `nextPageToken` is included. To get the next set of results, call this + /// method again using the value of `nextPageToken` as `pageToken`. + #[prost(string, tag = "2")] + pub next_page_token: ::prost::alloc::string::String, +} +/// The parameters to `TailLogEntries`. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TailLogEntriesRequest { + /// Required. Name of a parent resource from which to retrieve log entries: + /// + /// * `projects/\[PROJECT_ID\]` + /// * `organizations/\[ORGANIZATION_ID\]` + /// * `billingAccounts/\[BILLING_ACCOUNT_ID\]` + /// * `folders/\[FOLDER_ID\]` + /// + /// May alternatively be one or more views: + /// + /// * `projects/\[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]` + /// * `organizations/\[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]` + /// * `billingAccounts/\[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]` + /// * `folders/\[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID\]` + #[prost(string, repeated, tag = "1")] + pub resource_names: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// Optional. A filter that chooses which log entries to return. See [Advanced + /// Logs Filters](). + /// Only log entries that match the filter are returned. An empty filter + /// matches all log entries in the resources listed in `resource_names`. + /// Referencing a parent resource that is not in `resource_names` will cause + /// the filter to return no results. The maximum length of the filter is 20000 + /// characters. + #[prost(string, tag = "2")] + pub filter: ::prost::alloc::string::String, + /// Optional. The amount of time to buffer log entries at the server before + /// being returned to prevent out of order results due to late arriving log + /// entries. Valid values are between 0-60000 milliseconds. Defaults to 2000 + /// milliseconds. + #[prost(message, optional, tag = "3")] + pub buffer_window: ::core::option::Option<::prost_types::Duration>, +} +/// Result returned from `TailLogEntries`. 
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TailLogEntriesResponse {
+    /// A list of log entries. Each response in the stream will order entries with
+    /// increasing values of `LogEntry.timestamp`. Ordering is not guaranteed
+    /// between separate responses.
+    #[prost(message, repeated, tag = "1")]
+    pub entries: ::prost::alloc::vec::Vec<LogEntry>,
+    /// If entries that otherwise would have been included in the session were not
+    /// sent back to the client, counts of relevant entries omitted from the
+    /// session with the reason that they were not included. There will be at most
+    /// one of each reason per response. The counts represent the number of
+    /// suppressed entries since the last streamed response.
+    #[prost(message, repeated, tag = "2")]
+    pub suppression_info: ::prost::alloc::vec::Vec<tail_log_entries_response::SuppressionInfo>,
+}
+/// Nested message and enum types in `TailLogEntriesResponse`.
+pub mod tail_log_entries_response {
+    /// Information about entries that were omitted from the session.
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Message)]
+    pub struct SuppressionInfo {
+        /// The reason that entries were omitted from the session.
+        #[prost(enumeration = "suppression_info::Reason", tag = "1")]
+        pub reason: i32,
+        /// A lower bound on the count of entries omitted due to `reason`.
+        #[prost(int32, tag = "2")]
+        pub suppressed_count: i32,
+    }
+    /// Nested message and enum types in `SuppressionInfo`.
+    pub mod suppression_info {
+        /// An indicator of why entries were omitted.
+        #[derive(
+            Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration,
+        )]
+        #[repr(i32)]
+        pub enum Reason {
+            /// Unexpected default.
+            Unspecified = 0,
+            /// Indicates suppression occurred due to relevant entries being
+            /// received in excess of rate limits. For quotas and limits, see
+            /// [Logging API quotas and
+            /// limits]().
+            RateLimit = 1,
+            /// Indicates suppression occurred due to the client not consuming
+            /// responses quickly enough.
+            NotConsumed = 2,
+        }
+        impl Reason {
+            /// String value of the enum field names used in the ProtoBuf definition.
+            ///
+            /// The values are not transformed in any way and thus are considered stable
+            /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+            pub fn as_str_name(&self) -> &'static str {
+                match self {
+                    Reason::Unspecified => "REASON_UNSPECIFIED",
+                    Reason::RateLimit => "RATE_LIMIT",
+                    Reason::NotConsumed => "NOT_CONSUMED",
+                }
+            }
+            /// Creates an enum from field names used in the ProtoBuf definition.
+            pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+                match value {
+                    "REASON_UNSPECIFIED" => Some(Self::Unspecified),
+                    "RATE_LIMIT" => Some(Self::RateLimit),
+                    "NOT_CONSUMED" => Some(Self::NotConsumed),
+                    _ => None,
+                }
+            }
+        }
+    }
+}
+/// Generated client implementations.
+pub mod logging_service_v2_client {
+    #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]
+    use tonic::codegen::http::Uri;
+    use tonic::codegen::*;
+    /// Service for ingesting and querying logs.
+    #[derive(Debug, Clone)]
+    pub struct LoggingServiceV2Client<T> {
+        inner: tonic::client::Grpc<T>,
+    }
+    impl LoggingServiceV2Client<tonic::transport::Channel> {
+        /// Attempt to create a new client by connecting to a given endpoint.
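+        ///
+        /// For example, a hypothetical connection to the public endpoint (the
+        /// endpoint URI and any credential setup are illustrative, not fixed):
+        ///
+        /// ```ignore
+        /// let client =
+        ///     LoggingServiceV2Client::connect("https://logging.googleapis.com").await?;
+        /// ```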
+        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
+        where
+            D: TryInto<tonic::transport::Endpoint>,
+            D::Error: Into<StdError>,
+        {
+            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
+            Ok(Self::new(conn))
+        }
+    }
+    impl<T> LoggingServiceV2Client<T>
+    where
+        T: tonic::client::GrpcService<tonic::body::BoxBody>,
+        T::Error: Into<StdError>,
+        T::ResponseBody: Body<Data = Bytes> + Send + 'static,
+        <T::ResponseBody as Body>::Error: Into<StdError> + Send,
+    {
+        pub fn new(inner: T) -> Self {
+            let inner = tonic::client::Grpc::new(inner);
+            Self { inner }
+        }
+        pub fn with_origin(inner: T, origin: Uri) -> Self {
+            let inner = tonic::client::Grpc::with_origin(inner, origin);
+            Self { inner }
+        }
+        pub fn with_interceptor<F>(
+            inner: T,
+            interceptor: F,
+        ) -> LoggingServiceV2Client<InterceptedService<T, F>>
+        where
+            F: tonic::service::Interceptor,
+            T::ResponseBody: Default,
+            T: tonic::codegen::Service<
+                http::Request<tonic::body::BoxBody>,
+                Response = http::Response<
+                    <T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,
+                >,
+            >,
+            <T as tonic::codegen::Service<http::Request<tonic::body::BoxBody>>>::Error:
+                Into<StdError> + Send + Sync,
+        {
+            LoggingServiceV2Client::new(InterceptedService::new(inner, interceptor))
+        }
+        /// Compress requests with the given encoding.
+        ///
+        /// This requires the server to support it otherwise it might respond with an
+        /// error.
+        #[must_use]
+        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
+            self.inner = self.inner.send_compressed(encoding);
+            self
+        }
+        /// Enable decompressing responses.
+        #[must_use]
+        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
+            self.inner = self.inner.accept_compressed(encoding);
+            self
+        }
+        /// Limits the maximum size of a decoded message.
+        ///
+        /// Default: `4MB`
+        #[must_use]
+        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
+            self.inner = self.inner.max_decoding_message_size(limit);
+            self
+        }
+        /// Limits the maximum size of an encoded message.
+        ///
+        /// Default: `usize::MAX`
+        #[must_use]
+        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
+            self.inner = self.inner.max_encoding_message_size(limit);
+            self
+        }
+        /// Deletes all the log entries in a log for the _Default Log Bucket. The log
+        /// reappears if it receives new entries. Log entries written shortly before
+        /// the delete operation might not be deleted. Entries received after the
+        /// delete operation with a timestamp before the operation will be deleted.
+        pub async fn delete_log(
+            &mut self,
+            request: impl tonic::IntoRequest<super::DeleteLogRequest>,
+        ) -> std::result::Result<tonic::Response<()>, tonic::Status> {
+            self.inner.ready().await.map_err(|e| {
+                tonic::Status::new(
+                    tonic::Code::Unknown,
+                    format!("Service was not ready: {}", e.into()),
+                )
+            })?;
+            let codec = tonic::codec::ProstCodec::default();
+            let path = http::uri::PathAndQuery::from_static(
+                "/google.logging.v2.LoggingServiceV2/DeleteLog",
+            );
+            let mut req = request.into_request();
+            req.extensions_mut().insert(GrpcMethod::new(
+                "google.logging.v2.LoggingServiceV2",
+                "DeleteLog",
+            ));
+            self.inner.unary(req, path, codec).await
+        }
+        /// Writes log entries to Logging. This API method is the
+        /// only way to send log entries to Logging. This method
+        /// is used, directly or indirectly, by the Logging agent
+        /// (fluentd) and all logging libraries configured to use Logging.
+        /// A single request may contain log entries for a maximum of 1000
+        /// different resources (projects, organizations, billing accounts or
+        /// folders)
+        pub async fn write_log_entries(
+            &mut self,
+            request: impl tonic::IntoRequest<super::WriteLogEntriesRequest>,
+        ) -> std::result::Result<tonic::Response<super::WriteLogEntriesResponse>, tonic::Status>
+        {
+            self.inner.ready().await.map_err(|e| {
+                tonic::Status::new(
+                    tonic::Code::Unknown,
+                    format!("Service was not ready: {}", e.into()),
+                )
+            })?;
+            let codec = tonic::codec::ProstCodec::default();
+            let path = http::uri::PathAndQuery::from_static(
+                "/google.logging.v2.LoggingServiceV2/WriteLogEntries",
+            );
+            let mut req = request.into_request();
+            req.extensions_mut().insert(GrpcMethod::new(
+                "google.logging.v2.LoggingServiceV2",
+                "WriteLogEntries",
+            ));
+            self.inner.unary(req, path, codec).await
+        }
+        /// Lists log entries. Use this method to retrieve log entries that originated
+        /// from a project/folder/organization/billing account. For ways to export log
+        /// entries, see [Exporting
+        /// Logs](https://cloud.google.com/logging/docs/export).
+        pub async fn list_log_entries(
+            &mut self,
+            request: impl tonic::IntoRequest<super::ListLogEntriesRequest>,
+        ) -> std::result::Result<tonic::Response<super::ListLogEntriesResponse>, tonic::Status>
+        {
+            self.inner.ready().await.map_err(|e| {
+                tonic::Status::new(
+                    tonic::Code::Unknown,
+                    format!("Service was not ready: {}", e.into()),
+                )
+            })?;
+            let codec = tonic::codec::ProstCodec::default();
+            let path = http::uri::PathAndQuery::from_static(
+                "/google.logging.v2.LoggingServiceV2/ListLogEntries",
+            );
+            let mut req = request.into_request();
+            req.extensions_mut().insert(GrpcMethod::new(
+                "google.logging.v2.LoggingServiceV2",
+                "ListLogEntries",
+            ));
+            self.inner.unary(req, path, codec).await
+        }
+        /// Lists the descriptors for monitored resource types used by Logging.
+        pub async fn list_monitored_resource_descriptors(
+            &mut self,
+            request: impl tonic::IntoRequest<super::ListMonitoredResourceDescriptorsRequest>,
+        ) -> std::result::Result<
+            tonic::Response<super::ListMonitoredResourceDescriptorsResponse>,
+            tonic::Status,
+        > {
+            self.inner.ready().await.map_err(|e| {
+                tonic::Status::new(
+                    tonic::Code::Unknown,
+                    format!("Service was not ready: {}", e.into()),
+                )
+            })?;
+            let codec = tonic::codec::ProstCodec::default();
+            let path = http::uri::PathAndQuery::from_static(
+                "/google.logging.v2.LoggingServiceV2/ListMonitoredResourceDescriptors",
+            );
+            let mut req = request.into_request();
+            req.extensions_mut().insert(GrpcMethod::new(
+                "google.logging.v2.LoggingServiceV2",
+                "ListMonitoredResourceDescriptors",
+            ));
+            self.inner.unary(req, path, codec).await
+        }
+        /// Lists the logs in projects, organizations, folders, or billing accounts.
+        /// Only logs that have entries are listed.
+        pub async fn list_logs(
+            &mut self,
+            request: impl tonic::IntoRequest<super::ListLogsRequest>,
+        ) -> std::result::Result<tonic::Response<super::ListLogsResponse>, tonic::Status> {
+            self.inner.ready().await.map_err(|e| {
+                tonic::Status::new(
+                    tonic::Code::Unknown,
+                    format!("Service was not ready: {}", e.into()),
+                )
+            })?;
+            let codec = tonic::codec::ProstCodec::default();
+            let path = http::uri::PathAndQuery::from_static(
+                "/google.logging.v2.LoggingServiceV2/ListLogs",
+            );
+            let mut req = request.into_request();
+            req.extensions_mut().insert(GrpcMethod::new(
+                "google.logging.v2.LoggingServiceV2",
+                "ListLogs",
+            ));
+            self.inner.unary(req, path, codec).await
+        }
+        /// Streaming read of log entries as they are ingested. Until the stream is
+        /// terminated, it will continue reading logs.
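+        ///
+        /// A minimal sketch of driving the stream (the request contents and
+        /// loop body are illustrative only):
+        ///
+        /// ```ignore
+        /// let requests = futures_util::stream::iter(vec![TailLogEntriesRequest::default()]);
+        /// let mut responses = client.tail_log_entries(requests).await?.into_inner();
+        /// while let Some(rsp) = responses.message().await? {
+        ///     println!("received {} entries", rsp.entries.len());
+        /// }
+        /// ```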
+        pub async fn tail_log_entries(
+            &mut self,
+            request: impl tonic::IntoStreamingRequest<Message = super::TailLogEntriesRequest>,
+        ) -> std::result::Result<
+            tonic::Response<tonic::codec::Streaming<super::TailLogEntriesResponse>>,
+            tonic::Status,
+        > {
+            self.inner.ready().await.map_err(|e| {
+                tonic::Status::new(
+                    tonic::Code::Unknown,
+                    format!("Service was not ready: {}", e.into()),
+                )
+            })?;
+            let codec = tonic::codec::ProstCodec::default();
+            let path = http::uri::PathAndQuery::from_static(
+                "/google.logging.v2.LoggingServiceV2/TailLogEntries",
+            );
+            let mut req = request.into_streaming_request();
+            req.extensions_mut().insert(GrpcMethod::new(
+                "google.logging.v2.LoggingServiceV2",
+                "TailLogEntries",
+            ));
+            self.inner.streaming(req, path, codec).await
+        }
+    }
+}
diff --git a/opentelemetry-stackdriver/src/proto/mod.rs b/opentelemetry-stackdriver/src/proto/mod.rs
new file mode 100644
index 00000000..631d8301
--- /dev/null
+++ b/opentelemetry-stackdriver/src/proto/mod.rs
@@ -0,0 +1,14 @@
+pub mod api;
+
+pub mod devtools {
+    pub mod cloudtrace {
+        pub mod v2;
+    }
+}
+
+pub mod logging {
+    pub mod r#type;
+    pub mod v2;
+}
+
+pub mod rpc;
diff --git a/opentelemetry-stackdriver/src/proto/rpc.rs b/opentelemetry-stackdriver/src/proto/rpc.rs
new file mode 100644
index 00000000..e20cb148
--- /dev/null
+++ b/opentelemetry-stackdriver/src/proto/rpc.rs
@@ -0,0 +1,25 @@
+/// The `Status` type defines a logical error model that is suitable for
+/// different programming environments, including REST APIs and RPC APIs. It is
+/// used by \[gRPC\](). Each `Status` message contains
+/// three pieces of data: error code, error message, and error details.
+///
+/// You can find out more about this error model and how to work with it in the
+/// [API Design Guide]().
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Status {
+    /// The status code, which should be an enum value of
+    /// \[google.rpc.Code][google.rpc.Code\].
+    #[prost(int32, tag = "1")]
+    pub code: i32,
+    /// A developer-facing error message, which should be in English. Any
+    /// user-facing error message should be localized and sent in the
+    /// \[google.rpc.Status.details][google.rpc.Status.details\] field, or localized
+    /// by the client.
+    #[prost(string, tag = "2")]
+    pub message: ::prost::alloc::string::String,
+    /// A list of messages that carry the error details. There is a common set of
+    /// message types for APIs to use.
+    #[prost(message, repeated, tag = "3")]
+    pub details: ::prost::alloc::vec::Vec<::prost_types::Any>,
+}
diff --git a/opentelemetry-stackdriver/tests/generate.rs b/opentelemetry-stackdriver/tests/generate.rs
new file mode 100644
index 00000000..d7284fca
--- /dev/null
+++ b/opentelemetry-stackdriver/tests/generate.rs
@@ -0,0 +1,261 @@
+use std::collections::HashMap;
+use std::ffi::OsStr;
+use std::fs;
+use std::path::PathBuf;
+use std::process::Command;
+
+use futures_util::stream::FuturesUnordered;
+use futures_util::stream::StreamExt;
+use walkdir::WalkDir;
+
+/// Download the latest protobuf schemas from the Google APIs GitHub repository.
+///
+/// This test is ignored by default, but can be run with `cargo test sync_schemas -- --ignored`.
+#[tokio::test]
+#[ignore]
+async fn sync_schemas() {
+    let client = reqwest::Client::new();
+    let cache = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("proto/google");
+    let schemas = PREREQUISITE_SCHEMAS
+        .iter()
+        .chain(GENERATE_FROM_SCHEMAS.iter());
+
+    let mut futures = FuturesUnordered::new();
+    for path in schemas.copied() {
+        let filename = cache.join(path);
+        let client = client.clone();
+        futures.push(async move {
+            let url = format!("{BASE_URI}/{path}");
+            let rsp = client.get(url).send().await.unwrap();
+            let body = rsp.text().await.unwrap();
+            fs::create_dir_all(filename.parent().unwrap()).unwrap();
+            fs::write(filename, body).unwrap();
+        });
+    }
+
+    while futures.next().await.is_some() {}
+}
+
+/// Use the protobuf schemas downloaded by the `sync_schemas` test to generate code.
+///
+/// This test will fail if the code currently in the repository is different from the
+/// newly generated code, and will update it in place in that case.
+#[test]
+fn generated_code_is_fresh() {
+    // Generate code into a temporary directory.
+
+    let schemas = GENERATE_FROM_SCHEMAS
+        .iter()
+        .map(|s| format!("google/{s}"))
+        .collect::<Vec<_>>();
+
+    let tmp_dir = tempfile::tempdir().unwrap();
+    fs::create_dir_all(&tmp_dir).unwrap();
+    tonic_build::configure()
+        .build_client(true)
+        .build_server(false)
+        .out_dir(&tmp_dir)
+        .compile(&schemas, &["proto"])
+        .unwrap();
+
+    // Next, wrangle the generated file names into a directory hierarchy.
+
+    let (mut modules, mut renames) = (Vec::new(), Vec::new());
+    for entry in fs::read_dir(&tmp_dir).unwrap() {
+        let path = entry.unwrap().path();
+
+        // Tonic now uses prettyplease instead of rustfmt, which causes a
+        // number of differences in the generated code.
+        Command::new("rustfmt")
+            .arg("--edition=2021")
+            .arg(&path)
+            .output()
+            .unwrap();
+
+        let file_name_str = path.file_name().and_then(|s| s.to_str()).unwrap();
+        let (base, _) = file_name_str
+            .strip_prefix("google.")
+            .unwrap()
+            .rsplit_once('.')
+            .unwrap();
+
+        let new = match base.rsplit_once('.') {
+            Some((dir, fname)) => {
+                let mut module = dir.split('.').map(|s| s.to_owned()).collect::<Vec<_>>();
+                module.push(fname.to_owned());
+                modules.push(module);
+                tmp_dir
+                    .path()
+                    .join(dir.replace('.', "/").replace("r#", ""))
+                    .join(format!("{}.rs", fname.replace("r#", "")))
+            }
+            None => {
+                let new = tmp_dir
+                    .path()
+                    .join(format!("{}.rs", base.replace("r#", "")));
+                modules.push(vec![base.to_owned()]);
+                new
+            }
+        };
+
+        renames.push((path, new));
+    }
+
+    // Rename the files into place after iterating over the old version.
+
+    for (old, new) in renames {
+        fs::create_dir_all(new.parent().unwrap()).unwrap();
+        fs::rename(old, new).unwrap();
+    }
+
+    // Build the module root and write it to `mod.rs`.
+
+    modules.sort_unstable();
+    let mut previous: &[String] = &[];
+    let (mut root, mut level) = (String::new(), 0);
+    for module in &modules {
+        // Find out how many modules to close and what modules to open.
+
+        let parent = &module[..module.len() - 1];
+        let (mut close, mut open) = (0, vec![]);
+        let components = Ord::max(previous.len(), parent.len());
+        for i in 0..components {
+            let (prev, cur) = (previous.get(i), parent.get(i));
+            if prev == cur && close == 0 && open.is_empty() {
+                continue;
+            }
+
+            match (prev, cur) {
+                (Some(_), Some(new)) => {
+                    close += 1;
+                    open.push(new);
+                }
+                (Some(_), None) => close += 1,
+                (None, Some(new)) => open.push(new),
+                (None, None) => unreachable!(),
+            }
+        }
+
+        // Close modules.
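+        // At this point `close` holds how many module blocks from the previous
+        // path still need closing braces, and `open` holds the path components
+        // that need new `pub mod ... {` blocks opened for this module.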
+
+        let closed = close > 0;
+        while close > 0 {
+            for _ in 0..((level - 1) * 4) {
+                root.push(' ');
+            }
+            root.push_str("}\n");
+            close -= 1;
+            level -= 1;
+        }
+
+        if closed {
+            root.push('\n');
+        }
+
+        // Open modules.
+
+        let mut opened = false;
+        for component in &open {
+            if !opened && !closed {
+                root.push('\n');
+                opened = true;
+            }
+
+            for _ in 0..(level * 4) {
+                root.push(' ');
+            }
+
+            root.push_str("pub mod ");
+            root.push_str(component);
+            root.push_str(" {\n");
+            level += 1;
+        }
+
+        // Write a module declaration for this actual module.
+
+        for _ in 0..(level * 4) {
+            root.push(' ');
+        }
+        root.push_str("pub mod ");
+        root.push_str(module.last().unwrap());
+        root.push_str(";\n");
+        previous = parent;
+    }
+
+    while level > 0 {
+        level -= 1;
+        for _ in 0..(level * 4) {
+            root.push(' ');
+        }
+        root.push_str("}\n");
+    }
+
+    fs::write(tmp_dir.path().join("mod.rs"), root).unwrap();
+
+    // Move on to actually comparing the old and new versions.
+
+    let versions = [SOURCE_DIR, tmp_dir.path().to_str().unwrap()]
+        .iter()
+        .map(|path| {
+            let mut files = HashMap::new();
+            for entry in WalkDir::new(path) {
+                let entry = match entry {
+                    Ok(e) => e,
+                    Err(_) => continue,
+                };
+
+                let is_file = entry.file_type().is_file();
+                let rs = entry.path().extension() == Some(OsStr::new("rs"));
+                if !is_file || !rs {
+                    continue;
+                }
+
+                let file = entry.path();
+                let name = file.strip_prefix(path).unwrap();
+                files.insert(name.to_owned(), fs::read_to_string(file).unwrap());
+            }
+
+            files
+        })
+        .collect::<Vec<_>>();
+
+    // Compare the old version and new version and fail the test if they're different.
+
+    let mut keys = versions[0].keys().collect::<Vec<_>>();
+    keys.extend(versions[1].keys());
+    keys.sort_unstable();
+    keys.dedup();
+
+    if versions[0] != versions[1] {
+        let _ = fs::remove_dir_all(SOURCE_DIR);
+        fs::rename(tmp_dir, SOURCE_DIR).unwrap();
+        panic!("generated code in the repository is outdated, updating...");
+    }
+}
+
+/// Schema files used as input for the generated code.
+const GENERATE_FROM_SCHEMAS: &[&str] = &[
+    "devtools/cloudtrace/v2/tracing.proto",
+    "devtools/cloudtrace/v2/trace.proto",
+    "logging/type/http_request.proto",
+    "logging/v2/log_entry.proto",
+    "logging/v2/logging.proto",
+    "rpc/status.proto",
+];
+
+/// Schema files that are dependencies of the `GENERATE_FROM_SCHEMAS`.
+const PREREQUISITE_SCHEMAS: &[&str] = &[
+    "api/annotations.proto",
+    "api/resource.proto",
+    "api/monitored_resource.proto",
+    "api/field_behavior.proto",
+    "api/http.proto",
+    "api/client.proto",
+    "logging/type/log_severity.proto",
+    "api/label.proto",
+    "api/launch_stage.proto",
+    "logging/v2/logging_config.proto",
+];
+
+const BASE_URI: &str = "https://raw.githubusercontent.com/googleapis/googleapis/master/google";
+const SOURCE_DIR: &str = "src/proto";
diff --git a/opentelemetry-user-events-logs/CHANGELOG.md b/opentelemetry-user-events-logs/CHANGELOG.md
new file mode 100644
index 00000000..c61e4476
--- /dev/null
+++ b/opentelemetry-user-events-logs/CHANGELOG.md
@@ -0,0 +1,15 @@
+# Changelog
+
+## Unreleased
+
+## v0.2.0
+
+### Changed
+
+- Bump MSRV to 1.65 [#1318](https://github.com/open-telemetry/opentelemetry-rust/pull/1318)
+
+## v0.1.0
+
+### Added
+
+- Initial Alpha implementation
diff --git a/opentelemetry-user-events-logs/CODEOWNERS b/opentelemetry-user-events-logs/CODEOWNERS
new file mode 100644
index 00000000..d6962a90
--- /dev/null
+++ b/opentelemetry-user-events-logs/CODEOWNERS
@@ -0,0 +1,5 @@
+# Code owners file.
+# This file controls who is tagged for review for any given pull request.
+ +# For anything not explicitly taken by someone else: +* @open-telemetry/rust-approvers diff --git a/opentelemetry-user-events-logs/Cargo.toml b/opentelemetry-user-events-logs/Cargo.toml new file mode 100644 index 00000000..8b2257a5 --- /dev/null +++ b/opentelemetry-user-events-logs/Cargo.toml @@ -0,0 +1,35 @@ +[package] +name = "opentelemetry-user-events-logs" +description = "OpenTelemetry-Rust exporter to userevents" +version = "0.2.0" +edition = "2021" +homepage = "https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/opentelemetry-user-events-logs" +repository = "https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/opentelemetry-user-events-logs" +readme = "README.md" +rust-version = "1.65.0" +keywords = ["opentelemetry", "log", "trace", "user_events"] +license = "Apache-2.0" + +[dependencies] +eventheader = "0.3.2" +eventheader_dynamic = "0.3.3" +opentelemetry = { version = "0.21", features = ["logs"] } +opentelemetry_sdk = { version = "0.21", features = ["logs"] } +async-std = { version="1.6" } +async-trait = { version="0.1" } +chrono = { version="0.4", default-features = false, features=["std"] } + +[dev-dependencies] +opentelemetry-appender-tracing = { version = "0.2" } +tracing = { version = "0.1", default-features = false, features = ["std"] } +tracing-core = "0.1.31" +tracing-subscriber = { version = "0.3.0", default-features = false, features = ["registry", "std"] } +microbench = "0.5" + +[features] +logs_level_enabled = ["opentelemetry/logs_level_enabled", "opentelemetry_sdk/logs_level_enabled"] +default=["logs_level_enabled"] + +[[example]] +name = "basic" +path = "examples/basic.rs" diff --git a/opentelemetry-user-events-logs/LICENSE b/opentelemetry-user-events-logs/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/opentelemetry-user-events-logs/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/opentelemetry-user-events-logs/README.md b/opentelemetry-user-events-logs/README.md
new file mode 100644
index 00000000..20d186b8
--- /dev/null
+++ b/opentelemetry-user-events-logs/README.md
@@ -0,0 +1,17 @@
+![OpenTelemetry — An observability framework for cloud-native software.][splash]
+
+[splash]: https://raw.githubusercontent.com/open-telemetry/opentelemetry-rust/main/assets/logo-text.png
+
+# OpenTelemetry user_events Exporter
+
+## Overview
+
+[user_events](https://docs.kernel.org/trace/user_events.html) is a Linux solution for user-process tracing, similar to ETW (Event Tracing for Windows) on Windows. It builds on top of Linux Tracepoints and allows user processes to create events and trace data that can be viewed via existing tools like ftrace and perf.
+
+This kernel feature is supported from Linux kernel 5.18 onwards. The feature enables:
+ - A faster path for tracing from user-mode applications, utilizing kernel-mode memory address space.
+ - User processes can now export telemetry events only when it is useful, i.e., when the registered set of tracepoint events is enabled.
+
+ This user_events exporter enables applications to use the OpenTelemetry API to capture telemetry events and write them to the user_events subsystem. From user_events, the events can be
+ - Captured by agents running locally that listen for specific events within the user_events subsystem.
+ - Or monitored in real time using local Linux tools like [perf](https://perf.wiki.kernel.org/index.php/Main_Page) or ftrace.
diff --git a/opentelemetry-user-events-logs/examples/basic.rs b/opentelemetry-user-events-logs/examples/basic.rs
new file mode 100644
index 00000000..67977cd9
--- /dev/null
+++ b/opentelemetry-user-events-logs/examples/basic.rs
@@ -0,0 +1,38 @@
+//! run with `$ cargo run --example basic --all-features`
+
+use opentelemetry_appender_tracing::layer;
+use opentelemetry_sdk::logs::LoggerProvider;
+use opentelemetry_user_events_logs::{ExporterConfig, ReentrantLogProcessor};
+use std::collections::HashMap;
+use tracing::error;
+use tracing_subscriber::prelude::*;
+
+fn init_logger() -> LoggerProvider {
+    let exporter_config = ExporterConfig {
+        default_keyword: 1,
+        keywords_map: HashMap::new(),
+    };
+    let reentrant_processor = ReentrantLogProcessor::new("test", None, exporter_config);
+    LoggerProvider::builder()
+        .with_log_processor(reentrant_processor)
+        .build()
+}
+
+fn main() {
+    // Example with tracing appender.
+    let logger_provider = init_logger();
+    let layer = layer::OpenTelemetryTracingBridge::new(&logger_provider);
+    tracing_subscriber::registry().with(layer).init();
+
+    // event_name is now passed as an attribute, but once https://github.com/tokio-rs/tracing/issues/1426
+    // is done, it can be passed with name:"my-event-name", so it'll be available as metadata for
+    // fast filtering.
+    // event_id is also passed as an attribute now; there is nothing in metadata where a
+    // numeric id can be stored.
+    error!(
+        name: "my-event-name",
+        event_id = 20,
+        user_name = "otel user",
+        user_email = "otel@opentelemetry.io"
+    );
+}
diff --git a/opentelemetry-user-events-logs/src/lib.rs b/opentelemetry-user-events-logs/src/lib.rs
new file mode 100644
index 00000000..93df8c77
--- /dev/null
+++ b/opentelemetry-user-events-logs/src/lib.rs
@@ -0,0 +1,8 @@
+//! The user_events exporter enables applications to use the OpenTelemetry API
+//! to capture telemetry events and write them to the user_events subsystem.
+
+#![warn(missing_debug_implementations, missing_docs)]
+
+mod logs;
+
+pub use logs::*;
diff --git a/opentelemetry-user-events-logs/src/logs/exporter.rs b/opentelemetry-user-events-logs/src/logs/exporter.rs
new file mode 100644
index 00000000..3825af49
--- /dev/null
+++ b/opentelemetry-user-events-logs/src/logs/exporter.rs
@@ -0,0 +1,347 @@
+use async_trait::async_trait;
+use eventheader::{FieldFormat, Level, Opcode};
+use eventheader_dynamic::EventBuilder;
+use std::borrow::Cow;
+use std::collections::HashMap;
+use std::fmt::Debug;
+use std::sync::Arc;
+
+use opentelemetry::{logs::AnyValue, logs::Severity, Key};
+use std::{cell::RefCell, str, time::SystemTime};
+
+/// Provider group associated with the user_events exporter
+pub type ProviderGroup = Option<Cow<'static, str>>;
+
+thread_local! { static EBW: RefCell<EventBuilder> = RefCell::new(EventBuilder::new());}
+
+/// Exporter config
+#[derive(Debug)]
+pub struct ExporterConfig {
+    /// Keywords associated with user_events names.
+    /// These should be mapped to logger_name as of now.
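+    /// For example (a hypothetical mapping; the logger name and keyword
+    /// value are illustrative only):
+    /// `config.keywords_map.insert("my-component".to_string(), 2);`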
+    pub keywords_map: HashMap<String, u64>,
+    /// default keyword if map is not defined.
+    pub default_keyword: u64,
+}
+
+impl Default for ExporterConfig {
+    fn default() -> Self {
+        ExporterConfig {
+            keywords_map: HashMap::new(),
+            default_keyword: 1,
+        }
+    }
+}
+
+impl ExporterConfig {
+    pub(crate) fn get_log_keyword(&self, name: &str) -> Option<u64> {
+        self.keywords_map.get(name).copied()
+    }
+
+    pub(crate) fn get_log_keyword_or_default(&self, name: &str) -> Option<u64> {
+        if self.keywords_map.is_empty() {
+            Some(self.default_keyword)
+        } else {
+            self.get_log_keyword(name)
+        }
+    }
+}
+pub(crate) struct UserEventsExporter {
+    provider: Arc<eventheader_dynamic::Provider>,
+    exporter_config: ExporterConfig,
+}
+
+const EVENT_ID: &str = "event_id";
+const EVENT_NAME_PRIMARY: &str = "event_name";
+const EVENT_NAME_SECONDARY: &str = "name";
+
+//TBD - How to configure provider name and provider group
+impl UserEventsExporter {
+    pub(crate) fn new(
+        provider_name: &str,
+        _provider_group: ProviderGroup,
+        exporter_config: ExporterConfig,
+    ) -> Self {
+        let mut options = eventheader_dynamic::Provider::new_options();
+        options = *options.group_name(provider_name);
+        let mut eventheader_provider: eventheader_dynamic::Provider =
+            eventheader_dynamic::Provider::new(provider_name, &options);
+        Self::register_keywords(&mut eventheader_provider, &exporter_config);
+        UserEventsExporter {
+            provider: Arc::new(eventheader_provider),
+            exporter_config,
+        }
+    }
+
+    fn register_events(eventheader_provider: &mut eventheader_dynamic::Provider, keyword: u64) {
+        let levels = [
+            eventheader::Level::Informational,
+            eventheader::Level::Verbose,
+            eventheader::Level::Warning,
+            eventheader::Level::Error,
+            eventheader::Level::CriticalError,
+        ];
+
+        for &level in levels.iter() {
+            eventheader_provider.register_set(level, keyword);
+        }
+    }
+
+    fn register_keywords(
+        eventheader_provider: &mut eventheader_dynamic::Provider,
+        exporter_config: &ExporterConfig,
+    ) {
+        if exporter_config.keywords_map.is_empty() {
+            println!(
+                "Register default keyword {}",
+                exporter_config.default_keyword
+            );
+            Self::register_events(eventheader_provider, exporter_config.default_keyword);
+        }
+
+        for keyword in exporter_config.keywords_map.values() {
+            Self::register_events(eventheader_provider, *keyword);
+        }
+    }
+
+    fn add_attribute_to_event(&self, eb: &mut EventBuilder, attrib: &(Key, AnyValue)) {
+        let field_name = &attrib.0.to_string();
+        match attrib.1.to_owned() {
+            AnyValue::Boolean(b) => {
+                eb.add_value(field_name, b, FieldFormat::Boolean, 0);
+            }
+            AnyValue::Int(i) => {
+                eb.add_value(field_name, i, FieldFormat::SignedInt, 0);
+            }
+            AnyValue::Double(f) => {
+                eb.add_value(field_name, f, FieldFormat::Float, 0);
+            }
+            AnyValue::String(s) => {
+                eb.add_str(field_name, &s.to_string(), FieldFormat::Default, 0);
+            }
+            _ => (),
+        }
+    }
+
+    fn get_severity_level(&self, severity: Severity) -> Level {
+        match severity {
+            Severity::Debug
+            | Severity::Debug2
+            | Severity::Debug3
+            | Severity::Debug4
+            | Severity::Trace
+            | Severity::Trace2
+            | Severity::Trace3
+            | Severity::Trace4 => eventheader::Level::Verbose,
+
+            Severity::Info | Severity::Info2 | Severity::Info3 | Severity::Info4 => {
+                eventheader::Level::Informational
+            }
+
+            Severity::Error | Severity::Error2 | Severity::Error3 | Severity::Error4 => {
+                eventheader::Level::Error
+            }
+
+            Severity::Fatal | Severity::Fatal2 | Severity::Fatal3 | Severity::Fatal4 => {
+                eventheader::Level::CriticalError
+            }
+
+            Severity::Warn | Severity::Warn2 | Severity::Warn3 | Severity::Warn4 => {
+                eventheader::Level::Warning
+            }
+        }
+    }
+
+    #[allow(dead_code)]
+    fn enabled(&self, level: u8, keyword: u64) -> bool {
+        let es = self.provider.find_set(level.into(), keyword);
+        match es {
+            Some(x) => x.enabled(),
+            _ => false,
+        }
+    }
+
+    pub(crate) fn export_log_data(
+        &self,
+        log_data: &opentelemetry_sdk::export::logs::LogData,
+    ) -> opentelemetry_sdk::export::logs::ExportResult {
+        let mut level: Level = Level::Invalid;
+        if log_data.record.severity_number.is_some() {
+            level = self.get_severity_level(log_data.record.severity_number.unwrap());
+        }
+
+        let keyword = self
+            .exporter_config
+            .get_log_keyword_or_default(log_data.instrumentation.name.as_ref());
+
+        if keyword.is_none() {
+            return Ok(());
+        }
+
+        let log_es = if let Some(es) = self
+            .provider
+            .find_set(level.as_int().into(), keyword.unwrap())
+        {
+            es
+        } else {
+            return Ok(());
+        };
+        if log_es.enabled() {
+            EBW.with(|eb| {
+                let mut eb = eb.borrow_mut();
+                let event_tags: u32 = 0; // TBD name and event_tag values
+                eb.reset(log_data.instrumentation.name.as_ref(), event_tags as u16);
+                eb.opcode(Opcode::Info);
+
+                eb.add_value("__csver__", 0x0401u16, FieldFormat::HexInt, 0);
+
+                // populate CS PartA
+                let mut cs_a_count = 0;
+                let event_time: SystemTime = log_data
+                    .record
+                    .timestamp
+                    .unwrap_or(log_data.record.observed_timestamp);
+                cs_a_count += 1; // for event_time
+                eb.add_struct("PartA", cs_a_count, 0);
+                {
+                    let time: String = chrono::DateTime::to_rfc3339(
+                        &chrono::DateTime::<chrono::Utc>::from(event_time),
+                    );
+                    eb.add_str("time", time, FieldFormat::Default, 0);
+                }
+                // populate CS PartC
+                let (mut is_event_id, mut event_id) = (false, 0);
+                let (mut is_event_name, mut event_name) = (false, "");
+
+                if let Some(attr_list) = &log_data.record.attributes {
+                    let (mut is_part_c_present, mut cs_c_bookmark, mut cs_c_count) = (false, 0, 0);
+                    for attrib in attr_list.iter() {
+                        match (attrib.0.as_str(), &attrib.1) {
+                            (EVENT_ID, AnyValue::Int(value)) => {
+                                is_event_id = true;
+                                event_id = *value;
+                                continue;
+                            }
+                            (EVENT_NAME_PRIMARY, AnyValue::String(value)) => {
+                                is_event_name = true;
+                                event_name = value.as_str();
+                                continue;
+                            }
+                            (EVENT_NAME_SECONDARY, AnyValue::String(value)) => {
+                                if !is_event_name {
+                                    event_name = value.as_str();
+                                }
+                                continue;
+                            }
+                            _ => {
+                                if !is_part_c_present {
+                                    eb.add_struct_with_bookmark("PartC", 1, 0, &mut cs_c_bookmark);
+                                    is_part_c_present = true;
+                                }
+                                self.add_attribute_to_event(&mut eb, attrib);
+                                cs_c_count += 1;
+                            }
+                        }
+                    }
+
+                    if is_part_c_present {
+                        eb.set_struct_field_count(cs_c_bookmark, cs_c_count);
+                    }
+                }
+                // populate CS PartB
+                let mut cs_b_bookmark: usize = 0;
+                let mut cs_b_count = 0;
+                eb.add_struct_with_bookmark("PartB", 1, 0, &mut cs_b_bookmark);
+                eb.add_str("_typeName", "Logs", FieldFormat::Default, 0);
+                cs_b_count += 1;
+
+                if log_data.record.body.is_some() {
+                    eb.add_str(
+                        "body",
+                        match log_data.record.body.as_ref().unwrap() {
+                            AnyValue::Int(value) => value.to_string(),
+                            AnyValue::String(value) => value.to_string(),
+                            AnyValue::Boolean(value) => value.to_string(),
+                            AnyValue::Double(value) => value.to_string(),
+                            AnyValue::Bytes(value) => String::from_utf8_lossy(value).to_string(),
+                            AnyValue::ListAny(_value) => "".to_string(),
+                            AnyValue::Map(_value) => "".to_string(),
+                        },
+                        FieldFormat::Default,
+                        0,
+                    );
+                    cs_b_count += 1;
+                }
+                if level != Level::Invalid {
+                    eb.add_value("severityNumber", level.as_int(), FieldFormat::SignedInt, 0);
+                    cs_b_count += 1;
+                }
+                if log_data.record.severity_text.is_some() {
+                    eb.add_str(
+                        "severityText",
+                        log_data.record.severity_text.as_ref().unwrap().as_ref(),
+                        FieldFormat::Default,
+                        0,
+                    );
+                    cs_b_count += 1;
+                }
+                if is_event_id {
+                    eb.add_value("eventId", event_id, FieldFormat::SignedInt, 0);
+                    cs_b_count += 1;
+                }
+                if !event_name.is_empty() {
+                    eb.add_str("name", event_name, FieldFormat::Default, 0);
+                    cs_b_count += 1;
+                }
+                eb.set_struct_field_count(cs_b_bookmark, cs_b_count);
+
+                eb.write(&log_es, None, None);
+            });
+            return Ok(());
+        }
+        Ok(())
+    }
+}
+
+impl Debug for UserEventsExporter {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.write_str("user_events log exporter")
+    }
+}
+
+#[async_trait]
+impl opentelemetry_sdk::export::logs::LogExporter for UserEventsExporter {
+    async fn export(
+        &mut self,
+        batch: Vec<opentelemetry_sdk::export::logs::LogData>,
+    ) -> opentelemetry::logs::LogResult<()> {
+        for log_data in batch {
+            let _ = self.export_log_data(&log_data);
+        }
+        Ok(())
+    }
+
+    #[cfg(feature = "logs_level_enabled")]
+    fn event_enabled(&self, level: Severity, _target: &str, name: &str) -> bool {
+        let (found, keyword) = if self.exporter_config.keywords_map.is_empty() {
+            (true, self.exporter_config.default_keyword)
+        } else {
+            // TBD - target is not used as of now for comparison.
+            match self.exporter_config.get_log_keyword(name) {
+                Some(x) => (true, x),
+                _ => (false, 0),
+            }
+        };
+        if !found {
+            return false;
+        }
+        let es = self
+            .provider
+            .find_set(self.get_severity_level(level), keyword);
+        match es {
+            Some(x) => x.enabled(),
+            _ => false,
+        }
+    }
+}
diff --git a/opentelemetry-user-events-logs/src/logs/mod.rs b/opentelemetry-user-events-logs/src/logs/mod.rs
new file mode 100644
index 00000000..7f0e2681
--- /dev/null
+++ b/opentelemetry-user-events-logs/src/logs/mod.rs
@@ -0,0 +1,5 @@
+mod exporter;
+pub use exporter::*;
+
+mod reentrant_logprocessor;
+pub use reentrant_logprocessor::*;
diff --git a/opentelemetry-user-events-logs/src/logs/reentrant_logprocessor.rs b/opentelemetry-user-events-logs/src/logs/reentrant_logprocessor.rs
new file mode 100644
index 00000000..d66447b8
--- /dev/null
+++ b/opentelemetry-user-events-logs/src/logs/reentrant_logprocessor.rs
@@ -0,0 +1,61 @@
+use std::fmt::Debug;
+
+use opentelemetry::logs::LogResult;
+use opentelemetry_sdk::export::logs::LogData;
+
+#[cfg(feature = "logs_level_enabled")]
+use opentelemetry_sdk::export::logs::LogExporter;
+
+use crate::logs::exporter::ExporterConfig;
+use crate::logs::exporter::*;
+
+/// This export processor exports without synchronization.
+/// This is currently only used in the user_events exporter, where we know
+/// that the underlying exporter is safe under concurrent calls.
+#[derive(Debug)]
+pub struct ReentrantLogProcessor {
+    event_exporter: UserEventsExporter,
+}
+
+impl ReentrantLogProcessor {
+    /// constructor
+    pub fn new(
+        provider_name: &str,
+        provider_group: ProviderGroup,
+        exporter_config: ExporterConfig,
+    ) -> Self {
+        let exporter = UserEventsExporter::new(provider_name, provider_group, exporter_config);
+        ReentrantLogProcessor {
+            event_exporter: exporter,
+        }
+    }
+}
+
+impl opentelemetry_sdk::logs::LogProcessor for ReentrantLogProcessor {
+    fn emit(&self, data: LogData) {
+        _ = self.event_exporter.export_log_data(&data);
+    }
+
+    // This is a no-op, as this processor doesn't keep anything
+    // in memory to be flushed out.
+    fn force_flush(&self) -> LogResult<()> {
+        Ok(())
+    }
+
+    // This is a no-op; no special cleanup is required before
+    // shutdown.
+    fn shutdown(&mut self) -> LogResult<()> {
+        Ok(())
+    }
+
+    #[cfg(feature = "logs_level_enabled")]
+    fn event_enabled(
+        &self,
+        level: opentelemetry::logs::Severity,
+        target: &str,
+        name: &str,
+    ) -> bool {
+        self.event_exporter.event_enabled(level, target, name)
+    }
+}
diff --git a/opentelemetry-user-events-metrics/CHANGELOG.md b/opentelemetry-user-events-metrics/CHANGELOG.md
new file mode 100644
index 00000000..6103307e
--- /dev/null
+++ b/opentelemetry-user-events-metrics/CHANGELOG.md
@@ -0,0 +1,23 @@
+# Changelog
+
+## Unreleased
+
+## v0.2.0
+
+### Fixed
+
+- Fix aggregation selector and temporality so all instruments are aggregated
+  correctly with the expected delta temporality.
+  [#1287](https://github.com/open-telemetry/opentelemetry-rust/pull/1287).
+
+### Changed
+
+- Bump MSRV to 1.65 [#1318](https://github.com/open-telemetry/opentelemetry-rust/pull/1318)
+- Include error diagnosing messages for registering tracepoint
+  [#1273](https://github.com/open-telemetry/opentelemetry-rust/pull/1273).
+- Add version, protocol to schema
+  [#1224](https://github.com/open-telemetry/opentelemetry-rust/pull/1224).
+
+## v0.1.0
+
+### Added
+
+- Initial Alpha implementation
diff --git a/opentelemetry-user-events-metrics/CODEOWNERS b/opentelemetry-user-events-metrics/CODEOWNERS
new file mode 100644
index 00000000..d6962a90
--- /dev/null
+++ b/opentelemetry-user-events-metrics/CODEOWNERS
@@ -0,0 +1,5 @@
+# Code owners file.
+# This file controls who is tagged for review for any given pull request.
+
+# For anything not explicitly taken by someone else:
+* @open-telemetry/rust-approvers
diff --git a/opentelemetry-user-events-metrics/Cargo.toml b/opentelemetry-user-events-metrics/Cargo.toml
new file mode 100644
index 00000000..c050105f
--- /dev/null
+++ b/opentelemetry-user-events-metrics/Cargo.toml
@@ -0,0 +1,26 @@
+[package]
+name = "opentelemetry-user-events-metrics"
+version = "0.2.0"
+description = "OpenTelemetry metrics exporter to user events"
+homepage = "https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/opentelemetry-user-events-metrics"
+repository = "https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/opentelemetry-user-events-metrics"
+readme = "README.md"
+keywords = ["opentelemetry", "metrics", "user-events"]
+license = "Apache-2.0"
+edition = "2021"
+rust-version = "1.65"
+
+[dependencies]
+opentelemetry = { version = "0.21", features = ["metrics"] }
+opentelemetry_sdk = { version = "0.21", features = ["metrics", "rt-tokio"] }
+opentelemetry-proto = { version = "0.4", features = ["gen-tonic", "metrics"] }
+eventheader = { version = "= 0.3.2" }
+async-trait = "0.1"
+prost = "0.11"
+
+[dev-dependencies]
+tokio = { version = "1.0", features = ["full"] }
+
+[[example]]
+name = "basic"
+path = "examples/basic.rs"
diff --git a/opentelemetry-user-events-metrics/LICENSE b/opentelemetry-user-events-metrics/LICENSE
new file mode 100644
index 00000000..261eeb9e
--- /dev/null
+++ b/opentelemetry-user-events-metrics/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/opentelemetry-user-events-metrics/README.md b/opentelemetry-user-events-metrics/README.md
new file mode 100644
index 00000000..20d186b8
--- /dev/null
+++ b/opentelemetry-user-events-metrics/README.md
@@ -0,0 +1,17 @@
+![OpenTelemetry — An observability framework for cloud-native software.][splash]
+
+[splash]: https://raw.githubusercontent.com/open-telemetry/opentelemetry-rust/main/assets/logo-text.png
+
+# OpenTelemetry user_events Exporter
+
+## Overview
+
+[user_events](https://docs.kernel.org/trace/user_events.html) is a Linux solution for user-process tracing, similar to ETW (Event Tracing for Windows) on Windows. It builds on top of Linux Tracepoints, allowing user processes to create events and trace data that can be viewed with existing tools like ftrace and perf.
+
+This kernel feature is available from Linux kernel 5.18 onwards. It enables
+ - a faster path for tracing from user-mode applications, utilizing kernel-mode memory address space;
+ - user processes to emit telemetry events only when it is useful, i.e., when the registered set of tracepoint events is enabled.
+
+ This user_events exporter enables applications to use the OpenTelemetry API to capture telemetry events and write them to the user_events subsystem. From user_events, the events can be
+ - captured by agents running locally, listening for specific events within the user_events subsystem, or
+ - monitored in real time using local Linux tools like [perf](https://perf.wiki.kernel.org/index.php/Main_Page) or ftrace.
diff --git a/opentelemetry-user-events-metrics/examples/basic.rs b/opentelemetry-user-events-metrics/examples/basic.rs
new file mode 100644
index 00000000..83e52a38
--- /dev/null
+++ b/opentelemetry-user-events-metrics/examples/basic.rs
@@ -0,0 +1,53 @@
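+//! Prerequisites (per the error handling in `src/tracepoint`): a Linux kernel
+//! with user_events support (5.18+), mounted trace/debug file systems, and
+//! sufficient permissions on the user_events tracing directory.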
+//! Run with `$ cargo run --example basic --all-features`
+use opentelemetry::{
+    metrics::{MeterProvider as _, Unit},
+    KeyValue,
+};
+use opentelemetry_sdk::{
+    metrics::{PeriodicReader, MeterProvider as SdkMeterProvider},
+    runtime, Resource,
+};
+use opentelemetry_user_events_metrics::MetricsExporter;
+
+fn init_metrics(exporter: MetricsExporter) -> SdkMeterProvider {
+    let reader = PeriodicReader::builder(exporter, runtime::Tokio).build();
+    SdkMeterProvider::builder()
+        .with_resource(Resource::new(vec![KeyValue::new(
+            "service.name",
+            "metric-demo",
+        )]))
+        .with_reader(reader)
+        .build()
+}
+
+#[tokio::main]
+#[allow(unused_must_use)]
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
+    let exporter = opentelemetry_user_events_metrics::MetricsExporter::new();
+    let meter_provider = init_metrics(exporter);
+
+    let meter = meter_provider.versioned_meter(
+        "user-event-test",
+        Some("test-version"),
+        Some("test_url"),
+        Some(vec![KeyValue::new("key", "value")]),
+    );
+    let c = meter
+        .f64_counter("counter_test")
+        .with_description("test_description")
+        .with_unit(Unit::new("test_unit"))
+        .init();
+
+    c.add(
+        1.0,
+        [
+            KeyValue::new("mykey1", "myvalue1"),
+            KeyValue::new("mykey2", "myvalue2"),
+        ]
+        .as_ref(),
+    );
+
+    meter_provider.shutdown()?;
+
+    Ok(())
+}
diff --git a/opentelemetry-user-events-metrics/src/exporter/mod.rs b/opentelemetry-user-events-metrics/src/exporter/mod.rs
new file mode 100644
index 00000000..c6e3caca
--- /dev/null
+++ b/opentelemetry-user-events-metrics/src/exporter/mod.rs
@@ -0,0 +1,92 @@
+use crate::transform::transform_resource_metrics;
+use async_trait::async_trait;
+use opentelemetry::metrics::{MetricsError, Result};
+use opentelemetry_sdk::metrics::{
+    data::{ResourceMetrics, Temporality},
+    exporter::PushMetricsExporter,
+    reader::{AggregationSelector, DefaultAggregationSelector, TemporalitySelector},
+    Aggregation, InstrumentKind,
+};
+
+use crate::tracepoint;
+use eventheader::_internal as ehi;
+use prost::Message;
+use std::fmt::{Debug, Formatter};
+use std::pin::Pin;
+
+pub struct MetricsExporter {
+    trace_point: Pin<Box<ehi::TracepointState>>,
+}
+
+impl MetricsExporter {
+    pub fn new() -> MetricsExporter {
+        let trace_point = Box::pin(ehi::TracepointState::new(0));
+        // This is unsafe because if the code is used in a shared object,
+        // the event MUST be unregistered before the shared object unloads.
+        unsafe {
+            let _result = tracepoint::register(trace_point.as_ref());
+        }
+        MetricsExporter { trace_point }
+    }
+}
+
+impl Default for MetricsExporter {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl TemporalitySelector for MetricsExporter {
+    // This matches the delta temporality preference of the OTLP exporters.
+    fn temporality(&self, kind: InstrumentKind) -> Temporality {
+        match kind {
+            InstrumentKind::Counter
+            | InstrumentKind::ObservableCounter
+            | InstrumentKind::ObservableGauge
+            | InstrumentKind::Histogram => Temporality::Delta,
+            InstrumentKind::UpDownCounter | InstrumentKind::ObservableUpDownCounter => {
+                Temporality::Cumulative
+            }
+        }
+    }
+}
+
+impl AggregationSelector for MetricsExporter {
+    // TODO: this should ideally be done at the SDK level by default,
+    // without exporters having to do it.
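+    // DefaultAggregationSelector maps each instrument kind to the SDK default:
+    // sums for counters, last-value for observable gauges, and explicit-bucket
+    // histograms for histograms.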
+ fn aggregation(&self, kind: InstrumentKind) -> Aggregation { + DefaultAggregationSelector::new().aggregation(kind) + } +} + +impl Debug for MetricsExporter { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.write_str("user_events metrics exporter") + } +} + +#[async_trait] +impl PushMetricsExporter for MetricsExporter { + async fn export(&self, metrics: &mut ResourceMetrics) -> Result<()> { + if self.trace_point.enabled() { + let proto_message = transform_resource_metrics(metrics); + + let mut byte_array = Vec::new(); + let _encode_result = proto_message + .encode(&mut byte_array) + .map_err(|err| MetricsError::Other(err.to_string()))?; + let _result = tracepoint::write(&self.trace_point, byte_array.as_slice()); + } + Ok(()) + } + + async fn force_flush(&self) -> Result<()> { + Ok(()) // In this implementation, flush does nothing + } + + fn shutdown(&self) -> Result<()> { + // TracepointState automatically unregisters when dropped + // https://github.com/microsoft/LinuxTracepoints-Rust/blob/main/eventheader/src/native.rs#L618 + Ok(()) + } +} diff --git a/opentelemetry-user-events-metrics/src/lib.rs b/opentelemetry-user-events-metrics/src/lib.rs new file mode 100644 index 00000000..5517451e --- /dev/null +++ b/opentelemetry-user-events-metrics/src/lib.rs @@ -0,0 +1,5 @@ +mod exporter; +mod tracepoint; +mod transform; + +pub use exporter::MetricsExporter; diff --git a/opentelemetry-user-events-metrics/src/tracepoint/mod.rs b/opentelemetry-user-events-metrics/src/tracepoint/mod.rs new file mode 100644 index 00000000..acddfe38 --- /dev/null +++ b/opentelemetry-user-events-metrics/src/tracepoint/mod.rs @@ -0,0 +1,117 @@ +use core::ffi; +use eventheader::_internal as ehi; +use opentelemetry::{global, metrics::MetricsError}; +use std::panic; +use std::pin::Pin; + +/// Protocol constant +const PROTOCOL_FIELD_VALUE: u32 = 0; +/// Protobuf definition version +const PROTOBUF_VERSION: &[u8; 8] = b"v0.19.00"; + +/// This is the command string for the event. It needs to follow the +/// [Command Format](https://docs.kernel.org/trace/user_events.html#command-format) +/// syntax, it needs to end with a "\0", and it needs to stay in sync with the +/// write function. +/// +/// Syntax is: "EventName Field1Type Field1Name;Field2Type Field2Name". +/// +/// For this event: +/// +/// - Event is named "otlp_metrics". +/// - Field 1 is named "protocol". Value 0 corresponds to protobuf. +/// - Field 2 is named "version". Corresponds to protocol version (protobuf version). +/// - Field 3 is named "buffer" and has type "variable-length array of u8". +/// +/// "__rel_loc" is a special type for variable-length fields. It requires +/// special handling in the write() method. +const METRICS_EVENT_DEF: &[u8] = + b"otlp_metrics u32 protocol;char[8] version;__rel_loc u8[] buffer;\0"; + +/// If the tracepoint is registered and enabled, writes an event. If the tracepoint +/// is unregistered or disabled, this does nothing and returns 0. You should usually +/// check [`enabled()`] and only build the buffer and call `write()` if `enabled()` +/// returns true. +/// +/// Requires: PROTOBUF_VERSION.len() == 8, buffer.len() < 65536. +/// +/// Return value is 0 for success or an errno code for error. The return value is +/// provided to help with debugging and should usually be ignored in release builds. +pub fn write(trace_point: &ehi::TracepointState, buffer: &[u8]) -> i32 { + // This must stay in sync with the METRICS_EVENT_DEF string. 
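+    // The descriptors written below mirror METRICS_EVENT_DEF: a zero()
+    // placeholder for the event headers, the u32 protocol value, the char[8]
+    // version, the u32 rel_loc word, and finally the variable-length buffer.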
+ // Return error -1 if buffer exceeds max size + if buffer.len() > u16::MAX as usize { + eprintln!("Buffer exceeds max length."); + return -1; + } + + if PROTOBUF_VERSION.len() != 8 { + eprintln!("Version must be char[8]."); + return -1; + } + + // The rel_loc for the buffer field stores the size and offset of the buffer. + // - High 16 bits store the size = buffer.len() + // - Low 16 bits store the offset of the buffer from the end of the rel_loc field = 0. + let buffer_rel_loc: u32 = (buffer.len() as u32) << 16; + + trace_point.write(&mut [ + // mut because the write method does some fix-ups. + ehi::EventDataDescriptor::zero(), // First item before buffer MUST be zero(). + ehi::EventDataDescriptor::from_value(&PROTOCOL_FIELD_VALUE), // protocol value 0 for protobuf + ehi::EventDataDescriptor::from_slice(PROTOBUF_VERSION), // protobuf definition version + ehi::EventDataDescriptor::from_value(&buffer_rel_loc), // rel_loc for the buffer field. + ehi::EventDataDescriptor::from_slice(buffer), // buffer field. + ]) +} + +/// Registers the passed in tracepoint. +/// +/// Requires: this tracepoint is not currently registered. +/// The tracepoint must be in a Pin<&TracepointState> because we must ensure it will never be moved +/// +/// Return value is 0 for success or -1 for failed register. +/// +/// # Safety +/// +/// If this code is used in a shared object, the tracepoint MUST be +/// unregistered before the shared object unloads from memory. +pub unsafe fn register(trace_point: Pin<&ehi::TracepointState>) -> i32 { + debug_assert!(METRICS_EVENT_DEF[METRICS_EVENT_DEF.len() - 1] == b'\0'); + + // CStr::from_bytes_with_nul_unchecked is ok because METRICS_EVENT_DEF ends with "\0". + // Returns errno code 95 if trace/debug file systems are not mounted + // Returns errno code 13 if insufficient permissions + // If tracepoint doesn't exist, it will create one automatically + let result = panic::catch_unwind(|| { + // CStr::from_bytes_with_nul_unchecked is ok because METRICS_EVENT_DEF ends with "\0". + trace_point.register(ffi::CStr::from_bytes_with_nul_unchecked(METRICS_EVENT_DEF)) + }); + + match result { + Ok(value) => { + if value == 0 { + // Temporary print as a measure for quick testing + // will be replaced with proper logging mechanism + println!("Tracepoint registered successfully.") + } else if value == 95 { + global::handle_error(MetricsError::Other( + "Trace/debug file systems are not mounted.".into(), + )); + } else if value == 13 { + global::handle_error(MetricsError::Other( + "Insufficient permissions. 
You need read/write/execute permissions to the user_events tracing directory.".into(),
+                ));
+            }
+            value
+        }
+        // We never want to panic, so we catch the error and return a unique code for retry.
+        Err(err) => {
+            global::handle_error(MetricsError::Other(format!(
+                "Tracepoint failed to register: {:?}.",
+                err,
+            )));
+            -1
+        }
+    }
+}
diff --git a/opentelemetry-user-events-metrics/src/transform/mod.rs b/opentelemetry-user-events-metrics/src/transform/mod.rs
new file mode 100644
index 00000000..ec736485
--- /dev/null
+++ b/opentelemetry-user-events-metrics/src/transform/mod.rs
@@ -0,0 +1,117 @@
+use opentelemetry::{global, metrics::MetricsError};
+use opentelemetry_proto::tonic::common::v1::InstrumentationScope as TonicInstrumentationScope;
+use opentelemetry_proto::tonic::resource::v1::Resource as TonicResource;
+use opentelemetry_proto::tonic::{
+    collector::metrics::v1::ExportMetricsServiceRequest,
+    metrics::v1::{
+        exemplar::Value as TonicExemplarValue, metric::Data as TonicMetricData,
+        number_data_point::Value as TonicDataPointValue,
+        AggregationTemporality as TonicTemporality, DataPointFlags as TonicDataPointFlags,
+        Metric as TonicMetric, NumberDataPoint as TonicNumberDataPoint,
+        ResourceMetrics as TonicResourceMetrics, ScopeMetrics as TonicScopeMetrics,
+        Sum as TonicSum,
+    },
+};
+use opentelemetry_sdk::metrics::data::{
+    Metric as SdkMetric, ResourceMetrics as SDKResourceMetrics, ScopeMetrics as SdkScopeMetrics,
+    Sum as SdkSum,
+};
+use opentelemetry_sdk::Resource as SdkResource;
+use std::any::Any;
+use std::fmt;
+use std::time::{Duration, SystemTime, UNIX_EPOCH};
+
+pub(crate) fn transform_resource_metrics(
+    metrics: &SDKResourceMetrics,
+) -> ExportMetricsServiceRequest {
+    ExportMetricsServiceRequest {
+        resource_metrics: vec![TonicResourceMetrics {
+            resource: transform_resource(&metrics.resource),
+            scope_metrics: transform_scope_metrics(&metrics.scope_metrics),
+            schema_url: metrics
+                .resource
+                .schema_url()
+                .map(Into::into)
+                .unwrap_or_default(),
+        }],
+    }
+}
+
+fn transform_resource(r: &SdkResource) -> Option<TonicResource> {
+    if r.is_empty() {
+        return None;
+    }
+
+    Some(TonicResource {
+        attributes: r.iter().map(Into::into).collect(),
+        dropped_attributes_count: 0,
+    })
+}
+
+fn transform_scope_metrics(sms: &[SdkScopeMetrics]) -> Vec<TonicScopeMetrics> {
+    sms.iter()
+        .map(|sm| TonicScopeMetrics {
+            scope: Some(TonicInstrumentationScope::from(&sm.scope)),
+            metrics: transform_metrics(&sm.metrics),
+            schema_url: sm
+                .scope
+                .schema_url
+                .as_ref()
+                .map(ToString::to_string)
+                .unwrap_or_default(),
+        })
+        .collect()
+}
+
+fn transform_metrics(metrics: &[SdkMetric]) -> Vec<TonicMetric> {
+    metrics
+        .iter()
+        .map(|metric| TonicMetric {
+            name: metric.name.to_string(),
+            description: metric.description.to_string(),
+            unit: metric.unit.as_str().to_string(),
+            data: transform_data(metric.data.as_any()),
+        })
+        .collect()
+}
+
+fn transform_data(data: &dyn Any) -> Option<TonicMetricData> {
+    if let Some(sum) = data.downcast_ref::<SdkSum<u64>>() {
+        Some(TonicMetricData::Sum(transform_sum(sum)))
+    } else if let Some(sum) = data.downcast_ref::<SdkSum<i64>>() {
+        Some(TonicMetricData::Sum(transform_sum(sum)))
+    } else if let Some(sum) = data.downcast_ref::<SdkSum<f64>>() {
+        Some(TonicMetricData::Sum(transform_sum(sum)))
+    } else {
+        global::handle_error(MetricsError::Other("unknown aggregator".into()));
+        None
+    }
+}
+
+fn transform_sum<T: Into<TonicExemplarValue> + Into<TonicDataPointValue> + Copy>(
+    sum: &SdkSum<T>,
+) -> TonicSum {
+    TonicSum {
+        data_points: sum
+            .data_points
+            .iter()
+            .map(|dp| TonicNumberDataPoint {
+                attributes: dp.attributes.iter().map(Into::into).collect(),
+                start_time_unix_nano: dp.start_time.map(to_nanos).unwrap_or_default(),
+                time_unix_nano: dp.time.map(to_nanos).unwrap_or_default(),
+                // No support for exemplars
+                exemplars: Vec::new(),
+                flags: TonicDataPointFlags::default() as u32,
+                value: Some(dp.value.into()),
+            })
+            .collect(),
+        aggregation_temporality: TonicTemporality::from(sum.temporality).into(),
+        is_monotonic: sum.is_monotonic,
+    }
+}
+
+fn to_nanos(time: SystemTime) -> u64 {
+    time.duration_since(UNIX_EPOCH)
+        .unwrap_or_else(|_| Duration::from_secs(0))
+        .as_nanos() as u64
+}
diff --git a/opentelemetry-zpages/CHANGELOG.md b/opentelemetry-zpages/CHANGELOG.md
new file mode 100644
index 00000000..2290944e
--- /dev/null
+++ b/opentelemetry-zpages/CHANGELOG.md
@@ -0,0 +1,44 @@
+# Changelog
+
+## vNext
+
+WARNING: The current version relies on features that are only available in the upstream git version. This should be addressed before releasing.
+
+## v0.6.0
+
+### Changed
+
+- Bump MSRV to 1.65 [#1318](https://github.com/open-telemetry/opentelemetry-rust/pull/1318)
+- Bump MSRV to 1.64 [#1203](https://github.com/open-telemetry/opentelemetry-rust/pull/1203)
+- Use tonic based generated files [#1214](https://github.com/open-telemetry/opentelemetry-rust/pull/1214)
+
+## v0.5.0
+
+### Updates
+
+- Update to opentelemetry-api v0.20.0
+
+## v0.4.0
+
+- Update to opentelemetry v0.19.0
+- Update to opentelemetry-proto v0.2.0
+- Bump MSRV to 1.57 [#953](https://github.com/open-telemetry/opentelemetry-rust/pull/953).
+- Update dependencies and bump MSRV to 1.60 [#969](https://github.com/open-telemetry/opentelemetry-rust/pull/969).
+
+## v0.3.0
+
+### Changed
+
+- Update to opentelemetry v0.18.0
+
+## v0.2.0
+
+### Changed
+
+- Update to opentelemetry v0.17.0
+
+## v0.1.0
+
+### Added
+
+- Add Tracez http endpoint.
diff --git a/opentelemetry-zpages/CODEOWNERS b/opentelemetry-zpages/CODEOWNERS
new file mode 100644
index 00000000..d6962a90
--- /dev/null
+++ b/opentelemetry-zpages/CODEOWNERS
@@ -0,0 +1,5 @@
+# Code owners file.
+# This file controls who is tagged for review for any given pull request.
+
+# For anything not explicitly taken by someone else:
+* @open-telemetry/rust-approvers
diff --git a/opentelemetry-zpages/Cargo.toml b/opentelemetry-zpages/Cargo.toml
new file mode 100644
index 00000000..af9053fc
--- /dev/null
+++ b/opentelemetry-zpages/Cargo.toml
@@ -0,0 +1,41 @@
+[package]
+name = "opentelemetry-zpages"
+version = "0.6.0"
+description = "ZPages implementation for OpenTelemetry"
+homepage = "https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/opentelemetry-zpages"
+repository = "https://github.com/open-telemetry/opentelemetry-rust-contrib/tree/main/opentelemetry-zpages"
+readme = "README.md"
+categories = [
+    "development-tools::debugging",
+    "development-tools::profiling",
+    "asynchronous",
+]
+keywords = ["opentelemetry", "zipkin", "tracing", "async"]
+license = "Apache-2.0"
+edition = "2021"
+rust-version = "1.65"
+
+[package.metadata.docs.rs]
+all-features = true
+rustdoc-args = ["--cfg", "docsrs"]
+
+[dependencies]
+# TODO: Set to published version before pushing.
+opentelemetry = { git = "https://github.com/open-telemetry/opentelemetry-rust.git", branch = "main" }
+opentelemetry_sdk = { git = "https://github.com/open-telemetry/opentelemetry-rust.git", branch = "main", default-features = false, features = ["trace"] }
+opentelemetry-proto = { version = "0.4", features = ["zpages", "gen-tonic", "with-serde"], default-features = false }
+async-channel = "1.6"
+futures-channel = "0.3"
+futures-util = { version = "0.3", default-features = false, features = ["std"] }
+serde = "1.0"
+serde_json = "1.0"
+
+[dev-dependencies]
+tokio = { version = "1.0", features = ["macros", "rt"] }
+opentelemetry_sdk = { version = "0.21", features = ["trace", "testing"] }
+rand = "0.8"
+hyper = { version = "0.14", features = ["full"] }
+
+[[example]]
+name = "zpages"
+path = "examples/zpages.rs"
diff --git a/opentelemetry-zpages/DESIGN.md b/opentelemetry-zpages/DESIGN.md
new file mode 100644
index 00000000..16199599
--- /dev/null
+++ b/opentelemetry-zpages/DESIGN.md
@@ -0,0 +1,60 @@
+# Design proposal
+
+## Problem statement
+> zPages are an in-process alternative to external exporters. When included, they collect and aggregate tracing and metrics information in the background; this data is served on web pages when requested.
+
+As noted in the [OpenTelemetry zPages spec](https://github.com/open-telemetry/opentelemetry-specification/blob/main/experimental/trace/zpages.md), zPages is a tool that helps diagnose application issues as well as instrumentation issues without an external service.
+
+There are several types of zPages defined in the spec. Currently, we only implement Tracez.
+
+## Prior art
+Many OpenTelemetry language clients already implement at least part of the zPages service, for example [C++](https://github.com/open-telemetry/opentelemetry-cpp/blob/main/ext/src/zpages/README.md).
+
+## Overall design
+
+<details>
+<summary>Diagram</summary>
+
+```
+ ┌────────┐ Register ┌─────────────────────────┐ ZPage Message ┌────────────────────────┐
+ │  Span  ├─────────►│   ZPage Span Processor  ├──────────────►│     Span Aggregator    │
+ └────────┘          └─────────────────────────┘               └───────────▲────────────┘
+                                                                           │
+                     ┌─────────────────────────┐                           │
+                     │                         │                           │
+                     │        Web Server       │                           │
+                     │                         │                           │
+                     │   ┌─────────────────┐   │        ZPage Query        │
+                     │   │    Serializer   │   ├───────────────────────────┘
+                     │   │                 │   │
+                     │   └─────────────────┘   │
+                     │                         │
+                     │                         │
+                     └─────────────────────────┘
+```
+</details>
+
+### ZPages Span Processor
+This struct is needed mainly to integrate with the existing tracing API. Most of its work is delegated to the `Span Aggregator`. This struct will implement the `Span Processor` and `Tracez` traits.
+
+### Span Aggregator
+The span aggregator maintains an internal data store that lets users track:
+1. The number of currently running spans.
+2. The number of errored spans.
+3. The number of spans in different latency buckets.
+4. Examples of currently running spans.
+5. Examples of errored spans.
+6. Examples of spans with different run times, distributed across 9 latency buckets.
+
+The span aggregator maintains a worker loop to handle the messages from the zPages span processor and the web server. This worker loop is non-blocking, so the zPages span processor will not block span export at any point.
+
+
+## Design ideas
+1. Span aggregator embedded into the zPages span processor
+
+One alternative to using channels is to embed the span aggregator into the zPages span processor. Then, whenever a span starts or ends, or an HTTP request comes in, we lock the span aggregator to change its state.
+
+However, this approach blocks the `on_start` or `on_end` methods of the zPages span processor whenever the span aggregator is busy serving an HTTP request, which in turn blocks the span processor chain from moving forward when a span ends.
+
+This approach could have avoided the cloning when a span starts. But unfortunately the current span API doesn't allow us to get the span name without cloning the `Span` into a `SpanData` object. Thus, the cloning cannot be avoided even if we embed the span aggregator into the zPages span processor.
diff --git a/opentelemetry-zpages/LICENSE b/opentelemetry-zpages/LICENSE
new file mode 100644
index 00000000..23a2acab
--- /dev/null
+++ b/opentelemetry-zpages/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2023 The OpenTelemetry Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/opentelemetry-zpages/README.md b/opentelemetry-zpages/README.md new file mode 100644 index 00000000..b05ec7c9 --- /dev/null +++ b/opentelemetry-zpages/README.md @@ -0,0 +1,23 @@ +![OpenTelemetry — An observability framework for cloud-native software.][splash] + +[splash]: https://raw.githubusercontent.com/open-telemetry/opentelemetry-rust/master/assets/logo-text.png + +# OpenTelemetry ZPages + +ZPages server written in Rust + +[![GitHub Actions CI](https://github.com/open-telemetry/opentelemetry-rust/workflows/CI/badge.svg)](https://github.com/open-telemetry/opentelemetry-rust/actions?query=workflow%3ACI+branch%3Amaster) +[![Slack](https://img.shields.io/badge/slack-@cncf/otel/rust-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C03GDP0H023) + +## Overview + +zPages are an in-process alternative to external exporters. When included, they collect and aggregate tracing and metrics information in the background; this data is served on web pages or APIs when requested. 
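+
+A minimal setup mirrors the crate documentation: install the Tracez span processor on a `TracerProvider`, then serve queries from the returned querier. The sketch below assumes the tokio runtime and a sample size of 5 spans per category:
+
+```rust
+use opentelemetry::global;
+use opentelemetry_sdk::{runtime::Tokio, trace::TracerProvider};
+use opentelemetry_zpages::tracez;
+
+#[tokio::main]
+async fn main() {
+    // The processor feeds the aggregator; the querier reads from it.
+    let (processor, querier) = tracez(5, Tokio);
+    let provider = TracerProvider::builder()
+        .with_span_processor(processor)
+        .build();
+    global::set_tracer_provider(provider);
+    // Hand `querier` to an HTTP handler; see examples/zpages.rs.
+}
+```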
+
+This crate is still a work in progress; please find its current limitations below.
+
+Note that this crate is still in an **experimental** state. Breaking changes can still happen. Some features may still be in development.
+
+## Tracez
+
+Tracez shows information on tracing, including aggregation counts for latency, running, and errors for spans grouped by the span name.
+
diff --git a/opentelemetry-zpages/examples/README.md b/opentelemetry-zpages/examples/README.md
new file mode 100644
index 00000000..9148b831
--- /dev/null
+++ b/opentelemetry-zpages/examples/README.md
@@ -0,0 +1,12 @@
+# ZPages Example
+
+In this example, we demonstrate how to use zPages to analyze spans.
+
+Run the following command to start the server on `localhost:3000`
+```bash
+cargo run --example zpages
+```
+
+1. Then try to access the `localhost:3000/running` endpoint. Each request sent to this endpoint will generate a trace whose latency is between 1 ms and 6 s. The latency of each trace will be printed to the console.
+
+2. Check `localhost:3000/tracez/api/aggregations` to see the count of running spans, errored spans, and spans in the different latency buckets.
\ No newline at end of file
diff --git a/opentelemetry-zpages/examples/zpages.rs b/opentelemetry-zpages/examples/zpages.rs
new file mode 100644
index 00000000..457a8c8d
--- /dev/null
+++ b/opentelemetry-zpages/examples/zpages.rs
@@ -0,0 +1,110 @@
+use hyper::http::{Request, Response};
+use hyper::service::{make_service_fn, service_fn};
+use hyper::{Body, Server};
+use opentelemetry::trace::Tracer;
+use opentelemetry::{
+    global,
+    trace::{Span, Status},
+};
+use opentelemetry_sdk::runtime::Tokio;
+use opentelemetry_sdk::trace::TracerProvider;
+use opentelemetry_zpages::{tracez, TracezError, TracezQuerier, TracezResponse};
+use rand::Rng;
+use std::str::FromStr;
+use std::sync::Arc;
+use std::{convert::Infallible, net::SocketAddr};
+use tokio::time::Duration;
+
+async fn handler(
+    req: Request<Body>,
+    querier: Arc<TracezQuerier>,
+) -> Result<Response<Body>, Infallible> {
+    Ok::<_, Infallible>(match req.uri().path() {
+        uri if uri.starts_with("/tracez/api") => {
+            // This is an API call.
+            let parts = uri
+                .split('/')
+                .filter(|x| !x.is_empty())
+                .collect::<Vec<&str>>();
+            if parts.len() < 3 {
+                Response::builder().status(404).body(Body::empty()).unwrap()
+            } else {
+                let operation_name = *(parts.get(2).unwrap_or(&""));
+                match operation_name {
+                    "aggregations" => tracez_response_or_server_error(querier.aggregation().await),
+                    "running" => {
+                        if let Some(&span_name) = parts.get(3) {
+                            tracez_response_or_server_error(querier.running(span_name.into()).await)
+                        } else {
+                            Response::builder().status(404).body(Body::empty()).unwrap()
+                        }
+                    }
+                    "error" => {
+                        if let Some(&span_name) = parts.get(3) {
+                            tracez_response_or_server_error(querier.error(span_name.into()).await)
+                        } else {
+                            Response::builder().status(404).body(Body::empty()).unwrap()
+                        }
+                    }
+                    "latency" => {
+                        let bucket_index = parts.get(3);
+                        let span_name = parts.get(4);
+                        match (bucket_index, span_name) {
+                            (Some(&bucket_index), Some(&span_name)) => {
+                                if let Ok(bucket_index) = u32::from_str(bucket_index) {
+                                    tracez_response_or_server_error(
+                                        querier
+                                            .latency(bucket_index as usize, span_name.into())
+                                            .await,
+                                    )
+                                } else {
+                                    Response::builder().status(404).body(Body::empty()).unwrap()
+                                }
+                            }
+                            (_, _) => Response::builder().status(404).body(Body::empty()).unwrap(),
+                        }
+                    }
+                    _ => Response::builder().status(404).body(Body::empty()).unwrap(),
+                }
+            }
+        }
+        "/running" => {
+            let span_duration = Duration::from_millis(rand::thread_rng().gen_range(1..6000));
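+            // The randomized duration spreads requests across several of the
+            // nine tracez latency buckets.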
+            let mut span = global::tracer("zpages-test").start("running-spans");
+            span.set_status(Status::Ok);
+            tokio::time::sleep(span_duration).await;
+            println!("The span slept for {} ms", span_duration.as_millis());
+            Response::new(Body::empty())
+        }
+        _ => Response::builder().status(404).body(Body::empty()).unwrap(),
+    })
+}
+
+fn tracez_response_or_server_error(resp: Result<TracezResponse, TracezError>) -> Response<Body> {
+    match resp {
+        Ok(resp) => Response::new(Body::from(serde_json::to_string(&resp).unwrap())),
+        Err(_) => Response::builder().status(500).body(Body::empty()).unwrap(),
+    }
+}
+
+#[tokio::main]
+async fn main() {
+    let (processor, querier) = tracez(5, Tokio);
+    let provider = TracerProvider::builder()
+        .with_span_processor(processor)
+        .build();
+    global::set_tracer_provider(provider);
+    let querier = Arc::new(querier);
+
+    let addr = SocketAddr::from(([127, 0, 0, 1], 3000));
+
+    let server = Server::bind(&addr).serve(make_service_fn(move |_conn| {
+        let inner = Arc::clone(&querier);
+        async move { Ok::<_, Infallible>(service_fn(move |req| handler(req, Arc::clone(&inner)))) }
+    }));
+
+    println!("Listening on {addr}");
+    if let Err(e) = server.await {
+        eprintln!("server error: {e}");
+    }
+}
diff --git a/opentelemetry-zpages/src/lib.rs b/opentelemetry-zpages/src/lib.rs
new file mode 100644
index 00000000..7842c10d
--- /dev/null
+++ b/opentelemetry-zpages/src/lib.rs
@@ -0,0 +1,69 @@
+//! ZPages implementation for OpenTelemetry
+//!
+//! # Overview
+//! zPages are an in-process alternative to external exporters. When included,
+//! they collect and aggregate tracing and metrics information in the
+//! background; this data is served on web pages or APIs when requested.
+//!
+//! Currently only the Tracez components are available, and some of them are
+//! still a work in progress. Known limitations include:
+//! - The sampled running span doesn't reflect the changes made to the span.
+//! - The API only returns JSON responses.
+//! - Users have to build their own HTTP server from the components provided.
+//!
+//! # Getting started
+//! The first step is to instantiate the [`ZPagesSpanProcessor`] and install it in the [`TracerProvider`].
+//!
+//! ```no_run
+//! # use opentelemetry_zpages::tracez;
+//! # use opentelemetry::{global, trace::Tracer};
+//! # use opentelemetry_sdk::{runtime::Tokio, trace::TracerProvider};
+//! # use std::sync::Arc;
+//!
+//! # fn main() {
+//!     let (processor, querier) = tracez(5, Tokio);
+//!     let provider = TracerProvider::builder()
+//!         .with_span_processor(processor)
+//!         .build();
+//!     global::set_tracer_provider(provider);
+//! # }
+//! ```
+//!
+//! Once the [`ZPagesSpanProcessor`] is installed, it will record spans as
+//! they start and end.
+//!
+//! Users can then use the [`TracezQuerier`] to query the aggregated span information.
+//!
+//! A detailed example can also be found [here].
+//!
+//!
+//! [`ZPagesSpanProcessor`]: trace::span_processor::ZPagesSpanProcessor
+//! [`TracerProvider`]: opentelemetry_sdk::trace::TracerProvider
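+//!
+//! A minimal sketch of querying (error handling omitted):
+//!
+//! ```no_run
+//! # use opentelemetry_sdk::runtime::Tokio;
+//! # use opentelemetry_zpages::tracez;
+//! # async fn serve() {
+//! let (_processor, querier) = tracez(5, Tokio);
+//! // Aggregation counts running, errored and latency-bucketed spans per name.
+//! let aggregation = querier.aggregation().await;
+//! # }
+//! ```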
+//! [here]: https://github.com/open-telemetry/opentelemetry-rust/tree/main/examples/zpages
+#![warn(
+    future_incompatible,
+    missing_debug_implementations,
+    missing_docs,
+    nonstandard_style,
+    rust_2018_idioms,
+    unreachable_pub,
+    unused
+)]
+#![allow(elided_lifetimes_in_paths)]
+#![cfg_attr(
+    docsrs,
+    feature(doc_cfg, doc_auto_cfg),
+    deny(rustdoc::broken_intra_doc_links)
+)]
+#![doc(
+    html_logo_url = "https://raw.githubusercontent.com/open-telemetry/opentelemetry-rust/master/assets/logo.svg"
+)]
+#![cfg_attr(test, deny(warnings))]
+
+use trace::span_queue::SpanQueue;
+
+mod trace;
+
+pub use trace::{
+    span_processor::ZPagesSpanProcessor, tracez, TracezError, TracezQuerier, TracezResponse,
+};
diff --git a/opentelemetry-zpages/src/trace/aggregator.rs b/opentelemetry-zpages/src/trace/aggregator.rs
new file mode 100644
index 00000000..2615c17b
--- /dev/null
+++ b/opentelemetry-zpages/src/trace/aggregator.rs
@@ -0,0 +1,447 @@
+//! ## Span Aggregator
+//!
+//! Process the span information, aggregating counts for latency, running, and errored spans
+//! grouped by name.
+use crate::trace::{TracezError, TracezMessage, TracezQuery, TracezResponse};
+use crate::SpanQueue;
+use async_channel::Receiver;
+use futures_util::StreamExt as _;
+use opentelemetry::trace::Status;
+use opentelemetry_proto::tonic::tracez::v1::TracezCounts;
+use opentelemetry_sdk::export::trace::SpanData;
+use std::collections::HashMap;
+use std::time::{Duration, SystemTime, UNIX_EPOCH};
+
+const LATENCY_BUCKET: [Duration; 9] = [
+    Duration::from_micros(0),
+    Duration::from_micros(10),
+    Duration::from_micros(100),
+    Duration::from_millis(1),
+    Duration::from_millis(10),
+    Duration::from_millis(100),
+    Duration::from_secs(1),
+    Duration::from_secs(10),
+    Duration::from_secs(100),
+];
+const LATENCY_BUCKET_COUNT: usize = 9;
+
+/// Aggregate span information from `ZPagesSpanProcessor` and feed that information to the
+/// server when requested.
+#[derive(Debug)]
+pub(crate) struct SpanAggregator {
+    receiver: Receiver<TracezMessage>,
+    summaries: HashMap<String, SpanSummary>,
+    sample_size: usize,
+}
+
+impl SpanAggregator {
+    /// Create a span aggregator.
+    pub(crate) fn new(receiver: Receiver<TracezMessage>, sample_size: usize) -> SpanAggregator {
+        SpanAggregator {
+            receiver,
+            summaries: HashMap::new(),
+            sample_size,
+        }
+    }
+
+    /// Process requests from the HTTP server or the span processor.
+    pub(crate) async fn process(&mut self) {
+        let sample_size = self.sample_size;
+        loop {
+            match self.receiver.next().await {
+                None => {
+                    // All senders have been dropped; close the receiver.
+                    self.receiver.close();
+                    return;
+                }
+                Some(msg) => {
+                    match msg {
+                        TracezMessage::ShutDown => {
+                            self.receiver.close();
+                            return;
+                        }
+                        TracezMessage::SpanEnd(span) => {
+                            let summary = self
+                                .summaries
+                                .entry(span.name.clone().into())
+                                .or_insert_with(|| SpanSummary::new(sample_size));
+
+                            summary.running.remove(span.span_context.clone());
+
+                            if matches!(span.status, Status::Error { .. }) {
+                                summary.error.push_back(span);
+                            } else {
+                                let latency_idx = latency_bucket(span.start_time, span.end_time);
+                                if let Some(queue) = summary.latencies.get_mut(latency_idx) {
+                                    queue.push_back(span)
+                                }
+                            }
+                        }
+                        TracezMessage::SampleSpan(span) => {
+                            // Re-sample spans whenever a new span starts.
+                            //
+                            // This helps us clean up stale spans that failed to be evicted
+                            // because their span-end messages were never delivered.
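+                            // Each SpanQueue keeps at most `sample_size` samples;
+                            // once full, newly sampled spans evict the oldest entries.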
+                            let summary = self
+                                .summaries
+                                .entry(span.name.clone().into())
+                                .or_insert_with(|| SpanSummary::new(sample_size));
+                            summary.running.push_back(span)
+                        }
+                        TracezMessage::Query { query, response_tx } => {
+                            let result = self.handle_query(query);
+                            let _ = response_tx.send(result);
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    fn handle_query(&mut self, query: TracezQuery) -> Result<TracezResponse, TracezError> {
+        match query {
+            TracezQuery::Aggregation => Ok(TracezResponse::Aggregation(
+                self.summaries
+                    .iter()
+                    .map(|(span_name, summary)| TracezCounts {
+                        spanname: span_name.clone(),
+                        latency: summary
+                            .latencies
+                            .iter()
+                            .map(|queue| queue.count() as u32)
+                            .collect(),
+                        running: summary.running.count() as u32,
+                        error: summary.error.count() as u32,
+                    })
+                    .collect(),
+            )),
+            TracezQuery::Latency {
+                bucket_index,
+                span_name,
+            } => self
+                .summaries
+                .get(&span_name)
+                .ok_or(TracezError::NotFound {
+                    api: "tracez/api/latency/{bucket_index}/{span_name}",
+                })
+                .and_then(|summary| {
+                    summary
+                        .latencies
+                        .get(bucket_index)
+                        .ok_or(TracezError::InvalidArgument {
+                            api: "tracez/api/latency/{bucket_index}/{span_name}",
+                            message: "invalid bucket index",
+                        })
+                        .map(|queue| TracezResponse::Latency(queue.clone().into()))
+                }),
+            TracezQuery::Error { span_name } => self
+                .summaries
+                .get(&span_name)
+                .ok_or(TracezError::NotFound {
+                    api: "tracez/api/error/{span_name}",
+                })
+                .map(|summary| TracezResponse::Error(summary.error.clone().into())),
+            TracezQuery::Running { span_name } => self
+                .summaries
+                .get(&span_name)
+                .ok_or(TracezError::NotFound {
+                    api: "tracez/api/running/{span_name}",
+                })
+                .map(|summary| TracezResponse::Running(summary.running.clone().into())),
+        }
+    }
+}
+
+fn latency_bucket(start_time: SystemTime, end_time: SystemTime) -> usize {
+    let latency = end_time
+        .duration_since(UNIX_EPOCH)
+        .unwrap_or_else(|_| Duration::from_millis(0))
+        - start_time
+            .duration_since(UNIX_EPOCH)
+            .unwrap_or_else(|_| Duration::from_millis(0));
+    for (idx, lower) in LATENCY_BUCKET.iter().copied().enumerate().skip(1) {
+        if lower > latency {
+            return idx - 1;
+        }
+    }
+    LATENCY_BUCKET.len() - 1
+}
+
+#[derive(Debug)]
+struct SpanSummary {
+    running: SpanQueue,
+    error: SpanQueue,
+    latencies: Vec<SpanQueue>,
+}
+
+impl SpanSummary {
+    fn new(sample_size: usize) -> SpanSummary {
+        SpanSummary {
+            running: SpanQueue::new(sample_size),
+            error: SpanQueue::new(sample_size),
+            latencies: vec![SpanQueue::new(sample_size); LATENCY_BUCKET_COUNT],
+        }
+    }
+}
+
+impl<T: From<SpanData>> From<SpanQueue> for Vec<T> {
+    fn from(span_queue: SpanQueue) -> Self {
+        span_queue.spans().into_iter().map(Into::into).collect()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::trace::{
+        aggregator::{SpanAggregator, LATENCY_BUCKET_COUNT},
+        span_queue::SpanQueue,
+        TracezMessage,
+    };
+    use opentelemetry::trace::{SpanContext, SpanId, Status, TraceFlags, TraceId, TraceState};
+    use opentelemetry_sdk::{export::trace::SpanData, testing::trace::new_test_export_span_data};
+    use std::borrow::Cow;
+    use std::cmp::min;
+    use std::time::{Duration, SystemTime};
+
+    enum Action {
+        Start,
+        End(Duration), // end with latency
+    }
+
+    struct ProcessTestPlan {
+        // (trace id, span id, trace flag, is error, action)
+        input: Vec<(u128, u64, u8, bool, Action)>,
+        // (trace id, span id, trace flag, is error)
+        expect_running: Vec<(u128, u64, u8, bool)>,
+        // (trace id, span id, trace flag, is error)
+        expect_error: Vec<(u128, u64, u8, bool)>,
+        // (index of the latency bucket, trace id, span id, trace flag, is error)
+        expect_latencies: Vec<(usize, u128, u64, u8, bool)>,
+        // name of the test plan
+        name: &'static str,
+    }
+
+    impl ProcessTestPlan {
+        pub(crate) fn get_expect_running(&self) -> Vec<SpanData> {
+            self.expect_running
+                .iter()
+                .cloned()
+                .map(|(trace_id, span_id, trace_flag, is_error)| {
+                    span_data(trace_id, span_id, trace_flag, is_error)
+                })
+                .collect()
+        }
+
+        pub(crate) fn get_expect_error(&self) -> Vec<SpanData> {
+            self.expect_error
+                .iter()
+                .cloned()
+                .map(|(trace_id, span_id, trace_flag, is_error)| {
+                    span_data(trace_id, span_id, trace_flag, is_error)
+                })
+                .collect()
+        }
+
+        pub(crate) fn get_latencies(&self) -> Vec<Vec<SpanData>> {
+            let mut sink = vec![Vec::new(); LATENCY_BUCKET_COUNT];
+            for (index, trace_id, span_id, trace_flag, is_error) in self.expect_latencies.clone() {
+                sink.get_mut(index)
+                    .unwrap()
+                    .push(span_data(trace_id, span_id, trace_flag, is_error))
+            }
+            sink
+        }
+
+        pub(crate) fn get_input(&self) -> (Vec<SpanData>, Vec<SpanData>) {
+            let mut start_spans = Vec::new();
+            let mut end_spans = Vec::new();
+            let start_time = SystemTime::now();
+            for input in &self.input {
+                let mut span_data = span_data(input.0, input.1, input.2, input.3);
+                match input.4 {
+                    Action::Start => {
+                        span_data.start_time = start_time;
+                        start_spans.push(span_data);
+                    }
+                    Action::End(duration) => {
+                        span_data.start_time = start_time;
+                        span_data.end_time = start_time.checked_add(duration).unwrap();
+                        end_spans.push(span_data);
+                    }
+                }
+            }
+            (start_spans, end_spans)
+        }
+    }
+
+    fn span_data(trace_id: u128, span_id: u64, trace_flag: u8, is_error: bool) -> SpanData {
+        let mut span_data = new_test_export_span_data();
+        span_data.span_context = SpanContext::new(
+            TraceId::from_u128(trace_id),
+            SpanId::from_u64(span_id),
+            TraceFlags::new(trace_flag),
+            true,
+            TraceState::default(),
+        );
+        span_data.name = Cow::from("test-service");
+        span_data.status = {
+            if is_error {
+                Status::error("")
+            } else {
+                Status::Ok
+            }
+        };
+        span_data
+    }
+
+    #[tokio::test]
+    async fn test_span_aggregator() -> Result<(), Box<dyn std::error::Error>> {
+        const SAMPLE_SIZE: usize = 5;
+        let test_cases = vec![
+            ProcessTestPlan {
+                name: "start and end",
+                input: vec![
+                    (1, 1, 0, false, Action::Start),
+                    (1, 1, 0, false, Action::End(Duration::from_millis(2))),
+                ],
+                expect_running: vec![],
+                expect_error: vec![],
+                expect_latencies: vec![(3, 1, 1, 0, false)],
+            },
+            ProcessTestPlan {
+                name: "start and end with error",
+                input: vec![
+                    (1, 1, 0, false, Action::Start),
+                    (1, 1, 0, true, Action::End(Duration::from_millis(2))),
+                ],
+                expect_latencies: vec![],
+                expect_error: vec![(1, 1, 0, true)],
+                expect_running: vec![],
+            },
+            ProcessTestPlan {
+                name: "start but not finish",
+                input: vec![
+                    (1, 2, 0, false, Action::Start),
+                    (1, 1, 0, false, Action::Start),
+                    (1, 2, 0, false, Action::End(Duration::from_secs(6))),
+                ],
+                expect_running: vec![(1, 1, 0, false)],
+                expect_error: vec![],
+                expect_latencies: vec![(6, 1, 2, 0, false)],
+            },
+            ProcessTestPlan {
+                name: "accept spans without started record",
+                input: vec![(1, 1, 0, false, Action::End(Duration::from_secs(6)))],
+                expect_latencies: vec![(6, 1, 1, 0, false)],
+                expect_running: vec![],
+                expect_error: vec![],
+            },
+            ProcessTestPlan {
+                name: "evicted spans if the queue is filled",
+                input: {
+                    let mut input = Vec::with_capacity((SAMPLE_SIZE + 1) * 2);
+                    for i in 0..SAMPLE_SIZE + 1 {
+                        input.push((1, i as u64 + 1, 0, false, Action::Start));
+                        input.push((
+                            1,
+                            i as u64 + 1,
+                            0,
+                            false,
+                            Action::End(Duration::from_secs(3)),
+                        ));
+                    }
+                    input
+                },
+                expect_latencies: {
+                    let mut latencies = Vec::with_capacity(SAMPLE_SIZE + 1);
+                    for i in 0..SAMPLE_SIZE + 1 {
+                        latencies.push((6, 1, i as u64 + 1, 0, false));
+                    }
+                    latencies
+                },
+                expect_running: vec![],
+                expect_error: vec![],
+            },
+        ];
+
+        let assert_span_queue = |span_queue: &SpanQueue, expected: Vec<SpanData>, msg: String| {
+            assert_eq!(span_queue.len(), min(SAMPLE_SIZE, expected.len()));
+            for collected_span in span_queue.clone().spans() {
+                assert!(
+                    expected
+                        .iter()
+                        .any(|expected_span| collected_span.span_context
+                            == expected_span.span_context
+                            && collected_span.status == expected_span.status),
+                    "{}",
+                    msg
+                )
+            }
+        };
+
+        for plan in test_cases {
+            let running = plan.get_expect_running();
+            let error = plan.get_expect_error();
+            let latencies = plan.get_latencies();
+            let plan_name = plan.name.to_string();
+
+            let (sender, receiver) = async_channel::unbounded();
+            let mut aggregator = SpanAggregator::new(receiver, SAMPLE_SIZE);
+
+            let handle = tokio::spawn(async move {
+                aggregator.process().await;
+
+                assert_ne!(aggregator.summaries.len(), 0);
+                let summary = aggregator
+                    .summaries
+                    .get::<String>(&"test-service".to_string())
+                    .unwrap();
+
+                assert_span_queue(
+                    &summary.running,
+                    running,
+                    format!(
+                        "{} fails because the running status is not expected",
+                        plan_name
+                    ),
+                );
+                assert_span_queue(
+                    &summary.error,
+                    error,
+                    format!(
+                        "{} fails because the error status is not expected",
+                        plan_name
+                    ),
+                );
+                // check that each latency bucket holds the expected spans
+                for (index, expected) in (0..LATENCY_BUCKET_COUNT).zip(latencies) {
+                    assert_span_queue(
+                        summary.latencies.get(index).unwrap(),
+                        expected,
+                        format!(
+                            "{} fails because the latency status with index {} is not expected",
+                            plan_name, index,
+                        ),
+                    );
+                }
+            });
+
+            let (start_spans, end_spans) = plan.get_input();
+
+            for span in start_spans.into_iter() {
+                sender.send(TracezMessage::SampleSpan(span)).await?;
+            }
+
+            for span in end_spans.into_iter() {
+                sender.send(TracezMessage::SpanEnd(span)).await?;
+            }
+
+            sender.send(TracezMessage::ShutDown).await?;
+
+            handle.await?;
+        }
+
+        Ok(())
+    }
+}
diff --git a/opentelemetry-zpages/src/trace/mod.rs b/opentelemetry-zpages/src/trace/mod.rs
new file mode 100644
index 00000000..4e455934
--- /dev/null
+++ b/opentelemetry-zpages/src/trace/mod.rs
@@ -0,0 +1,293 @@
+//! Tracez implementation
+//!
+use async_channel::{SendError, Sender};
+use futures_channel::oneshot::{self, Canceled};
+use opentelemetry_proto::tonic::tracez::v1::{ErrorData, LatencyData, RunningData, TracezCounts};
+use opentelemetry_sdk::{export::trace::SpanData, runtime::Runtime};
+use serde::ser::SerializeSeq;
+use serde::Serializer;
+use std::fmt::Formatter;
+use std::sync::Arc;
+
+mod aggregator;
+pub(crate) mod span_processor;
+pub(crate) mod span_queue;
+
+/// Create tracez components. This function returns a [`ZPagesSpanProcessor`] that should be
+/// installed into the [`TracerProvider`] and a [`TracezQuerier`] that lets the HTTP server
+/// access the aggregated span information.
+///
+/// The `sample_size` configures how many spans to sample for each unique span name.
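+///
+/// Once installed, the returned [`TracezQuerier`] can serve the aggregated data. A minimal
+/// sketch (the span name and bucket index below are illustrative, and error handling is
+/// elided):
+///
+/// ```no_run
+/// # use opentelemetry_zpages::TracezQuerier;
+/// # async fn serve(querier: TracezQuerier) {
+/// // Aggregated counts for every span name grouping.
+/// let aggregation = querier.aggregation().await;
+/// // Sampled spans in latency bucket 3 (1ms..10ms) for a given span name.
+/// let slow_spans = querier.latency(3, "my-span".to_string()).await;
+/// # }
+/// ```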
+
+/// [`ZPagesSpanProcessor`]: span_processor::ZPagesSpanProcessor
+/// [`TracerProvider`]: opentelemetry_sdk::trace::TracerProvider
+///
+/// ## Example
+/// ```no_run
+/// # use opentelemetry_zpages::tracez;
+/// # use opentelemetry::{global, trace::Tracer};
+/// # use opentelemetry_sdk::{runtime::Tokio, trace::TracerProvider};
+/// # use std::sync::Arc;
+/// # fn main() {
+///     let (processor, querier) = tracez(5, Tokio); // sample 5 spans for each unique span name
+///     let provider = TracerProvider::builder()
+///         .with_span_processor(processor)
+///         .build();
+///     global::set_tracer_provider(provider);
+///
+///     // use querier to retrieve the aggregated span information
+/// # }
+///
+/// ```
+pub fn tracez<R: Runtime>(
+    sample_size: usize,
+    runtime: R,
+) -> (span_processor::ZPagesSpanProcessor, TracezQuerier) {
+    let (tx, rx) = async_channel::unbounded();
+    let span_processor = span_processor::ZPagesSpanProcessor::new(tx.clone());
+    let mut aggregator = aggregator::SpanAggregator::new(rx, sample_size);
+    runtime.spawn(Box::pin(async move {
+        aggregator.process().await;
+    }));
+    (span_processor, TracezQuerier(Arc::new(tx)))
+}
+
+/// Message used to pass commands between web servers, aggregators, and span processors.
+pub enum TracezMessage {
+    /// Sample span on start
+    SampleSpan(SpanData),
+    /// Span ended
+    SpanEnd(SpanData),
+    /// Shut down the aggregator
+    ShutDown,
+    /// Run a query from the web service
+    Query {
+        /// Query content
+        query: TracezQuery,
+        /// Channel to send the response
+        response_tx: oneshot::Sender<Result<TracezResponse, TracezError>>,
+    },
+}
+
+impl std::fmt::Debug for TracezMessage {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match &self {
+            TracezMessage::SampleSpan(_) => f.write_str("span starts"),
+            TracezMessage::SpanEnd(_) => f.write_str("span ends"),
+            TracezMessage::ShutDown => f.write_str("shut down"),
+            TracezMessage::Query { .. } => f.write_str("query aggregation results"),
+        }
+    }
+}
+
+/// Tracez APIs.
+/// As defined in the [spec](https://github.com/open-telemetry/opentelemetry-specification/blob/main/experimental/trace/zpages.md#http-server).
+#[derive(Debug)]
+pub enum TracezQuery {
+    /// tracez/api/aggregations
+    Aggregation,
+    /// tracez/api/latency/{bucket_index}/{span_name}
+    Latency {
+        /// index of the bucket in API path
+        bucket_index: usize,
+        /// span name in API path
+        span_name: String,
+    },
+    /// tracez/api/running/{span_name}
+    Running {
+        /// span name in API path
+        span_name: String,
+    },
+    /// tracez/api/error/{span_name}
+    Error {
+        /// span name in API path
+        span_name: String,
+    },
+}
+
+/// Tracez APIs' response.
+#[derive(Debug)]
+pub enum TracezResponse {
+    /// tracez/api/aggregations
+    Aggregation(Vec<TracezCounts>),
+    /// tracez/api/latency/{bucket_index}/{span_name}
+    Latency(Vec<LatencyData>),
+    /// tracez/api/running/{span_name}
+    Running(Vec<RunningData>),
+    /// tracez/api/error/{span_name}
+    Error(Vec<ErrorData>),
+}
+
+impl serde::Serialize for TracezResponse {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        match self {
+            TracezResponse::Aggregation(data) => {
+                let mut list = serializer.serialize_seq(Some(data.len()))?;
+                for e in data {
+                    list.serialize_element(e)?;
+                }
+                list.end()
+            }
+            TracezResponse::Latency(data) => {
+                let mut list = serializer.serialize_seq(Some(data.len()))?;
+                for e in data {
+                    list.serialize_element(e)?;
+                }
+                list.end()
+            }
+            TracezResponse::Running(data) => {
+                let mut list = serializer.serialize_seq(Some(data.len()))?;
+                for e in data {
+                    list.serialize_element(e)?;
+                }
+                list.end()
+            }
+            TracezResponse::Error(data) => {
+                let mut list = serializer.serialize_seq(Some(data.len()))?;
+                for e in data {
+                    list.serialize_element(e)?;
+                }
+                list.end()
+            }
+        }
+    }
+}
+
+/// Provide wrapper functions to query the aggregated span info.
+// TracezQuerier creates the oneshot channel and sends the TracezMessage to the SpanAggregator.
+#[derive(Clone, Debug)]
+pub struct TracezQuerier(Arc<Sender<TracezMessage>>);
+
+impl TracezQuerier {
+    /// Return the aggregation status for spans.
+    ///
+    /// The aggregation contains the error, running, and latency counts for all span name
+    /// groupings.
+    pub async fn aggregation(&self) -> Result<TracezResponse, TracezError> {
+        let (tx, rx) = oneshot::channel();
+        self.0
+            .send(TracezMessage::Query {
+                query: TracezQuery::Aggregation,
+                response_tx: tx,
+            })
+            .await?;
+        rx.await.map_err::<TracezError, _>(Into::into)?
+    }
+
+    /// Return the sampled spans for the given latency bucket index.
+    pub async fn latency(
+        &self,
+        bucket_index: usize,
+        span_name: String,
+    ) -> Result<TracezResponse, TracezError> {
+        let (tx, rx) = oneshot::channel();
+        self.0
+            .send(TracezMessage::Query {
+                query: TracezQuery::Latency {
+                    bucket_index,
+                    span_name,
+                },
+                response_tx: tx,
+            })
+            .await?;
+        rx.await.map_err::<TracezError, _>(Into::into)?
+    }
+
+    /// Return a snapshot of the sampled running spans.
+    ///
+    /// Note that the current implementation does not reflect changes made to spans after
+    /// they started, such as added events or links.
+    pub async fn running(&self, span_name: String) -> Result<TracezResponse, TracezError> {
+        let (tx, rx) = oneshot::channel();
+        self.0
+            .send(TracezMessage::Query {
+                query: TracezQuery::Running { span_name },
+                response_tx: tx,
+            })
+            .await?;
+        rx.await.map_err::<TracezError, _>(Into::into)?
+    }
+
+    /// Return the sampled spans with error status.
+    pub async fn error(&self, span_name: String) -> Result<TracezResponse, TracezError> {
+        let (tx, rx) = oneshot::channel();
+        self.0
+            .send(TracezMessage::Query {
+                query: TracezQuery::Error { span_name },
+                response_tx: tx,
+            })
+            .await?;
+        rx.await.map_err::<TracezError, _>(Into::into)?
+    }
+}
+
+impl Drop for TracezQuerier {
+    fn drop(&mut self) {
+        // shut down the aggregator if it is still running
+        let _ = self.0.try_send(TracezMessage::ShutDown);
+    }
+}
+
+/// Tracez API's error.
+#[derive(Debug)]
+pub enum TracezError {
+    /// The arguments are invalid for the requested tracez operation.
+    InvalidArgument {
+        /// Describes the tracez operation
+        api: &'static str,
+        /// Error message
+        message: &'static str,
+    },
+    /// The requested resource cannot be found.
+    NotFound {
+        /// Describes the tracez operation
+        api: &'static str,
+    },
+    /// Error when serializing the TracezResponse to JSON.
+    Serialization,
+    /// The span aggregator has been dropped.
+    AggregatorDropped,
+}
+
+impl From<Canceled> for TracezError {
+    fn from(_: Canceled) -> Self {
+        TracezError::AggregatorDropped
+    }
+}
+
+impl From<SendError<TracezMessage>> for TracezError {
+    fn from(_: SendError<TracezMessage>) -> Self {
+        // Since we use an unbounded channel to send messages to the aggregator, the only
+        // reason a send can fail is that the receiver has closed, which should only
+        // happen when the span aggregator has been dropped.
+        TracezError::AggregatorDropped
+    }
+}
+
+impl std::fmt::Display for TracezError {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        match self {
+            TracezError::InvalidArgument { api: _, message } => f.write_str(message),
+            TracezError::NotFound { api: _ } => {
+                f.write_str("the requested resource is not found")
+            }
+            TracezError::Serialization => f.write_str("cannot serialize the response into JSON"),
+            TracezError::AggregatorDropped => {
+                f.write_str("the span aggregator was already dropped when querying")
+            }
+        }
+    }
+}
+
+impl TracezResponse {
+    /// Convert the `TracezResponse` into JSON.
+    ///
+    /// Returns a `TracezError` if the serialization fails.
+    #[cfg(feature = "with-serde")]
+    pub fn into_json(self) -> Result<String, TracezError> {
+        serde_json::to_string(&self).map_err(|_| TracezError::Serialization)
+    }
+}
diff --git a/opentelemetry-zpages/src/trace/span_processor.rs b/opentelemetry-zpages/src/trace/span_processor.rs
new file mode 100644
index 00000000..a9760d4f
--- /dev/null
+++ b/opentelemetry-zpages/src/trace/span_processor.rs
@@ -0,0 +1,60 @@
+//! ## zPages processor
+//!
+//! The zPages processor collects span information when spans start or end and sends it to the
+//! [`SpanAggregator`] for further processing.
+//!
+//! [`SpanAggregator`]: ../struct.SpanAggregator.html
+use crate::trace::TracezMessage;
+use async_channel::Sender;
+use opentelemetry::{trace::TraceResult, Context};
+use opentelemetry_sdk::{
+    export::trace::SpanData,
+    trace::{Span, SpanProcessor},
+};
+use std::fmt::Formatter;
+
+/// ZPagesSpanProcessor is an alternative to external exporters. It sends span data to the zPages
+/// server, where it is archived and can be used for debugging purposes.
+///
+/// ZPagesSpanProcessor employs a `SpanAggregator` running as a separate task to aggregate spans
+/// by span name.
+pub struct ZPagesSpanProcessor {
+    tx: Sender<TracezMessage>,
+}
+
+impl std::fmt::Debug for ZPagesSpanProcessor {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        f.write_str("ZPageProcessor")
+    }
+}
+
+impl ZPagesSpanProcessor {
+    /// Create a new `ZPagesSpanProcessor`.
+    pub fn new(tx: Sender<TracezMessage>) -> ZPagesSpanProcessor {
+        ZPagesSpanProcessor { tx }
+    }
+}
+
+impl SpanProcessor for ZPagesSpanProcessor {
+    fn on_start(&self, span: &mut Span, _cx: &Context) {
+        // If the aggregator is already dropped, sending the sampled span is a no-op.
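+        // `exported_data()` is expected to return `None` when the span is not being
+        // recorded, in which case the span is skipped.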
+        if let Some(data) = span.exported_data() {
+            let _ = self.tx.try_send(TracezMessage::SampleSpan(data));
+        }
+    }
+
+    fn on_end(&self, span: SpanData) {
+        // If the aggregator is already dropped, this send is a no-op.
+        let _ = self.tx.try_send(TracezMessage::SpanEnd(span));
+    }
+
+    fn force_flush(&self) -> TraceResult<()> {
+        // do nothing
+        Ok(())
+    }
+
+    fn shutdown(&mut self) -> TraceResult<()> {
+        // do nothing
+        Ok(())
+    }
+}
diff --git a/opentelemetry-zpages/src/trace/span_queue.rs b/opentelemetry-zpages/src/trace/span_queue.rs
new file mode 100644
index 00000000..f9fc2bd1
--- /dev/null
+++ b/opentelemetry-zpages/src/trace/span_queue.rs
@@ -0,0 +1,225 @@
+//! # Span Queue
+
+use opentelemetry::trace::SpanContext;
+use opentelemetry_sdk::export::trace::SpanData;
+#[cfg(feature = "serialize")]
+use serde::{Deserialize, Serialize};
+use std::collections::HashMap;
+
+/// This queue maintains an ordered list of elements. Elements are
+/// removed from the queue in a first-in, first-out fashion.
+#[derive(Clone, Debug)]
+pub(crate) struct SpanQueue {
+    // We can't really use the opentelemetry::EvictedQueue here because
+    // we need to compare the SpanData based on their span context
+    // rather than all fields. Thus, we cannot use SpanData's default
+    // equal function as it compares all fields.
+
+    // All operations within SpanQueue should be O(1).
+    queue: Vec<SpanData>,
+    map: HashMap<SpanContext, usize>,
+    next_idx: usize,
+    capacity: usize,
+    count: usize,
+}
+
+impl PartialEq for SpanQueue {
+    fn eq(&self, other: &Self) -> bool {
+        self.queue.eq(&other.queue) && self.next_idx == other.next_idx
+    }
+}
+
+impl SpanQueue {
+    /// Create a new `SpanQueue` with a given max length.
+    pub(crate) fn new(max_len: usize) -> Self {
+        SpanQueue {
+            queue: Vec::with_capacity(max_len),
+            next_idx: 0,
+            map: HashMap::with_capacity(max_len),
+            capacity: max_len,
+            count: 0,
+        }
+    }
+
+    /// Push a new element to the back of the queue.
+    /// If the queue is full, replace the leftmost element inside the queue.
+    pub(crate) fn push_back(&mut self, value: SpanData) {
+        self.next_idx %= self.capacity;
+        self.map.insert(value.span_context.clone(), self.next_idx);
+        match self.queue.get_mut(self.next_idx) {
+            Some(ele) => {
+                self.map.remove(&ele.span_context);
+                *ele = value;
+            }
+            None => {
+                self.queue.push(value);
+            }
+        }
+        self.count += 1;
+        self.next_idx += 1;
+    }
+
+    /// Returns the number of sampled spans in the `SpanQueue`.
+    #[allow(unused)] // used in testing
+    pub(crate) fn len(&self) -> usize {
+        self.queue.len()
+    }
+
+    /// Return the count of spans in the `SpanQueue`.
+    ///
+    /// The count tracks the total number of spans rather than the number of sampled spans.
+    /// Use the `len` function for the current number of sampled spans.
+    ///
+    /// The count increases by 1 whenever `push_back` is called and
+    /// decreases by 1 whenever `remove` is called.
+    pub(crate) fn count(&self) -> usize {
+        self.count
+    }
+
+    /// Remove the element with the given span context, if it exists.
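+    ///
+    /// Removal runs in O(1): the element's index is looked up in `map`, then the element is
+    /// swap-removed, with the map entry of the previously-last element updated to point at
+    /// the vacated slot.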
+    pub(crate) fn remove(&mut self, span_context: SpanContext) -> Option<SpanData> {
+        self.count = self.count.saturating_sub(1);
+        if !self.map.contains_key(&span_context) {
+            None
+        } else {
+            self.next_idx = self.queue.len() - 1;
+            let idx = *(self.map.get(&span_context).unwrap());
+            if idx == self.queue.len() - 1 {
+                // if it's the last element, just remove it
+                self.map.remove(&span_context);
+                Some(self.queue.remove(idx))
+            } else {
+                let last_span_context = self.queue.last().unwrap().span_context.clone();
+                self.map.remove(&span_context);
+                self.map.insert(last_span_context, idx);
+                Some(self.queue.swap_remove(idx))
+            }
+        }
+    }
+
+    /// Return all spans the queue currently holds.
+    pub(crate) fn spans(self) -> Vec<SpanData> {
+        self.queue.into_iter().collect()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use opentelemetry::trace::{SpanId, TraceFlags, TraceId, TraceState};
+    use opentelemetry_sdk::testing::trace::new_test_export_span_data;
+    use std::time::SystemTime;
+
+    enum Action {
+        PushBack(u128, u64),
+        Remove(u128, u64),
+    }
+
+    // If an expected value is None, the corresponding check is skipped for that test plan.
+    #[derive(Default)]
+    struct TestPlan {
+        max_len: usize,
+        actions: Vec<Action>,
+        expected_next_idx: Option<usize>,
+        expected_queue: Option<Vec<(u128, u64)>>,
+        expected_len: Option<usize>,
+    }
+
+    #[test]
+    fn test_span_queue() {
+        let get_span_context = |trace_id: u128, span_id: u64| {
+            SpanContext::new(
+                TraceId::from_u128(trace_id),
+                SpanId::from_u64(span_id),
+                TraceFlags::new(0),
+                false,
+                TraceState::default(),
+            )
+        };
+        let time = SystemTime::now();
+        let get_span_data = |trace_id: u128, span_id: u64| {
+            let mut span_data = new_test_export_span_data();
+            span_data.span_context = get_span_context(trace_id, span_id);
+            span_data.start_time = time;
+            span_data.end_time = time;
+            span_data
+        };
+        let plans = vec![
+            TestPlan {
+                max_len: 3,
+                actions: vec![
+                    Action::PushBack(1, 1),
+                    Action::PushBack(1, 2),
+                    Action::PushBack(1, 3),
+                    Action::PushBack(1, 4),
+                ],
+                expected_next_idx: Some(1),
+                expected_len: Some(3),
+                expected_queue: Some(vec![(1, 4), (1, 2), (1, 3)]),
+            },
+            TestPlan {
+                max_len: 3,
+                actions: vec![
+                    Action::PushBack(1, 3),
+                    Action::PushBack(2, 2),
+                    Action::PushBack(1, 4),
+                    Action::PushBack(1, 5),
+                    Action::Remove(1, 3),
+                    Action::Remove(1, 4),
+                ],
+                expected_queue: Some(vec![(1, 5), (2, 2)]),
+                expected_next_idx: Some(2),
+                expected_len: Some(2),
+            },
+            TestPlan {
+                max_len: 3,
+                actions: vec![
+                    Action::PushBack(1, 1),
+                    Action::Remove(1, 3),
+                    Action::Remove(1, 4),
+                    Action::PushBack(1, 3),
+                    Action::Remove(1, 1),
+                    Action::Remove(1, 3),
+                ],
+                expected_len: Some(0),
+                expected_next_idx: Some(0),
+                expected_queue: Some(vec![]),
+            },
+        ];
+
+        for plan in plans {
+            let mut span_queue = SpanQueue::new(plan.max_len);
+            for action in plan.actions {
+                match action {
+                    Action::PushBack(trace_id, span_id) => {
+                        span_queue.push_back(get_span_data(trace_id, span_id));
+                    }
+                    Action::Remove(trace_id, span_id) => {
+                        span_queue.remove(get_span_context(trace_id, span_id));
+                    }
+                }
+            }
+            if let Some(next_id) = plan.expected_next_idx {
+                assert_eq!(span_queue.next_idx, next_id);
+            }
+            if let Some(len) = plan.expected_len {
+                assert_eq!(span_queue.len(), len);
+            }
+            if let Some(queue) = plan.expected_queue {
+                assert_eq!(
+                    span_queue.queue,
+                    queue
+                        .iter()
+                        .cloned()
+                        .map(|(trace_id, span_id)| get_span_data(trace_id, span_id))
+                        .collect::<Vec<_>>()
+                );
+                assert_eq!(span_queue.map.len(), queue.len());
+                for (idx, (trace_id, span_id)) in queue.into_iter().enumerate() {
+                    let span_context = get_span_context(trace_id, span_id);
+                    assert_eq!(span_queue.map.get(&span_context).copied(), Some(idx));
+                }
+            }
+        }
+    }
+}
diff --git a/precommit.sh b/precommit.sh
new file mode 100755
index 00000000..0144ba8a
--- /dev/null
+++ b/precommit.sh
@@ -0,0 +1 @@
+cargo update && cargo fmt --all && ./scripts/lint.sh && ./scripts/test.sh
\ No newline at end of file
diff --git a/rustfmt.toml b/rustfmt.toml
new file mode 100644
index 00000000..3a26366d
--- /dev/null
+++ b/rustfmt.toml
@@ -0,0 +1 @@
+edition = "2021"
diff --git a/scripts/lint.sh b/scripts/lint.sh
new file mode 100755
index 00000000..f6b69701
--- /dev/null
+++ b/scripts/lint.sh
@@ -0,0 +1,56 @@
+#!/bin/bash
+
+set -eu
+
+cargo_feature() {
+    echo "checking $1 with features $2"
+    cargo clippy --manifest-path=$1/Cargo.toml --all-targets --features "$2" --no-default-features -- \
+    `# Exit with a nonzero code if there are clippy warnings` \
+    -Dwarnings
+}
+
+if rustup component add clippy; then
+    cargo clippy --all-targets --all-features -- \
+    `# Exit with a nonzero code if there are clippy warnings` \
+    -Dwarnings
+
+    cargo_feature opentelemetry "trace,metrics,logs,logs_level_enabled,testing"
+
+    cargo_feature opentelemetry-otlp "default"
+    cargo_feature opentelemetry-otlp "default,tls"
+    cargo_feature opentelemetry-otlp "default,tls-roots"
+    cargo_feature opentelemetry-otlp "trace,grpc-sys"
+    cargo_feature opentelemetry-otlp "trace,grpc-sys,openssl"
+    cargo_feature opentelemetry-otlp "trace,grpc-sys,openssl-vendored"
+    cargo_feature opentelemetry-otlp "http-proto"
+    cargo_feature opentelemetry-otlp "http-proto, reqwest-blocking-client"
+    cargo_feature opentelemetry-otlp "http-proto, reqwest-client"
+    cargo_feature opentelemetry-otlp "http-proto, reqwest-rustls"
+    cargo_feature opentelemetry-otlp "http-proto, surf-client, surf/curl-client"
+    cargo_feature opentelemetry-otlp "metrics"
+
+    cargo_feature opentelemetry-jaeger "surf_collector_client, surf/curl-client"
+    cargo_feature opentelemetry-jaeger "isahc_collector_client"
+    cargo_feature opentelemetry-jaeger "reqwest_blocking_collector_client"
+    cargo_feature opentelemetry-jaeger "reqwest_collector_client"
+    cargo_feature opentelemetry-jaeger "hyper_collector_client"
+    cargo_feature opentelemetry-jaeger "hyper_tls_collector_client"
+    cargo_feature opentelemetry-jaeger "collector_client"
+    cargo_feature opentelemetry-jaeger "wasm_collector_client"
+    cargo_feature opentelemetry-jaeger "collector_client, wasm_collector_client"
+    cargo_feature opentelemetry-jaeger "default"
+
+    cargo_feature opentelemetry-proto "default"
+    cargo_feature opentelemetry-proto "full"
+    cargo_feature opentelemetry-proto "gen-tonic,trace"
+    cargo_feature opentelemetry-proto "gen-tonic,trace,with-serde"
+    cargo_feature opentelemetry-proto "gen-tonic,metrics"
+    cargo_feature opentelemetry-proto "gen-tonic,logs"
+    cargo_feature opentelemetry-proto "gen-grpcio,trace"
+    cargo_feature opentelemetry-proto "gen-grpcio,trace,with-serde"
+    cargo_feature opentelemetry-proto "gen-grpcio,metrics"
+    cargo_feature opentelemetry-proto "gen-grpcio,logs"
+    cargo_feature opentelemetry-proto "gen-grpcio,zpages"
+    cargo_feature opentelemetry-proto "gen-grpcio,zpages,with-serde"
+
+fi
diff --git a/scripts/patch_dependencies.sh b/scripts/patch_dependencies.sh
new file mode 100755
index 00000000..ff7ffc70
--- /dev/null
+++ b/scripts/patch_dependencies.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+function patch_version() {
+    local latest_version=$(cargo search --limit 1 $1 | head -1 | cut -d'"' -f2)
+    echo "patching $1 from $latest_version to $2"
+    cargo update -p $1:$latest_version --precise $2
+}
+
diff --git a/scripts/test.sh b/scripts/test.sh
new file mode 100755
index 00000000..611f1714
--- /dev/null
+++ b/scripts/test.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+set -eu
+
+cargo test --all --all-features "$@" -- --test-threads=1
+
+cargo test --manifest-path=opentelemetry-contrib/Cargo.toml --all-features
+cargo test --manifest-path=opentelemetry-datadog/Cargo.toml --all-features
+cargo test --manifest-path=opentelemetry-stackdriver/Cargo.toml --all-features
+cargo test --manifest-path=opentelemetry-user-events-logs/Cargo.toml --all-features
+cargo test --manifest-path=opentelemetry-user-events-metrics/Cargo.toml --all-features