diff --git a/.github/workflows/blazar.yml b/.github/workflows/blazar.yml new file mode 100644 index 0000000..d3e5ccb --- /dev/null +++ b/.github/workflows/blazar.yml @@ -0,0 +1,77 @@ +--- +name: Check blazar build and tests + +on: + push: + branches: + - main + pull_request: + branches: + - '*' + +jobs: + build: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v2 + + - name: Install Docker and Docker Compose + run: | + sudo apt-get remove docker docker-engine docker.io containerd runc + sudo apt-get update + sudo apt-get install -y apt-transport-https ca-certificates curl gnupg lsb-release + curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg + echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null + sudo apt-get update + sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-compose + + - uses: actions/setup-go@v5 + with: + go-version: '1.22.8' + + - name: Build and test + run: | + make test + + golangci: + name: lint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: '1.22' + cache: false + + - name: golangci-lint + uses: golangci/golangci-lint-action@v4 + with: + # Require: The version of golangci-lint to use. + # When `install-mode` is `binary` (default) the value can be v1.2 or v1.2.3 or `latest` to use the latest version. + # When `install-mode` is `goinstall` the value can be v1.2.3, `latest`, or the hash of a commit. + version: v1.61.0 + + # Optional: golangci-lint command line arguments. + # + # Note: By default, the `.golangci.yml` file should be at the root of the repository. + # The location of the configuration file can be changed by using `--config=` + # args: --timeout=30m --config=/my/path/.golangci.yml --issues-exit-code=0 + args: --timeout=30m --issues-exit-code=0 + + # Optional: show only new issues if it's a pull request. The default value is `false`. + # only-new-issues: true + + # Optional: if set to true, then all caching functionality will be completely disabled, + # takes precedence over all other caching options. + # skip-cache: true + + # Optional: if set to true, then the action won't cache or restore ~/go/pkg. + # skip-pkg-cache: true + + # Optional: if set to true, then the action won't cache or restore ~/.cache/go-build. + # skip-build-cache: true + + # Optional: The mode to install golangci-lint. It can be 'binary' or 'goinstall'. + # install-mode: "goinstall" diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..d15d997 --- /dev/null +++ b/.gitignore @@ -0,0 +1,3 @@ +/blazar +blazar.toml +proxy.toml diff --git a/.golangci.yaml b/.golangci.yaml new file mode 100644 index 0000000..6cc435a --- /dev/null +++ b/.golangci.yaml @@ -0,0 +1,70 @@ +--- +run: + go: '1.22' + +linters: + enable: + - bodyclose + - copyloopvar + - errcheck + - errorlint + - gocheckcompilerdirectives + - goconst + - gocritic + - gofmt + - goimports + - goprintffuncname + - gosec + - gosimple + - govet + - intrange + - ineffassign + - misspell + - nakedret + - noctx + - nolintlint + - revive + - staticcheck + - stylecheck + - testifylint + - unconvert + - unparam + - unused + - whitespace + +issues: + exclude-rules: + # Related to file sizes. 
+    - path: .go
+      linters: [gosec]
+      text: "G114: Use of net/http serve function that has no support for setting timeouts"
+
+    - path: .go
+      linters: [stylecheck]
+      text: "ST1003: should not use underscores in package names"
+
+    - path: .go
+      linters: [stylecheck]
+      text: "ST1003: should not use ALL_CAPS in Go names; use CamelCase instead"
+
+    - path: .go
+      linters: [revive]
+      text: "var-naming: don't use an underscore in package name"
+
+    - path: .go
+      linters: [revive]
+      text: "var-naming: don't use ALL_CAPS in Go names; use CamelCase"
+
+    - path: (.+)_test\.go
+      linters:
+        - bodyclose
+        - gosec
+        - noctx
+
+  exclude:
+    # Not all the chains are on v0.47
+    - "res.Block is deprecated: please use `sdk_block` instead"
+    # Blazar supports "older" versions v1 and v1beta
+    - "upgradetypes.SoftwareUpgradeProposal is deprecated: Do not use."
+    # The linter doesn't catch the cobra.CheckErr call that follows
+    - "Error return value of `registerUpgradeCmd.MarkFlagRequired` is not checked"
diff --git a/LICENSE b/LICENSE
index 261eeb9..03e9851 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,201 +1,13 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
+Copyright 2024 Chorus One AG
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
-   1. Definitions.
+    http://www.apache.org/licenses/LICENSE-2.0
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
\ No newline at end of file diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..c5279e7 --- /dev/null +++ b/Makefile @@ -0,0 +1,51 @@ +.PHONY: build clean test proto run build-simapp compress-simapp + +Version=$(shell git describe --tags --exact-match 2>/dev/null || echo "devel") +GitStatus=`git status -s` +GitCommit=`git rev-parse HEAD` +BuildTime=`date +%FT%T%z` +BuildGoVersion=`go version` + +LDFLAGS=-ldflags "-w -s \ +-X 'blazar/cmd.BinVersion=${Version}' \ +-X 'blazar/cmd.GitStatus=${GitStatus}' \ +-X 'blazar/cmd.GitCommit=${GitCommit}' \ +-X 'blazar/cmd.BuildTime=${BuildTime}' \ +-X 'blazar/cmd.BuildGoVersion=${BuildGoVersion}' \ +" + +build: + go build -o blazar ${LDFLAGS} + +run: + go run ./... + +clean: + go clean + +test: + go test -mod=readonly -race ./... + +lint: + golangci-lint run ./... + +proto: + @ if ! which protoc > /dev/null; then \ + echo "error: protoc not installed" >&2; \ + exit 1; \ + fi + protoc --proto_path=./proto --go_out=. --go-grpc_out=. --grpc-gateway_out=. --grpc-gateway_opt generate_unbound_methods=true proto/upgrades_registry.proto proto/version_resolver.proto proto/blazar.proto proto/checks.proto + protoc-go-inject-tag -input="internal/pkg/proto/upgrades_registry/*.pb.go" -remove_tag_comment + protoc-go-inject-tag -input="internal/pkg/proto/version_resolver/*.pb.go" -remove_tag_comment + protoc-go-inject-tag -input="internal/pkg/proto/blazar/*.pb.go" -remove_tag_comment + sed -i 's/upgrades_registry "internal\/pkg\/proto\/upgrades_registry"/upgrades_registry "blazar\/internal\/pkg\/proto\/upgrades_registry"/' internal/pkg/proto/version_resolver/version_resolver.pb.go + +build-simapp: + ./testdata/scripts/build_simapp.sh + + cp testdata/scripts/start_simd_with_upgrade.sh ./testdata/daemon/images/v0.0.1/start_simd_with_upgrade.sh + chmod +x ./testdata/daemon/images/v0.0.1/start_simd_with_upgrade.sh + +compress-simapp: + upx ./testdata/daemon/images/v0.0.1/simd-1 + upx ./testdata/daemon/images/v0.0.2/simd-2 diff --git a/README.md b/README.md new file mode 100644 index 0000000..52653f9 --- /dev/null +++ b/README.md @@ -0,0 +1,259 @@ +
+ + Logo + + +

Blazar: Automatic Cosmos SDK Network Upgrades

+ +

+ Life is too short to wait for the upgrade block height! +
+
+ Getting Started + · + CLI + · + Web UI + · + Proxy UI + · + Slack + · + FAQ +

+
+ +## What is Blazar? +Blazar is a standalone application designed to automate network upgrades for [Cosmos SDK](https://github.com/cosmos/cosmos-sdk) based blockchain networks. + +### The Need for Blazar +At [Chorus One](https://chorus.one), we manage over 50 blockchain networks, many of which are part of the Cosmos Ecosystem. Each network has its own upgrade schedule, which can vary from monthly to bi-weekly, depending on the urgency of the upgrade and Cosmos SDK releases. Our 24/7 on-call team handles multiple upgrades weekly. + +The upgrade process is generally straightforward but can be time-consuming. Here's how it typically works: +1. An upgrade is announced via a governance proposal or other communication channels (Discord, Telegram, Slack, Email, etc.). +2. The upgrade details specify the block height and the version network operators should use. +3. At the specified block height, the node halts, and operators must upgrade the binary and restart the node(s). +4. While waiting for consensus, operators often engage in progress updates on Discord. +5. Once the upgrade is successful, operators return to their regular tasks. + +Blazar was created to automate this process, allowing our team to use their time more productively. It currently handles the majority of network upgrades for Cosmos Networks at Chorus One. + +## Requirements: Docker & Docker Compose +Blazar is designed to work with nodes configured and spawned via Docker Compose. + +### Why Not Cosmovisor? +While many operators use [Cosmovisor](https://docs.cosmos.network/main/build/tooling/cosmovisor) with systemd services, this setup doesn't meet our specific needs. Instead of relying on GitHub releases, we [build our own binaries](https://handbook.chorus.one/node-software/build-process.html), ensuring a consistent build environment with Docker. This approach allows us to use exact software versions and generate precise build artifacts (e.g., libwasmvm.so). + +Cosmovisor is designed to run as the parent process of a validator node, replacing node binaries at the upgrade height. However, this model isn't compatible with Docker Compose managed services. To address this, we developed Blazar as a more effective solution tailored to our setup. + +**Note:** If you'd like Blazar to work with systemd services, contributions are welcome! 
+ +| | Blazar | Cosmovisor | +|------------------- |-------------------------------------------- |--------------------------------- | +| Control plane | Docker | [Fork/Exec](https://docs.cosmos.network/main/build/tooling/cosmovisor#design) | +| Upgrade mechanism | Image Tag Update | Replace Binary | +| Configuration | TOML (Blazar) + YAML (docker-compose.yml) | [Custom directory structure](https://docs.cosmos.network/main/build/tooling/cosmovisor#folder-layout) | +| Upgrade strategy | Gov, Coordinated, Uncoordinated | [Gov, Coordinated, Uncoordinated](https://docs.cosmos.network/main/build/tooling/cosmovisor#adding-upgrade-binary) | +| Upgrade scope | Single, Multi-node* | Single node | +| Pre checks | :heavy_check_mark: | :heavy_check_mark: (preupgrade.sh) | +| Post checks | :heavy_check_mark: | :x: | +| Metrics | :heavy_check_mark: | :x: | +| Notifications | :heavy_check_mark: (Slack) | :x: | +| UI + REST + RPC | :heavy_check_mark: | :x: | +| CLI | :heavy_check_mark: | :heavy_check_mark: | +| Upgrade Feeds | Governance, Database, Local | Governance, Local** | + +\* `DATABASE` registered upgrades are executed by multiple nodes feeding from the provider + +\** For Cosmovisor everything looks as [if it was scheduled through governance](https://docs.cosmos.network/main/build/tooling/cosmovisor#detecting-upgrades) + +## Key Features +- **Upgrade Feeds:** Fetch upgrade information from multiple sources like GOVERNANCE, DATABASE, and LOCAL. +- **Upgrade Strategies:** Supports various upgrade scenarios, including height-specific and manually coordinated upgrades. +- **Pre and Post Upgrade Checks:** Automate checks like docker image existence, node and consensus status. +- **Stateful Execution:** Tracks upgrade stages to ensure consistent execution flow. +- **Cosmos SDK Gov/Upgrade Module Compliance:** Understands and respects the Cosmos SDK governance module upgrades. +- **Slack Integration:** Optional Slack notifications for every action Blazar performs. +- **Modern Stack:** Includes CLI, UI, REST, gRPC, Prometheus metrics, and Protobuf. +- **Built by Ops Team:** Developed by individuals with firsthand experience in node operations. + +## How Blazar Works +![Blazar Under the Hood](https://github.com/user-attachments/assets/4c72c53a-44d7-4b74-85d7-5fe193e1560a) + +Blazar constructs a prioritized list of upgrades from multiple providers and takes appropriate actions based on the most recent state. It retrieves block heights from WSS endpoints or periodic gRPC polls and triggers Docker components when the upgrade height is reached. Notifications are sent to logs and Slack (if configured). + +In simple terms, Blazar performs the following steps: +1. **Upgrade List Construction:** Blazar compiles a unified list of upgrades from various providers (database, local, chain), resolving priorities based on the highest precedence. +2. **State Evaluation & Action:** The Blazar daemon reads this list in conjunction with the most recent state, taking relevant actions, such as performing a pre-upgrade check or finalizing the upgrade process. +3. **Block Height Detection:** The daemon tracks block heights via WSS endpoints or periodic gRPC polls. +4. **Upgrade Execution:** When the upgrade height is reached, the corresponding Docker components are executed. +5. **Notification Delivery:** Blazar sends notifications to logs and Slack (if configured). + +While the logic is simple, it's important to understand the differences between the types of upgrades: +1. 
**GOVERNANCE:** A coordinated upgrade initiated by chain governance, expected to be executed by all validators at a specified block height. +2. **NON-GOVERNANCE COORDINATED:** An upgrade initiated by operators, not by the chain, but it is expected to occur at the same block height across all validators. +3. **NON-GOVERNANCE UNCOORDINATED:** An operator-initiated upgrade, independent of chain governance, that can be executed at any time. + +NOTE: Blazar does one job and does it well, meaning you need one Blazar instance per Cosmos-SDK node. + +NOTE: You are free to choose your upgrade proposal providers. An SQL database is not mandatory - you can opt to use the "LOCAL" provider or both simultaneously, depending on your needs. + +## Getting Started +To run Blazar, you need Go (compiler) and Docker Compose on the target machine: +```sh +$ apt-get install golang +$ apt-get install docker-compose +``` + +Configure and run Blazar: +```sh +$ cp blazar.sample.toml blazar.toml +$ make build +$ ./blazar run --config blazar.toml +``` + +### CLI & REST Interface +Register or list upgrades using the CLI: +```sh +$ ./blazar upgrades list --host 127.0.0.1 --port 5678 +... table with upgrades ... + +$ ./blazar upgrades register --height "13261400" --tag '4.2.0' --type NON_GOVERNANCE_COORDINATED --source DATABASE --host 127.0.0.1 --port 5678 --name 'security upgrade' +``` + +Or use the REST interface: +``` +curl -s http://127.0.0.1:1234/v1/upgrades/list +``` + +### Blazar UI +Quickly register a new version tag and upgrade using the UI. + +![Web UI](https://github.com/user-attachments/assets/834d4903-b5ab-4a54-8f74-ed1768cf7e6f) + + +### Slack Integration +Track the upgrade process in a single Slack thread 🧵. + +![Slack Notifications](https://github.com/user-attachments/assets/d76e7b44-2015-4682-85e3-863b11918662) + + +### Proxy UI +Blazar Proxy consolidates the latest updates from all Blazar instances. Here's how you can run it: +``` +$ cp proxy.sample.toml proxy.toml +$ ./blazar proxy --config proxy.toml +``` + +![Proxy UI](https://github.com/user-attachments/assets/25e5844b-3860-4b46-87ad-9febd2e8aa99) + + +## Frequently Asked Questions +
+ Why do I need to register a version tag separately? + +Cosmos-SDK Software Upgrade Proposals don't explicitly specify the version you must upgrade to. It can be derived from the rich text data within the proposal, such as: +1. A link to the binary release (if present). +2. The proposal title. +3. The human-written text. + +Currently, Blazar does not infer which version should be used. As a network operator, you must provide a version tag; otherwise, Blazar will skip the upgrade. +
+ +
+ What are the upgrade priorities, and why do I need them? + +Consider a scenario where a network operator runs three nodes. The first node uses an image with a patch (e.g., PebbleDB support), while the other two run vanilla upstream images. + +In this configuration, Blazar uses three upgrade sources: +* CHAIN (priority 1) +* DATABASE (priority 2) +* LOCAL (priority 3) + +All three Blazar instances detect a new upgrade from CHAIN. The operator registers a new version in the DATABASE so that every instance knows what to pick up. However, one node requires a patched version. The network operator must register a new version in the LOCAL provider. + +Now, the first node sees two different versions from two providers (DATABASE & LOCAL). Which one should it use? +**The one with the higher priority** + +The end state on each Blazar node is: +1. Node 1 - v1.0.0-patched, priority 3 +2. Nodes 2 & 3 - v1.0.0, priority 2 + +The same logic applies to upgrade entries and versions. +
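+
+As a concrete sketch (illustrative heights and tags; the flags are those of the `./blazar upgrades register` command shown above), the operator could express this as:
+
+```sh
+# Nodes 2 & 3: shared entry from the DATABASE provider (priority 2)
+$ ./blazar upgrades register --height 13261400 --tag 'v1.0.0' \
+    --type GOVERNANCE --source DATABASE --priority 2 --host 127.0.0.1 --port 5678
+
+# Node 1: LOCAL override with the patched image; priority 3 wins
+$ ./blazar upgrades register --height 13261400 --tag 'v1.0.0-patched' \
+    --type GOVERNANCE --source LOCAL --priority 3 --host 127.0.0.1 --port 5678
+```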
+ +
+ What happens if I don't register a version tag for an upgrade? + +Blazar will skip the upgrade. +
+ +
+ Blazar doesn't display any upgrades? + +Blazar maintains its own state of all upgrades, which is periodically refreshed at the interval specified in your configuration. If you don't see the upgrades, it is likely that you need to wait for the given interval for Blazar to update the state. + +NOTE: Adding a new version or upgrade via CLI/UI will trigger a state update. +
+ +
+ The upgrade governance proposal passed, but the upgrade is still in the 'SCHEDULED' state? + +Blazar will change the upgrade state from 'SCHEDULED' to 'ACTIVE' when the voting period is over. +
+ +
+ What is the purpose of the 'force cancel' flag? + +There are two ways to cancel an upgrade in Blazar. The standard method creates a `cancellation entry` in the provider storage, such as an SQL database, if no upgrade is registered. Otherwise, it updates the upgrade status field to `CANCELLED` for the upgrade with the highest priority. + +Blazar periodically fetches and updates the list of upgrades at the interval specified in your configuration. But what if you need to cancel the upgrade immediately and can't wait for the next fetch? For such uncommon scenarios, you can use the `force cancel` mode, which sets the `CANCELLED` status directly in the Blazar state machine. + +The force mode works per Blazar instance, so if you have, say, 3 nodes, you would need to force cancel all three via CLI/UI/RPC calls. If you use the `DATABASE` provider, you can simply cancel the upgrade for everyone, but you need to wait for Blazar to pick it up. + +To simplify, think of the `force cancel` as the last line of defense. It is unlikely that you will need it, but it's there just in case. +
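+
+For illustration (values are examples; flags as defined on the `cancel` subcommand), the two modes differ only by one flag:
+
+```sh
+# Standard cancel: recorded in the provider, picked up on the next sync
+$ ./blazar upgrades cancel --height 13261400 --source DATABASE --host 127.0.0.1 --port 5678
+
+# Force cancel: writes CANCELLED straight into this instance's state machine
+$ ./blazar upgrades cancel --height 13261400 --source DATABASE --force --host 127.0.0.1 --port 5678
+```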
+ +
+ I registered a new upgrade, but only one node is 'up to date'? + +Remember that Blazar refreshes its internal state periodically. If you registered a new upgrade on one instance with the 'DATABASE' provider and the other node doesn't see it, you have two options: +1. Wait for Blazar to sync (see 'Time to next sync' in the UI). +2. Force sync via UI/CLI/RPC call. +
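+
+Option 2 is a one-liner per instance (output shown is illustrative):
+
+```sh
+$ ./blazar upgrades force-sync --host 127.0.0.1 --port 5678
+Upgrade registry synchronised successfully at height: 13261000
+```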
+ +
+ Does Blazar work with chains with non-standard gov module (e.g., Neutron)? + +Yes, but with limitations. Neutron is a smart contract chain that implements its own governance (DAO DAO) via an on-chain contract. Blazar currently doesn't understand the custom smart contract logic, therefore the operator cannot use the `CHAIN` provider. However, upgrades can still be executed via: +1. `NON_GOVERNANCE_COORDINATED` - a network operator registers the upgrade at a certain height. +2. `upgrade-info.json` - the Neutron node will put the `upgrade-info.json` on disk prior to the upgrade. A network operator must register a docker version tag for the expected upgrade height. + +The downside of option 2 is the lack of pre-upgrade checks. +
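+
+For reference, `upgrade-info.json` is the small file the node's x/upgrade module writes into its data directory at the halt height; a minimal example (illustrative values and path) looks like:
+
+```sh
+$ cat /path/to/chain-home/data/upgrade-info.json
+{"name":"v2.0.0","height":13261400,"info":"..."}
+```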
+ +
+ What is the difference between 'compose-file' and 'env-file' upgrade mode? + +When performing a node upgrade, Blazar updates the docker version tag (e.g., `v1.0.0` to `v2.0.0`). That version is stored in the `docker-compose.yaml` file in the following form: +``` +$ cat docker-compose.yaml | grep 'image' +image: .dkr.ecr.us-east-1.amazonaws.com/chorusone/archway:v1.0.0 +``` + +or in the `.env` file: +``` +$ cat docker-compose.yaml | grep 'image' +image: .dkr.ecr.us-east-1.amazonaws.com/chorusone/archway:${VERSION_archway} + +$ cat .env +VERSION_archway=1.0.0 +``` + +> Why do we support both upgrade modes and which one is better? + +The `compose-file` is simpler, but we highly recommend the `env-file` mode. If the version tag is stored in the `.env` file, the blast radius of possible mistakes is very low, unlike editing the whole `docker-compose.yaml` to replace one single variable. +
+ +## License +Blazar is licensed under the Apache 2.0 License. For more detailed information, please refer to the LICENSE file in the repository. diff --git a/blazar.sample.toml b/blazar.sample.toml new file mode 100644 index 0000000..4446836 --- /dev/null +++ b/blazar.sample.toml @@ -0,0 +1,204 @@ +# Absolute path to the docker-compose.yml file running the chain process +compose-file = "" +# Docker Compose service name to perform the upgrade on +compose-service = "" + +# Specify where to upgrade the version: in the Compose file or a dedicated .env file +# Options are "compose-file" and "env-file" +upgrade-mode = "compose-file" + +# [OPTIONAL] version-file is required if upgrade-mode is set to "env-file" +# Absolute path to version file, containing the image version for docker-compose +# version-file = "/etc/docker/compose/{{ chain }}-chorus/.env" + +# Absolute path to the chain home directory on the host system +chain-home = "" +# Info log level (0 for debug, -1 for trace) +# Refer to https://pkg.go.dev/github.com/rs/zerolog#readme-leveled-logging for all options +log-level = 1 + +# The Blazar server binds to this address +# The gRPC server provides an endpoint to register new upgrades and list them +# The HTTP server serves the same endpoints using grpc-gateway and provides metrics +host = "0.0.0.0" +http-port = 1234 +grpc-port = 5678 + +[watchers] +# Interval to poll for upgrade-info.json +# Interpreted as Go's time.Duration +upgrade-info-interval = "300ms" +# Interval to poll the chain for the last height +# If set to zero (0), Blazar will use a streaming WebSocket client to get the height for every new block +# Interpreted as Go's time.Duration +height-interval = "0s" +# Interval to poll for upgrade proposals +# Interpreted as Go's time.Duration +upgrade-proposals-interval = "10m" + +[clients] +# Host of gRPC and CometBFT service exposed by the chain process +host = "" +grpc-port = 9090 +cometbft-port = 25567 +# Timeout value for gRPC method calls +timeout = "10s" + +[compose-cli] +# Timeout for docker-compose down in seconds +# This is passed to docker-compose down --timeout after rounding to the nearest second +# A deadline for docker-compose down is set to compose-down-timeout + 5 seconds +# Minimum 10s +down-timeout = "1m" +# Deadline for docker-compose up -f --force-recreate to run in seconds +# Blazar will consider it a "failed" upgrade if the deadline is reached +# Minimum 10s +up-deadline = "1m" +# Blazar uses environment variables to configure the Cosmos SDK node. For example, to stop the node at X height during +# the coordinated upgrade, Blazar will set _HALT_HEIGHT=X. +# +# If empty, Blazar will fetch the env-prefix (e.g., "GAIAD") from the node via an RPC call. +# Otherwise, the env-prefix will be used as is. +env-prefix = "" + +[checks.pre-upgrade] +# Blazar runs some pre-upgrade checks automatically when the chain height crosses (upgrade-height - blocks) +blocks = 200 +# Which checks to enable; all options are listed here +enabled = ["PULL_DOCKER_IMAGE", "SET_HALT_HEIGHT"] + +# [OPTIONAL] Omit this section if you don't want this check +# Sets up the HALT_HEIGHT env variable that makes the node stop at a certain height. +# This is used by the NON_GOV_COORDINATED upgrade type +[checks.pre-upgrade.set-halt-height] +# Specify how long Blazar should delay the check before the upgrade. For instance, if `blocks = 200` and +# `delay-blocks=10`, Blazar will execute the check when the chain height is at `upgrade-height - 190`. 
+# This is useful if you run multiple nodes and don't want to restart them simultaneously. +delay-blocks = 0 + +# Blazar runs a post-upgrade check which involves polling a gRPC and a CometBFT endpoint until both are responsive. +# Then, as a second post-upgrade check, it polls the height reporting endpoint to check if the chain height is increasing. +[checks.post-upgrade] +# Which checks to enable; all options are listed here +enabled = ["GRPC_RESPONSIVE", "CHAIN_HEIGHT_INCREASED", "FIRST_BLOCK_VOTED"] + +# [OPTIONAL] Omit this section if you don't want this check +# Polls the gRPC endpoint to check if it is responsive +[checks.post-upgrade.grpc-responsive] +# This interval denotes the minimum time interval Blazar will ensure between two successive gRPC/CometBFT +# endpoint calls in this check +# Interpreted as Go's time.Duration +poll-interval = "1s" +# Specify a timeout after which Blazar gives up on polling the gRPC and CometBFT endpoints +# for responsiveness and treats the upgrade as a failed upgrade. +# Interpreted as Go's time.Duration +timeout = "3m" + +# [OPTIONAL] Omit this section if you don't want this check +# After the services are responsive, Blazar checks if the validator's signature is present +# on the prevote for the first block. +[checks.post-upgrade.first-block-voted] +# This interval denotes the minimum time interval Blazar will ensure between two successive gRPC/CometBFT +# endpoint calls in this check +# Interpreted as Go's time.Duration +poll-interval = "1s" +# If the first block after the upgrade hasn't been signed within the specified interval, Blazar will send you +# a notification. This can help in figuring out if this node is slow or the upgrade is botched. Don't set this too low, +# or you may be spammed with notifications. +# Interpreted as Go's time.Duration +notif-interval = "1m" +# Specify a timeout after which Blazar gives up on this check and treats the upgrade as a failed upgrade. +# Interpreted as Go's time.Duration +timeout = "5m" + +# [OPTIONAL] Omit this section if you don't want this check +# Polls the chain height reporting endpoint to check if the chain height has increased from the upgrade height +[checks.post-upgrade.chain-height-increased] +# This interval denotes the minimum time interval Blazar will ensure between two successive gRPC/CometBFT endpoint calls +# in this check +# Interpreted as Go's time.Duration +poll-interval = "1s" +# After the services are responsive, Blazar checks if the observed height of the chain increases. If the observed height +# doesn't increase within the specified interval, Blazar will send you a notification with the current online prevote VP. +# This can help in figuring out if this node is slow or the upgrade is botched. Don't set this too low, +# or you may be spammed with notifications. +# Interpreted as Go's time.Duration +notif-interval = "1m" +# Specify a timeout after which Blazar gives up on this check and treats the upgrade as a failed upgrade. 
+# Interpreted as Go's time.Duration +timeout = "5m" + +# [OPTIONAL] Omit this section if you don't want Slack notifications +[slack.webhook-notifier] +webhook-url = "" + +# [OPTIONAL] Omit this section if you don't want Slack notifications +# [slack.bot-notifier] +# auth-token = "" +# channel = "" +# group-messages = true + +# [OPTIONAL] Omit this section if you don't use a Docker credential helper +[docker-credential-helper] +# Absolute path to the Docker credential helper script/binary +# https://docs.docker.com/engine/reference/commandline/login/#credential-helper-protocol +command = "" +# Timeout for the credential helper in seconds +# Interpreted as Go's time.Duration +timeout = "10s" + +[upgrade-registry] +# List providers to enable here +# Enabled providers must have a definition under [upgrade-registry.providers.] +providers = ["chain", "database", "local"] + +# This is the name we will use to differentiate upgrades on this network from others in central sources like DB +network = "" + +# [Optional] Omit this section if you don't want to use a database provider +[upgrade-registry.provider.database] +# Default priority of an upgrade registered, can be an integer in 1-99. +# This can be overridden in the UI/gRPC/REST API +# In case multiple providers provide an upgrade for the same height, the highest priority +# upgrade will be picked by Blazar +default-priority = 3 +host = "" +port = 5432 +db = "" +user = "" +password = "" +# This will override the password field +password-file = "" +# Can be one of disable, allow, prefer, require, verify-ca, verify-full +ssl-mode = "disable" +# Allow GORM to auto-migrate the schema on startup +auto-migrate = false + +# [Optional] Omit this section if you don't want to use a local provider +[upgrade-registry.provider.local] +# Path to the JSON file where the upgrades will be stored and read from. If the file doesn't exist, Blazar will error. +# Create a file with "{}" to start fresh +config-path = "./local-provider.db.json" +# Default priority of an upgrade registered, can be an integer in 1-99. +# This can be overridden in the UI/gRPC/REST API +# See upgrade-registry.provider.database.priority for more info +default-priority = 2 + +# [Optional] Omit this section if you don't want to use a chain provider +[upgrade-registry.provider.chain] +# Default priority of an upgrade registered, can be an integer in 1-99. 
+# Since the chain provider is effectively a read-only DB, this value is used to determine the priority of the upgrade +# See upgrade-registry.provider.database.priority for more info +default-priority = 1 + +[upgrade-registry.state-machine] +# Only "local" is supported for now +# If no value is provided, the state machine is kept in memory, and all state info will be lost across restarts, which +# might be valuable for debugging +provider = "local" + +# [Optional] Omit this section if you don't want to use a version-resolver +# If the version tag is missing from the upgrade, it will try to be resolved using the version-resolver +[upgrade-registry.version-resolvers] +# Only "database" and "local" are supported for now +providers = ["local", "database"] diff --git a/cmd/provider.go b/cmd/provider.go new file mode 100644 index 0000000..872a75f --- /dev/null +++ b/cmd/provider.go @@ -0,0 +1,17 @@ +package cmd + +import ( + "blazar/cmd/provider" + + "github.com/spf13/cobra" +) + +var providerCmd = &cobra.Command{ + Use: "provider", + Short: "Provider related commands", +} + +func init() { + providerCmd.AddCommand(provider.GetProviderDatabaseCmd()) + rootCmd.AddCommand(providerCmd) +} diff --git a/cmd/provider/database.go b/cmd/provider/database.go new file mode 100644 index 0000000..613cfd4 --- /dev/null +++ b/cmd/provider/database.go @@ -0,0 +1,18 @@ +package provider + +import ( + "blazar/cmd/provider/database" + + "github.com/spf13/cobra" +) + +func GetProviderDatabaseCmd() *cobra.Command { + registerDatabaseCmd := &cobra.Command{ + Use: "database", + Short: "Database provider related commands", + } + + registerDatabaseCmd.AddCommand(database.GetDatabaseMigrationsCmd()) + + return registerDatabaseCmd +} diff --git a/cmd/provider/database/migrations.go b/cmd/provider/database/migrations.go new file mode 100644 index 0000000..db1de83 --- /dev/null +++ b/cmd/provider/database/migrations.go @@ -0,0 +1,113 @@ +package database + +import ( + "fmt" + + "blazar/internal/pkg/config" + "blazar/internal/pkg/errors" + "blazar/internal/pkg/provider/database" + + "github.com/spf13/cobra" + "gorm.io/gorm" + "gorm.io/gorm/logger" +) + +func GetDatabaseMigrationsCmd() *cobra.Command { + registerMigrationCmd := &cobra.Command{ + Use: "migration", + Short: "Database migrations related commands", + } + + registerMigrationCmd.AddCommand(GetMigrationDumpCmd()) + registerMigrationCmd.AddCommand(GetMigrationApplyCmd()) + + return registerMigrationCmd +} + +func GetMigrationDumpCmd() *cobra.Command { + registerDumpCmd := &cobra.Command{ + Use: "dump", + Short: "Dump SQL changes to the screen", + RunE: func(cmd *cobra.Command, _ []string) error { + cfg, err := readConfig(cmd) + if err != nil { + return err + } + + dbCfg := cfg.UpgradeRegistry.Provider.Database + + db, err := database.InitDB(dbCfg, &gorm.Config{ + Logger: logger.Default.LogMode(logger.Silent), + }) + if err != nil { + return err + } + + tx := db.Begin() + var statements []string + if err := tx.Callback().Raw().Register("record_blazar_migration", func(tx *gorm.DB) { + statements = append(statements, tx.Statement.SQL.String()) + }); err != nil { + return err + } + if err := database.AutoMigrate(tx); err != nil { + return err + } + tx.Rollback() + + if err := tx.Callback().Raw().Remove("record_blazar_migration"); err != nil { + return err + } + + for _, s := range statements { + fmt.Println(s) + } + + return nil + }, + } + + return registerDumpCmd +} + +func GetMigrationApplyCmd() *cobra.Command { + registerUCmd := &cobra.Command{ + Use: "apply", + 
Short: "Perform database auto-migration", + RunE: func(cmd *cobra.Command, _ []string) error { + cfg, err := readConfig(cmd) + if err != nil { + return err + } + + dbCfg := cfg.UpgradeRegistry.Provider.Database + db, err := database.InitDB(dbCfg, &gorm.Config{ + Logger: logger.Default.LogMode(logger.Silent), + }) + if err != nil { + return err + } + + if err := database.AutoMigrate(db); err != nil { + return errors.Wrapf(err, "failed to auto-migrate database") + } + + fmt.Println("Database auto-migration successful") + return nil + }, + } + + return registerUCmd +} + +func readConfig(cmd *cobra.Command) (*config.Config, error) { + cfgFile := cmd.Flag("config").Value.String() + if cfgFile != "" { + cfg, err := config.ReadConfig(cfgFile) + if err != nil { + return nil, err + } + return cfg, nil + } + return nil, nil +} diff --git a/cmd/proxy.go b/cmd/proxy.go new file mode 100644 index 0000000..289a67c --- /dev/null +++ b/cmd/proxy.go @@ -0,0 +1,36 @@ +package cmd + +import ( + "blazar/internal/pkg/errors" + "blazar/internal/pkg/proxy" + + "github.com/spf13/cobra" +) + +var proxyCmd = &cobra.Command{ + Use: "proxy", + Short: "Run the Blazar proxy daemon", + RunE: func(cmd *cobra.Command, _ []string) error { + cfgFile := cmd.Flag("config").Value.String() + cfg, err := proxy.ReadConfig(cfgFile) + if err != nil { + return errors.Wrapf(err, "failed to read the toml config") + } + + if err := cfg.ValidateAll(); err != nil { + return errors.Wrapf(err, "failed to validate config") + } + + // setup daemon + d := proxy.NewProxy() + if err := d.ListenAndServe(cmd.Context(), cfg); err != nil { + return errors.Wrapf(err, "failed to start grpc/http server") + } + + return nil + }, +} + +func init() { + rootCmd.AddCommand(proxyCmd) +} diff --git a/cmd/root.go b/cmd/root.go new file mode 100644 index 0000000..60da96b --- /dev/null +++ b/cmd/root.go @@ -0,0 +1,26 @@ +package cmd + +import ( + "os" + + "github.com/spf13/cobra" +) + +// rootCmd represents the base command when called without any subcommands +var rootCmd = &cobra.Command{ + Use: "blazar", + Short: "Automatically perform upgrades for cosmos-sdk based chains", +} + +// Execute adds all child commands to the root command and sets flags appropriately. +// This is called by main.main(). It only needs to happen once to the rootCmd. 
+func Execute() { + err := rootCmd.Execute() + if err != nil { + os.Exit(1) + } +} + +func init() { + rootCmd.PersistentFlags().String("config", "", "config toml file") +} diff --git a/cmd/run.go b/cmd/run.go new file mode 100644 index 0000000..0fedf3b --- /dev/null +++ b/cmd/run.go @@ -0,0 +1,74 @@ +package cmd + +import ( + "blazar/internal/pkg/config" + "blazar/internal/pkg/daemon" + "blazar/internal/pkg/daemon/util" + "blazar/internal/pkg/errors" + "blazar/internal/pkg/log/logger" + "blazar/internal/pkg/log/notification" + "blazar/internal/pkg/metrics" + + "github.com/spf13/cobra" +) + +var runCmd = &cobra.Command{ + Use: "run", + Short: "Run the blazar daemon", + RunE: func(cmd *cobra.Command, _ []string) error { + cfgFile := cmd.Flag("config").Value.String() + cfg, err := config.ReadConfig(cfgFile) + if err != nil { + return errors.Wrapf(err, "failed to read the toml config") + } + + if err := cfg.ValidateAll(); err != nil { + return errors.Wrapf(err, "failed to validate config") + } + + // setup logging level + logger.SetGlobalLogLevel(cfg.LogLevel) + + // setup initial logger + lg := logger.FromContext(cmd.Context()) + + // setup metrics + hostname := util.GetHostname() + metrics, err := metrics.NewMetrics(cfg.ComposeFile, hostname, BinVersion) + if err != nil { + return errors.Wrapf(err, "error creating metrics server") + } + + // setup notifier + notifier := notification.NewFallbackNotifier(cfg, metrics, lg, hostname) + + // setup daemon + d, err := daemon.NewDaemon(cmd.Context(), cfg, metrics) + if err != nil { + return errors.Wrapf(err, "failed to setup new daemon") + } + + if err := d.ListenAndServe(cmd.Context(), cfg); err != nil { + return errors.Wrapf(err, "failed to start grpc/http server") + } + + // setup notifier in the context + ctx := notification.WithContextFallback(cmd.Context(), notifier) + + // initialize daemon (fetch initial state and run basic sanity checks) + if err := d.Init(ctx, cfg); err != nil { + return errors.Wrapf(err, "failed to initialize daemon") + } + + // start the daemon (monitor and run any upcoming upgrade) + if err := d.Run(ctx, cfg); err != nil { + return errors.Wrapf(err, "daemon run failed") + } + + return nil + }, +} + +func init() { + rootCmd.AddCommand(runCmd) +} diff --git a/cmd/upgrades.go b/cmd/upgrades.go new file mode 100644 index 0000000..194bc72 --- /dev/null +++ b/cmd/upgrades.go @@ -0,0 +1,23 @@ +package cmd + +import ( + "blazar/cmd/upgrades" + + "github.com/spf13/cobra" +) + +var upgradesCmd = &cobra.Command{ + Use: "upgrades", + Short: "Upgrades related commands", +} + +func init() { + upgradesCmd.AddCommand(upgrades.GetUpgradeListCmd()) + upgradesCmd.AddCommand(upgrades.GetUpgradeRegisterCmd()) + upgradesCmd.AddCommand(upgrades.GetForceSyncCmd()) + + upgradesCmd.PersistentFlags().String("host", "", "Blazar host to talk to, will override config values if config is specified") + upgradesCmd.PersistentFlags().Uint16("port", 0, "Blazar grpc port to talk to, will override config values if config is specified") + + rootCmd.AddCommand(upgradesCmd) +} diff --git a/cmd/upgrades/cancel.go b/cmd/upgrades/cancel.go new file mode 100644 index 0000000..0d37e3a --- /dev/null +++ b/cmd/upgrades/cancel.go @@ -0,0 +1,97 @@ +package upgrades + +import ( + "encoding/json" + "fmt" + "net" + "strconv" + "strings" + + "blazar/cmd/util" + "blazar/internal/pkg/log/logger" + urproto "blazar/internal/pkg/proto/upgrades_registry" + + "github.com/spf13/cobra" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" +) + +var ( + // 
Update state machine directly
+	force bool
+)
+
+func GetUpgradeCancelCmd() *cobra.Command {
+	cancelUpgradeCmd := &cobra.Command{
+		Use:   "cancel",
+		Short: "Cancel an upgrade",
+		RunE: func(cmd *cobra.Command, _ []string) error {
+			lg := logger.NewLogger()
+			ctx := logger.WithContext(cmd.Context(), lg)
+
+			cfg, err := readConfig(cmd)
+			if err != nil {
+				return err
+			}
+
+			host, port, err := util.GetBlazarHostPort(cmd, cfg)
+			if err != nil {
+				return err
+			}
+
+			addr := net.JoinHostPort(host, strconv.Itoa(int(port)))
+			conn, err := grpc.NewClient(addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
+			if err != nil {
+				return err
+			}
+
+			c := urproto.NewUpgradeRegistryClient(conn)
+
+			// handle upgrade fields
+			if _, ok := urproto.ProviderType_value[source]; !ok {
+				return fmt.Errorf("invalid source: %s", source)
+			}
+
+			if height == "" {
+				return fmt.Errorf("height is required")
+			}
+
+			upgradeHeight, err := strconv.ParseInt(height, 10, 64)
+			if err != nil {
+				return err
+			}
+
+			cancelRequest := &urproto.CancelUpgradeRequest{
+				Height: upgradeHeight,
+				Source: urproto.ProviderType(urproto.ProviderType_value[source]),
+			}
+
+			serialized, err := json.MarshalIndent(&cancelRequest, "", " ")
+			if err != nil {
+				return err
+			}
+
+			lg.Info().Msgf("Cancelling upgrade: %s", string(serialized))
+
+			if _, err = c.CancelUpgrade(ctx, cancelRequest); err != nil {
+				return err
+			}
+			lg.Info().Msgf("Successfully cancelled upgrade for height=%s", height)
+			return nil
+		},
+	}
+
+	cancelUpgradeCmd.Flags().StringVar(&height, "height", "", "Height of the upgrade to cancel")
+	cancelUpgradeCmd.Flags().StringVar(
+		&source, "source", "",
+		fmt.Sprintf("Upgrade source; valid values: %s", strings.Join(allUpgradeSources, ", ")),
+	)
+	cancelUpgradeCmd.Flags().BoolVar(&force, "force", false, "Forcefully set the state machine status to CANCELLED")
+
+	// only flags defined on this command can be marked required
+	for _, flagName := range []string{"height", "source"} {
+		err := cancelUpgradeCmd.MarkFlagRequired(flagName)
+		cobra.CheckErr(err)
+	}
+
+	return cancelUpgradeCmd
+}
diff --git a/cmd/upgrades/force_sync.go b/cmd/upgrades/force_sync.go
new file mode 100644
index 0000000..f72dfe6
--- /dev/null
+++ b/cmd/upgrades/force_sync.go
@@ -0,0 +1,58 @@
+package upgrades
+
+import (
+	"fmt"
+	"net"
+	"strconv"
+
+	"blazar/cmd/util"
+	"blazar/internal/pkg/log/logger"
+	proto "blazar/internal/pkg/proto/upgrades_registry"
+
+	"github.com/spf13/cobra"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials/insecure"
+)
+
+func GetForceSyncCmd() *cobra.Command {
+	forceSyncCmd := &cobra.Command{
+		Use:   "force-sync",
+		Short: "Send a force sync request to synchronise the registry with the latest data",
+		RunE: func(cmd *cobra.Command, _ []string) error {
+			lg := logger.NewLogger()
+			ctx := logger.WithContext(cmd.Context(), lg)
+
+			cfg, err := readConfig(cmd)
+			if err != nil {
+				return err
+			}
+
+			host, port, err := util.GetBlazarHostPort(cmd, cfg)
+			if err != nil {
+				return err
+			}
+
+			if err := parseConfig(cfg); err != nil {
+				return err
+			}
+
+			addr := net.JoinHostPort(host, strconv.Itoa(int(port)))
+			conn, err := grpc.NewClient(addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
+			if err != nil {
+				return err
+			}
+
+			c := proto.NewUpgradeRegistryClient(conn)
+			response, err := c.ForceSync(ctx, &proto.ForceSyncRequest{})
+			if err != nil {
+				return err
+			}
+
+			fmt.Printf("Upgrade registry synchronised successfully at height: %d\n", response.Height)
+
+			return nil
+		},
+	}
+
+	return forceSyncCmd
+}
diff --git
a/cmd/upgrades/list.go b/cmd/upgrades/list.go new file mode 100644 index 0000000..7b60280 --- /dev/null +++ b/cmd/upgrades/list.go @@ -0,0 +1,170 @@ +package upgrades + +import ( + "fmt" + "net" + "strconv" + "time" + + "blazar/cmd/util" + "blazar/internal/pkg/config" + "blazar/internal/pkg/log/logger" + blazarproto "blazar/internal/pkg/proto/blazar" + proto "blazar/internal/pkg/proto/upgrades_registry" + + "github.com/jedib0t/go-pretty/v6/table" + "github.com/spf13/cobra" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" +) + +var ( + timeout time.Duration + noCache bool + + filterHeight int64 + filterUpgradeType string + filterProviderType string +) + +func GetUpgradeListCmd() *cobra.Command { + listCmd := &cobra.Command{ + Use: "list", + Short: "List all upgrades", + RunE: func(cmd *cobra.Command, _ []string) error { + lg := logger.NewLogger() + ctx := logger.WithContext(cmd.Context(), lg) + + cfg, err := readConfig(cmd) + if err != nil { + return err + } + + host, port, err := util.GetBlazarHostPort(cmd, cfg) + if err != nil { + return err + } + + if err := parseConfig(cfg); err != nil { + return err + } + + addr := net.JoinHostPort(host, strconv.Itoa(int(port))) + conn, err := grpc.NewClient(addr, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + return err + } + + b := blazarproto.NewBlazarClient(conn) + heightResponse, err := b.GetLastestHeight(ctx, &blazarproto.GetLatestHeightRequest{}) + latestHeight := int64(0) + if err != nil { + // We don't return error because if the upgrade is ongoing, the node is expected to be down and return error. + // In such case we just show UNKNOWN + lg.Warn().Err(err).Msg("Failed to get latest height") + } else { + latestHeight = heightResponse.GetHeight() + } + + requestedUpgradeType, requestedProviderType, requestedHeight, err := filterFlags() + if err != nil { + return err + } + + c := proto.NewUpgradeRegistryClient(conn) + listUpgradesResponse, err := c.ListUpgrades(ctx, &proto.ListUpgradesRequest{ + DisableCache: noCache, + Height: requestedHeight, + Type: requestedUpgradeType, + Source: requestedProviderType, + }) + if err != nil { + return err + } + + tw := table.NewWriter() + tw.AppendHeader(table.Row{ + "Height", + "Tag", + "Network", + "Name", + "Type", + "Status", + "Step", + "Priority", + "Source", + "ProposalID", + "Blocks_to_upgrade", + }) + + for _, upgrade := range listUpgradesResponse.Upgrades { + blocksToUpgrade := "" + if latestHeight != 0 { + blocksToUpgrade = strconv.FormatInt(upgrade.GetHeight()-latestHeight, 10) + } + + tw.AppendRow(table.Row{ + upgrade.Height, + upgrade.Tag, + upgrade.Network, + upgrade.Name, + upgrade.Type, + upgrade.Status, + upgrade.Step, + upgrade.GetPriority(), + upgrade.Source, + upgrade.GetProposalId(), + blocksToUpgrade, + }) + } + + fmt.Println(tw.Render()) + + return nil + }, + } + + listCmd.Flags().DurationVar(&timeout, "timeout", 10*time.Second, "Grpc request timeout") + listCmd.Flags().BoolVar(&noCache, "nocache", false, "Skip upgrade registry cache (slower but more accurate)") + listCmd.Flags().Int64Var(&filterHeight, "height", 0, "Filter by height") + listCmd.Flags().StringVar(&filterUpgradeType, "type", "", "Filter by upgrade type") + listCmd.Flags().StringVar(&filterProviderType, "provider", "", "Filter by provider type") + + return listCmd +} + +func parseConfig(cfg *config.Config) error { + if cfg != nil { + if err := cfg.ValidateBlazarHostGrpcPort(); err != nil { + return err + } + } + return nil +} + +func filterFlags() 
(*proto.UpgradeType, *proto.ProviderType, *int64, error) {
+	var requestedUpgradeType *proto.UpgradeType
+	if filterUpgradeType != "" {
+		if _, ok := proto.UpgradeType_value[filterUpgradeType]; !ok {
+			return nil, nil, nil, fmt.Errorf("invalid upgrade type: %s", filterUpgradeType)
+		}
+		value := proto.UpgradeType(proto.UpgradeType_value[filterUpgradeType])
+		requestedUpgradeType = &value
+	}
+
+	var requestedProviderType *proto.ProviderType
+	if filterProviderType != "" {
+		if _, ok := proto.ProviderType_value[filterProviderType]; !ok {
+			return nil, nil, nil, fmt.Errorf("invalid provider type: %s", filterProviderType)
+		}
+		value := proto.ProviderType(proto.ProviderType_value[filterProviderType])
+		requestedProviderType = &value
+	}
+
+	var requestedHeight *int64
+	if filterHeight != 0 {
+		requestedHeight = &filterHeight
+	}
+
+	return requestedUpgradeType, requestedProviderType, requestedHeight, nil
+}
diff --git a/cmd/upgrades/register.go b/cmd/upgrades/register.go
new file mode 100644
index 0000000..301afe6
--- /dev/null
+++ b/cmd/upgrades/register.go
@@ -0,0 +1,179 @@
+package upgrades
+
+import (
+	"encoding/json"
+	"fmt"
+	"net"
+	"strconv"
+	"strings"
+
+	"blazar/cmd/util"
+	"blazar/internal/pkg/config"
+	"blazar/internal/pkg/log/logger"
+	blazarproto "blazar/internal/pkg/proto/blazar"
+	urproto "blazar/internal/pkg/proto/upgrades_registry"
+
+	"github.com/spf13/cobra"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials/insecure"
+)
+
+var (
+	// Upgrade fields
+	height      string
+	tag         string
+	name        string
+	upgradeType string
+	priority    int32
+	source      string
+	proposalID  int64
+
+	// Upgrade request fields
+	overwrite bool
+
+	// Other
+	allUpgradeTypes   []string
+	allUpgradeSources []string
+)
+
+func init() {
+	for _, upgradeType := range urproto.UpgradeType_name {
+		allUpgradeTypes = append(allUpgradeTypes, upgradeType)
+	}
+	for _, source := range urproto.ProviderType_name {
+		allUpgradeSources = append(allUpgradeSources, source)
+	}
+}
+
+func GetUpgradeRegisterCmd() *cobra.Command {
+	registerUpgradeCmd := &cobra.Command{
+		Use:   "register",
+		Short: "Associate image tag with an upgrade at a specific height",
+		RunE: func(cmd *cobra.Command, _ []string) error {
+			lg := logger.NewLogger()
+			ctx := logger.WithContext(cmd.Context(), lg)
+
+			cfg, err := readConfig(cmd)
+			if err != nil {
+				return err
+			}
+
+			host, port, err := util.GetBlazarHostPort(cmd, cfg)
+			if err != nil {
+				return err
+			}
+
+			addr := net.JoinHostPort(host, strconv.Itoa(int(port)))
+			conn, err := grpc.NewClient(addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
+			if err != nil {
+				return err
+			}
+
+			c := urproto.NewUpgradeRegistryClient(conn)
+
+			// handle upgrade fields
+			if _, ok := urproto.UpgradeType_value[upgradeType]; !ok {
+				return fmt.Errorf("invalid upgrade type: %s", upgradeType)
+			}
+
+			if _, ok := urproto.ProviderType_value[source]; !ok {
+				return fmt.Errorf("invalid source: %s", source)
+			}
+
+			if height == "" {
+				return fmt.Errorf("height is required")
+			}
+
+			// handle human friendly syntax for height
+			// +100 means 100 blocks from now
+			// 100 means block 100
+			upgradeHeight := int64(0)
+			if height[0] == '+' {
+				b := blazarproto.NewBlazarClient(conn)
+				heightResponse, err := b.GetLastestHeight(ctx, &blazarproto.GetLatestHeightRequest{})
+				if err != nil {
+					return err
+				}
+				latestHeight := heightResponse.GetHeight()
+
+				heightOffset, err := strconv.ParseInt(height[1:], 10, 64)
+				if err != nil {
+					return err
+				}
+				upgradeHeight = latestHeight + heightOffset
+			} else {
+ upgradeHeight, err = strconv.ParseInt(height, 10, 64) + if err != nil { + return err + } + } + + upgrade := &urproto.Upgrade{ + Height: upgradeHeight, + Tag: tag, + Name: name, + Type: urproto.UpgradeType(urproto.UpgradeType_value[upgradeType]), + // Status is managed by the blazar registry + Status: urproto.UpgradeStatus_UNKNOWN, + Priority: priority, + Source: urproto.ProviderType(urproto.ProviderType_value[source]), + ProposalId: nil, + } + + if proposalID != -1 { + upgrade.ProposalId = &proposalID + } + + serialized, err := json.MarshalIndent(&upgrade, "", " ") + if err != nil { + return err + } + + lg.Info().Msgf("Registering upgrade: %s", string(serialized)) + + if _, err = c.AddUpgrade(ctx, &urproto.AddUpgradeRequest{ + Upgrade: upgrade, + Overwrite: overwrite, + }); err != nil { + return err + } + lg.Info().Msgf("Successfully registered upgrade for height=%s tag=%s", height, tag) + return nil + }, + } + + registerUpgradeCmd.Flags().StringVar(&height, "height", "", "Height to register upgrade for (1234 or +100 for 100 blocks from now)") + registerUpgradeCmd.Flags().StringVar(&tag, "tag", "", "Tag to upgrade to") + registerUpgradeCmd.Flags().StringVar(&name, "name", "", "A short text describing the upgrade") + registerUpgradeCmd.Flags().StringVar( + &upgradeType, "type", "", + fmt.Sprintf("Upgrade type; valid values: %s", strings.Join(allUpgradeTypes, ", ")), + ) + registerUpgradeCmd.Flags().Int32Var(&priority, "priority", 0, "Upgrade priority") + registerUpgradeCmd.Flags().StringVar( + &source, "source", "", + fmt.Sprintf("Upgrade source; valid values: %s", strings.Join(allUpgradeSources, ", ")), + ) + registerUpgradeCmd.Flags().Int64Var(&proposalID, "proposal-id", -1, "Proposal ID") + registerUpgradeCmd.Flags().BoolVar(&overwrite, "overwrite", false, "Overwrite existing upgrade") + + for _, flagName := range []string{"height", "tag", "type", "source"} { + err := registerUpgradeCmd.MarkFlagRequired(flagName) + cobra.CheckErr(err) + } + + return registerUpgradeCmd +} + +// Read the cfg if it is specified in flags +func readConfig(cmd *cobra.Command) (*config.Config, error) { + cfgFile := cmd.Flag("config").Value.String() + if cfgFile != "" { + cfg, err := config.ReadConfig(cfgFile) + if err != nil { + return nil, err + } + return cfg, nil + } + return nil, nil +} diff --git a/cmd/util/util.go b/cmd/util/util.go new file mode 100644 index 0000000..690111f --- /dev/null +++ b/cmd/util/util.go @@ -0,0 +1,47 @@ +package util + +import ( + "blazar/internal/pkg/config" + "blazar/internal/pkg/errors" + + "github.com/spf13/cobra" +) + +func GetBlazarHostPort(cmd *cobra.Command, cfg *config.Config) (string, uint16, error) { + var host string + var port uint16 + + if cfg != nil { + if err := cfg.ValidateBlazarHostGrpcPort(); err != nil { + return "", 0, err + } + host, port = cfg.Host, cfg.GrpcPort + } + + blazarGrpcPort, err := cmd.Flags().GetUint16("port") + if err != nil { + // this should never be hit + panic(err) + } + if blazarGrpcPort != 0 { + port = blazarGrpcPort + } + + blazarHost, err := cmd.Flags().GetString("host") + if err != nil { + // this should never be hit + panic(err) + } + if blazarHost != "" { + host = blazarHost + } + + if port == 0 { + return "", 0, errors.New("blazar grpc port not specified") + } + + if host == "" { + return "", 0, errors.New("blazar host not specified") + } + return host, port, nil +} diff --git a/cmd/version.go b/cmd/version.go new file mode 100644 index 0000000..5b38145 --- /dev/null +++ b/cmd/version.go @@ -0,0 +1,41 @@ +package cmd + +import ( + 
"fmt" + "runtime" + "strings" + + "blazar/internal/pkg/log/logger" + + "github.com/spf13/cobra" +) + +var ( + BinVersion = "unknown" + GitStatus = "unknown" + GitCommit = "unknown" + BuildTime = "unknown" + BuildGoVersion = "unknown" +) + +var versionCmd = &cobra.Command{ + Use: "version", + Short: "Version of the blazar binary", + Run: func(_ *cobra.Command, _ []string) { + logger.NewLogger().Info().Msgf("Blazar version:\n%s", getVersion()) + }, +} + +func getVersion() string { + return fmt.Sprintf("Version=%s\nGitStatus=%s\nGitCommit=%s\nBuildTime=%s\nBuildWith=%s\nRunOn=%s/%s\n", + BinVersion, GitStatus, GitCommit, BuildTime, BuildGoVersion, runtime.GOOS, runtime.GOARCH) +} + +func init() { + rootCmd.AddCommand(versionCmd) + if GitStatus == "" { + GitStatus = "up to date" + } else { + GitStatus = strings.ReplaceAll(strings.ReplaceAll(GitStatus, "\r\n", " | "), "\n", " | ") + } +} diff --git a/cmd/versions.go b/cmd/versions.go new file mode 100644 index 0000000..ce4ab0a --- /dev/null +++ b/cmd/versions.go @@ -0,0 +1,22 @@ +package cmd + +import ( + "blazar/cmd/versions" + + "github.com/spf13/cobra" +) + +var versionsCmd = &cobra.Command{ + Use: "versions", + Short: "Versions related commands", +} + +func init() { + versionsCmd.AddCommand(versions.GetVersionsListCmd()) + versionsCmd.AddCommand(versions.GetVersionRegisterCmd()) + + versionsCmd.PersistentFlags().String("host", "", "Blazar host to talk to, will override config values if config is specified") + versionsCmd.PersistentFlags().Uint16("port", 0, "Blazar grpc port to talk to, will override config values if config is specified") + + rootCmd.AddCommand(versionsCmd) +} diff --git a/cmd/versions/list.go b/cmd/versions/list.go new file mode 100644 index 0000000..da119a2 --- /dev/null +++ b/cmd/versions/list.go @@ -0,0 +1,131 @@ +package versions + +import ( + "fmt" + "net" + "strconv" + "time" + + "blazar/cmd/util" + "blazar/internal/pkg/config" + "blazar/internal/pkg/log/logger" + urproto "blazar/internal/pkg/proto/upgrades_registry" + proto "blazar/internal/pkg/proto/version_resolver" + + "github.com/jedib0t/go-pretty/v6/table" + "github.com/spf13/cobra" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" +) + +var ( + timeout time.Duration + noCache bool + + filterHeight int64 + filterProviderType string +) + +func GetVersionsListCmd() *cobra.Command { + listCmd := &cobra.Command{ + Use: "list", + Short: "List all upgrades", + RunE: func(cmd *cobra.Command, _ []string) error { + lg := logger.NewLogger() + ctx := logger.WithContext(cmd.Context(), lg) + + cfg, err := readConfig(cmd) + if err != nil { + return err + } + + host, port, err := util.GetBlazarHostPort(cmd, cfg) + if err != nil { + return err + } + + if err := parseConfig(cfg); err != nil { + return err + } + + addr := net.JoinHostPort(host, strconv.Itoa(int(port))) + conn, err := grpc.NewClient(addr, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + return err + } + + c := proto.NewVersionResolverClient(conn) + + filterProviderType, filterHeight, err := filterFlags() + if err != nil { + return err + } + + listUpgradesResponse, err := c.ListVersions(ctx, &proto.ListVersionsRequest{ + DisableCache: noCache, + Height: filterHeight, + Source: filterProviderType, + }) + if err != nil { + return err + } + + tw := table.NewWriter() + tw.AppendHeader(table.Row{ + "Height", + "Tag", + "Network", + "Priority", + "Source", + }) + + for _, version := range listUpgradesResponse.Versions { + tw.AppendRow(table.Row{ + version.Height, + 
+					version.Tag,
+					version.Network,
+					version.GetPriority(),
+					version.Source,
+				})
+			}
+
+			fmt.Println(tw.Render())
+
+			return nil
+		},
+	}
+
+	listCmd.Flags().DurationVar(&timeout, "timeout", 10*time.Second, "Grpc request timeout")
+	listCmd.Flags().BoolVar(&noCache, "nocache", false, "Skip upgrade registry cache (slower but more accurate)")
+	listCmd.Flags().Int64Var(&filterHeight, "height", 0, "Filter by height")
+	listCmd.Flags().StringVar(&filterProviderType, "provider", "", "Filter by provider type")
+
+	return listCmd
+}
+
+func parseConfig(cfg *config.Config) error {
+	if cfg != nil {
+		if err := cfg.ValidateBlazarHostGrpcPort(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func filterFlags() (*urproto.ProviderType, *int64, error) {
+	var requestedProviderType *urproto.ProviderType
+	if filterProviderType != "" {
+		if _, ok := urproto.ProviderType_value[filterProviderType]; !ok {
+			return nil, nil, fmt.Errorf("invalid provider type: %s", filterProviderType)
+		}
+		value := urproto.ProviderType(urproto.ProviderType_value[filterProviderType])
+		requestedProviderType = &value
+	}
+
+	var requestedHeight *int64
+	if filterHeight != 0 {
+		requestedHeight = &filterHeight
+	}
+
+	return requestedProviderType, requestedHeight, nil
+}
diff --git a/cmd/versions/register.go b/cmd/versions/register.go
new file mode 100644
index 0000000..42469d8
--- /dev/null
+++ b/cmd/versions/register.go
@@ -0,0 +1,117 @@
+package versions
+
+import (
+	"fmt"
+	"net"
+	"strconv"
+	"strings"
+
+	"blazar/cmd/util"
+	"blazar/internal/pkg/config"
+	"blazar/internal/pkg/log/logger"
+	urproto "blazar/internal/pkg/proto/upgrades_registry"
+	vrproto "blazar/internal/pkg/proto/version_resolver"
+
+	"github.com/spf13/cobra"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials/insecure"
+)
+
+var (
+	// Version fields
+	height   int64
+	tag      string
+	priority int32
+	source   string
+
+	// Version request fields
+	overwrite bool
+
+	// Other
+	allUpgradeSources []string
+)
+
+func init() {
+	for _, source := range urproto.ProviderType_name {
+		allUpgradeSources = append(allUpgradeSources, source)
+	}
+}
+
+func GetVersionRegisterCmd() *cobra.Command {
+	registerUpgradeCmd := &cobra.Command{
+		Use:   "register",
+		Short: "Tell blazar what image tag to upgrade to when it detects an upgrade at the specified chain height",
+		RunE: func(cmd *cobra.Command, _ []string) error {
+			lg := logger.NewLogger()
+			ctx := logger.WithContext(cmd.Context(), lg)
+
+			cfg, err := readConfig(cmd)
+			if err != nil {
+				return err
+			}
+
+			host, port, err := util.GetBlazarHostPort(cmd, cfg)
+			if err != nil {
+				return err
+			}
+
+			addr := net.JoinHostPort(host, strconv.Itoa(int(port)))
+			conn, err := grpc.NewClient(addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
+			if err != nil {
+				return err
+			}
+
+			c := vrproto.NewVersionResolverClient(conn)
+
+			// validate version fields
+			if _, ok := urproto.ProviderType_value[source]; !ok {
+				return fmt.Errorf("invalid source: %s", source)
+			}
+
+			version := &vrproto.Version{
+				Height:   height,
+				Tag:      tag,
+				Priority: priority,
+				Source:   urproto.ProviderType(urproto.ProviderType_value[source]),
+			}
+
+			if _, err = c.AddVersion(ctx, &vrproto.RegisterVersionRequest{
+				Version:   version,
+				Overwrite: overwrite,
+			}); err != nil {
+				return err
+			}
+			lg.Info().Msgf("Successfully registered version for height=%d tag=%s", height, tag)
+			return nil
+		},
+	}
+
+	registerUpgradeCmd.Flags().Int64Var(&height, "height", 0, "Height to register upgrade for")
+
registerUpgradeCmd.Flags().StringVar(&tag, "tag", "", "Tag to upgrade to") + registerUpgradeCmd.Flags().Int32Var(&priority, "priority", 0, "Upgrade priority") + registerUpgradeCmd.Flags().StringVar( + &source, "source", "", + fmt.Sprintf("Upgrade source; valid values: %s", strings.Join(allUpgradeSources, ", ")), + ) + registerUpgradeCmd.Flags().BoolVar(&overwrite, "overwrite", false, "Overwrite existing upgrade") + + for _, flagName := range []string{"height", "tag", "source"} { + err := registerUpgradeCmd.MarkFlagRequired(flagName) + cobra.CheckErr(err) + } + + return registerUpgradeCmd +} + +// Read the cfg if it is specified in flags +func readConfig(cmd *cobra.Command) (*config.Config, error) { + cfgFile := cmd.Flag("config").Value.String() + if cfgFile != "" { + cfg, err := config.ReadConfig(cfgFile) + if err != nil { + return nil, err + } + return cfg, nil + } + return nil, nil +} diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..a5cf3c2 --- /dev/null +++ b/go.mod @@ -0,0 +1,184 @@ +module blazar + +go 1.22 + +require ( + github.com/BurntSushi/toml v1.3.2 + github.com/cometbft/cometbft v0.37.5 + github.com/compose-spec/compose-go v1.17.0 + github.com/cosmos/cosmos-sdk v0.47.13 + github.com/docker/docker v24.0.7+incompatible + github.com/docker/docker-credential-helpers v0.8.1 + github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 + github.com/jedib0t/go-pretty/v6 v6.5.8 + github.com/otiai10/copy v1.12.0 + github.com/prometheus/client_golang v1.18.0 + github.com/rs/zerolog v1.32.0 + github.com/slack-go/slack v0.14.0 + github.com/spf13/cobra v1.8.0 + github.com/stretchr/testify v1.9.0 + github.com/testcontainers/testcontainers-go v0.27.0 + golang.org/x/sync v0.8.0 + golang.org/x/sys v0.24.0 + google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 + google.golang.org/grpc v1.67.1 + google.golang.org/protobuf v1.34.2 + gorm.io/driver/postgres v1.5.7 + gorm.io/driver/sqlite v1.5.6 + gorm.io/gorm v1.25.10 +) + +require ( + cosmossdk.io/api v0.3.1 // indirect + cosmossdk.io/core v0.5.1 // indirect + cosmossdk.io/depinject v1.0.0-alpha.4 // indirect + cosmossdk.io/errors v1.0.1 // indirect + cosmossdk.io/math v1.3.0 // indirect + dario.cat/mergo v1.0.0 // indirect + filippo.io/edwards25519 v1.0.0 // indirect + github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect + github.com/99designs/keyring v1.2.1 // indirect + github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect + github.com/ChainSafe/go-schnorrkel v1.0.0 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/Microsoft/hcsshim v0.11.4 // indirect + github.com/armon/go-metrics v0.4.1 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 // indirect + github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect + github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/cespare/xxhash v1.1.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cockroachdb/errors v1.10.0 // indirect + github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect + github.com/cockroachdb/redact v1.1.5 // indirect + github.com/cometbft/cometbft-db v0.7.0 // indirect + github.com/confio/ics23/go v0.9.0 // indirect + github.com/containerd/containerd v1.7.11 // indirect + github.com/containerd/log v0.1.0 // indirect + github.com/cosmos/btcutil v1.0.5 // indirect + github.com/cosmos/cosmos-proto v1.0.0-beta.5 // indirect + github.com/cosmos/go-bip39 v1.0.0 
// indirect + github.com/cosmos/gogoproto v1.4.11 // indirect + github.com/cosmos/iavl v0.20.1 // indirect + github.com/cosmos/ledger-cosmos-go v0.12.4 // indirect + github.com/cpuguy83/dockercfg v0.3.1 // indirect + github.com/danieljoos/wincred v1.2.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect + github.com/dgraph-io/badger/v2 v2.2007.4 // indirect + github.com/dgraph-io/ristretto v0.1.1 // indirect + github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect + github.com/distribution/distribution/v3 v3.0.0-20230214150026-36d8c594d7aa // indirect + github.com/docker/distribution v2.8.2+incompatible // indirect + github.com/docker/go-connections v0.4.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/dvsekhvalnov/jose2go v1.6.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/getsentry/sentry-go v0.23.0 // indirect + github.com/go-kit/kit v0.12.0 // indirect + github.com/go-kit/log v0.2.1 // indirect + github.com/go-logfmt/logfmt v0.6.0 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect + github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/glog v1.2.2 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/btree v1.1.2 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/gorilla/websocket v1.5.0 // indirect + github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect + github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect + github.com/gtank/merlin v0.1.1 // indirect + github.com/gtank/ristretto255 v0.1.2 // indirect + github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/hdevalence/ed25519consensus v0.1.0 // indirect + github.com/huandu/skiplist v1.2.0 // indirect + github.com/imdario/mergo v0.3.16 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 // indirect + github.com/jackc/pgx/v5 v5.6.0 // indirect + github.com/jackc/puddle/v2 v2.2.1 // indirect + github.com/jinzhu/inflection v1.0.0 // indirect + github.com/jinzhu/now v1.1.5 // indirect + github.com/jmhodges/levigo v1.0.0 // indirect + github.com/klauspost/compress v1.17.0 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/libp2p/go-buffer-pool v0.1.0 // indirect + github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect + github.com/magiconair/properties v1.8.7 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-runewidth v0.0.15 // indirect + github.com/mattn/go-shellwords v1.0.12 // indirect + github.com/mattn/go-sqlite3 v1.14.22 // indirect + github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect + github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + 
github.com/moby/patternmatcher v0.6.0 // indirect + github.com/moby/sys/sequential v0.5.0 // indirect + github.com/moby/term v0.5.0 // indirect + github.com/morikuni/aec v1.0.0 // indirect + github.com/mtibben/percent v0.2.1 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0-rc5 // indirect + github.com/opencontainers/runc v1.1.5 // indirect + github.com/pelletier/go-toml/v2 v2.1.0 // indirect + github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect + github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/common v0.45.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect + github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect + github.com/rivo/uniseg v0.2.0 // indirect + github.com/rogpeppe/go-internal v1.11.0 // indirect + github.com/sagikazarmark/locafero v0.4.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/sasha-s/go-deadlock v0.3.1 // indirect + github.com/shirou/gopsutil/v3 v3.23.11 // indirect + github.com/shoenig/go-m1cpu v0.1.6 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.6.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/viper v1.18.2 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect + github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect + github.com/tendermint/go-amino v0.16.0 // indirect + github.com/tidwall/btree v1.6.0 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect + github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect + github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect + github.com/xeipuuv/gojsonschema v1.2.0 // indirect + github.com/yusufpapurcu/wmi v1.2.3 // indirect + github.com/zondax/hid v0.9.2 // indirect + github.com/zondax/ledger-go v0.14.3 // indirect + go.etcd.io/bbolt v1.3.7 // indirect + go.uber.org/multierr v1.10.0 // indirect + golang.org/x/crypto v0.26.0 // indirect + golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/net v0.28.0 // indirect + golang.org/x/term v0.23.0 // indirect + golang.org/x/text v0.17.0 // indirect + golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect + google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + pgregory.net/rapid v1.1.0 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..a30f1d1 --- /dev/null +++ b/go.sum @@ -0,0 +1,791 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cosmossdk.io/api v0.3.1 h1:NNiOclKRR0AOlO4KIqeaG6PS6kswOMhHD0ir0SscNXE= +cosmossdk.io/api v0.3.1/go.mod 
h1:DfHfMkiNA2Uhy8fj0JJlOCYOBp4eWUUJ1te5zBGNyIw= +cosmossdk.io/core v0.5.1 h1:vQVtFrIYOQJDV3f7rw4pjjVqc1id4+mE0L9hHP66pyI= +cosmossdk.io/core v0.5.1/go.mod h1:KZtwHCLjcFuo0nmDc24Xy6CRNEL9Vl/MeimQ2aC7NLE= +cosmossdk.io/depinject v1.0.0-alpha.4 h1:PLNp8ZYAMPTUKyG9IK2hsbciDWqna2z1Wsl98okJopc= +cosmossdk.io/depinject v1.0.0-alpha.4/go.mod h1:HeDk7IkR5ckZ3lMGs/o91AVUc7E596vMaOmslGFM3yU= +cosmossdk.io/errors v1.0.1 h1:bzu+Kcr0kS/1DuPBtUFdWjzLqyUuCiyHjyJB6srBV/0= +cosmossdk.io/errors v1.0.1/go.mod h1:MeelVSZThMi4bEakzhhhE/CKqVv3nOJDA25bIqRDu/U= +cosmossdk.io/log v1.3.1 h1:UZx8nWIkfbbNEWusZqzAx3ZGvu54TZacWib3EzUYmGI= +cosmossdk.io/log v1.3.1/go.mod h1:2/dIomt8mKdk6vl3OWJcPk2be3pGOS8OQaLUM/3/tCM= +cosmossdk.io/math v1.3.0 h1:RC+jryuKeytIiictDslBP9i1fhkVm6ZDmZEoNP316zE= +cosmossdk.io/math v1.3.0/go.mod h1:vnRTxewy+M7BtXBNFybkuhSH4WfedVAAnERHgVFhp3k= +cosmossdk.io/tools/rosetta v0.2.1 h1:ddOMatOH+pbxWbrGJKRAawdBkPYLfKXutK9IETnjYxw= +cosmossdk.io/tools/rosetta v0.2.1/go.mod h1:Pqdc1FdvkNV3LcNIkYWt2RQY6IP1ge6YWZk8MhhO9Hw= +dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= +dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +filippo.io/edwards25519 v1.0.0 h1:0wAIcmJUqRdI8IJ/3eGi5/HwXZWPujYXXlkrQogz0Ek= +filippo.io/edwards25519 v1.0.0/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= +github.com/99designs/keyring v1.2.1 h1:tYLp1ULvO7i3fI5vE21ReQuj99QFSs7lGm0xWyJo87o= +github.com/99designs/keyring v1.2.1/go.mod h1:fc+wB5KTk9wQ9sDx0kFXB3A0MaeGHM9AwRStKOQ5vOA= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= +github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/ChainSafe/go-schnorrkel v1.0.0 h1:3aDA67lAykLaG1y3AOjs88dMxC88PgUuHRrLeDnvGIM= +github.com/ChainSafe/go-schnorrkel v1.0.0/go.mod h1:dpzHYVxLZcp8pjlV+O+UR8K0Hp/z7vcchBSbMBEhCw4= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Microsoft/hcsshim v0.11.4 h1:68vKo2VN8DE9AdN4tnkWnmdhqdbpUFM8OF3Airm7fz8= +github.com/Microsoft/hcsshim v0.11.4/go.mod h1:smjE4dvqPX9Zldna+t5FG3rnoHhaB7QYxPRqGcpAD9w= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8= +github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= +github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= +github.com/VividCortex/gohistogram 
v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= +github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 h1:41iFGWnSlI2gVpmOtVTJZNodLdLQLn/KsJqFvXwnd/s= +github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= +github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= +github.com/btcsuite/btcd/btcutil v1.1.2 h1:XLMbX8JQEiwMcYft2EGi8zPUkoa0abKIU6/BJSRsjzQ= +github.com/btcsuite/btcd/btcutil v1.1.2/go.mod h1:UR7dsSJzJUfMmFiiLlIrMq1lS9jh9EdCV7FStZSnpi0= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI= +github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= 
+github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cockroachdb/errors v1.10.0 h1:lfxS8zZz1+OjtV4MtNWgboi/W5tyLEB6VQZBXN+0VUU= +github.com/cockroachdb/errors v1.10.0/go.mod h1:lknhIsEVQ9Ss/qKDBQS/UqFSvPQjOwNq2qyKAxtHRqE= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= +github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/coinbase/rosetta-sdk-go/types v1.0.0 h1:jpVIwLcPoOeCR6o1tU+Xv7r5bMONNbHU7MuEHboiFuA= +github.com/coinbase/rosetta-sdk-go/types v1.0.0/go.mod h1:eq7W2TMRH22GTW0N0beDnN931DW0/WOI1R2sdHNHG4c= +github.com/cometbft/cometbft v0.37.5 h1:/U/TlgMh4NdnXNo+YU9T2NMCWyhXNDF34Mx582jlvq0= +github.com/cometbft/cometbft v0.37.5/go.mod h1:QC+mU0lBhKn8r9qvmnq53Dmf3DWBt4VtkcKw2C81wxY= +github.com/cometbft/cometbft-db v0.7.0 h1:uBjbrBx4QzU0zOEnU8KxoDl18dMNgDh+zZRUE0ucsbo= +github.com/cometbft/cometbft-db v0.7.0/go.mod h1:yiKJIm2WKrt6x8Cyxtq9YTEcIMPcEe4XPxhgX59Fzf0= +github.com/compose-spec/compose-go v1.17.0 h1:cvje90CU94dQyTnJoHJYjx9yE4Iggse1XmGcO3Qi5ts= +github.com/compose-spec/compose-go v1.17.0/go.mod h1:zR2tP1+kZHi5vJz7PjpW6oMoDji/Js3GHjP+hfjf70Q= +github.com/confio/ics23/go v0.9.0 h1:cWs+wdbS2KRPZezoaaj+qBleXgUk5WOQFMP3CQFGTr4= +github.com/confio/ics23/go v0.9.0/go.mod h1:4LPZ2NYqnYIVRklaozjNR1FScgDJ2s5Xrp+e/mYVRak= +github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= +github.com/containerd/containerd v1.7.11 h1:lfGKw3eU35sjV0aG2eYZTiwFEY1pCzxdzicHP3SZILw= +github.com/containerd/containerd v1.7.11/go.mod h1:5UluHxHTX2rdvYuZ5OJTC5m/KJNs0Zs9wVoJm9zf5ZE= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cosmos/btcutil v1.0.5 h1:t+ZFcX77LpKtDBhjucvnOH8C2l2ioGsBNEQ3jef8xFk= +github.com/cosmos/btcutil v1.0.5/go.mod h1:IyB7iuqZMJlthe2tkIFL33xPyzbFYP0XVdS8P5lUPis= +github.com/cosmos/cosmos-proto v1.0.0-beta.5 h1:eNcayDLpip+zVLRLYafhzLvQlSmyab+RC5W7ZfmxJLA= +github.com/cosmos/cosmos-proto v1.0.0-beta.5/go.mod h1:hQGLpiIUloJBMdQMMWb/4wRApmI9hjHH05nefC0Ojec= +github.com/cosmos/cosmos-sdk v0.47.13 h1:9d57rl2ilSgc8a6u1JAulqNX/E5w8lbqbRe3NON3Jb4= +github.com/cosmos/cosmos-sdk v0.47.13/go.mod h1:pYMzhTfKFn9AJB5X64Epwe9NgYk0y3v7XN8Ks5xqWoo= +github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d/go.mod 
h1:tSxLoYXyBmiFeKpvmq4dzayMdCjCnu8uqmCysIGBT2Y= +github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY= +github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw= +github.com/cosmos/gogogateway v1.2.0 h1:Ae/OivNhp8DqBi/sh2A8a1D0y638GpL3tkmLQAiKxTE= +github.com/cosmos/gogogateway v1.2.0/go.mod h1:iQpLkGWxYcnCdz5iAdLcRBSw3h7NXeOkZ4GUkT+tbFI= +github.com/cosmos/gogoproto v1.4.11 h1:LZcMHrx4FjUgrqQSWeaGC1v/TeuVFqSLa43CC6aWR2g= +github.com/cosmos/gogoproto v1.4.11/go.mod h1:/g39Mh8m17X8Q/GDEs5zYTSNaNnInBSohtaxzQnYq1Y= +github.com/cosmos/iavl v0.20.1 h1:rM1kqeG3/HBT85vsZdoSNsehciqUQPWrR4BYmqE2+zg= +github.com/cosmos/iavl v0.20.1/go.mod h1:WO7FyvaZJoH65+HFOsDir7xU9FWk2w9cHXNW1XHcl7A= +github.com/cosmos/ledger-cosmos-go v0.12.4 h1:drvWt+GJP7Aiw550yeb3ON/zsrgW0jgh5saFCr7pDnw= +github.com/cosmos/ledger-cosmos-go v0.12.4/go.mod h1:fjfVWRf++Xkygt9wzCsjEBdjcf7wiiY35fv3ctT+k4M= +github.com/cosmos/rosetta-sdk-go v0.10.0 h1:E5RhTruuoA7KTIXUcMicL76cffyeoyvNybzUGSKFTcM= +github.com/cosmos/rosetta-sdk-go v0.10.0/go.mod h1:SImAZkb96YbwvoRkzSMQB6noNJXFgWl/ENIznEoYQI4= +github.com/cpuguy83/dockercfg v0.3.1 h1:/FpZ+JaygUR/lZP2NlFI2DVfrOEMAIKP5wWEJdoYe9E= +github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creachadair/taskgroup v0.3.2 h1:zlfutDS+5XG40AOxcHDSThxKzns8Tnr9jnr6VqkYlkM= +github.com/creachadair/taskgroup v0.3.2/go.mod h1:wieWwecHVzsidg2CsUnFinW1faVN4+kq+TDlRJQ0Wbk= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/danieljoos/wincred v1.2.1 h1:dl9cBrupW8+r5250DYkYxocLeZ1Y4vB1kxgtjxw8GQs= +github.com/danieljoos/wincred v1.2.1/go.mod h1:uGaFL9fDn3OLTvzCGulzE+SzjEe5NGlh5FdCcyfPwps= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= +github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= +github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I= +github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE= +github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= +github.com/dgraph-io/badger/v2 v2.2007.4/go.mod 
h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= +github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= +github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/distribution/distribution/v3 v3.0.0-20230214150026-36d8c594d7aa h1:L9Ay/slwQ4ERSPaurC+TVkZrM0K98GNrEEo1En3e8as= +github.com/distribution/distribution/v3 v3.0.0-20230214150026-36d8c594d7aa/go.mod h1:WHNsWjnIn2V1LYOrME7e8KxSeKunYHsxEm4am0BUtcI= +github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= +github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM= +github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.8.1 h1:j/eKUktUltBtMzKqmfLB0PAgqYyMHOp5vfsD1807oKo= +github.com/docker/docker-credential-helpers v0.8.1/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/dvsekhvalnov/jose2go v1.6.0 h1:Y9gnSnP4qEI0+/uQkHvFXeD2PLPJeXEL+ySMEA2EjTY= +github.com/dvsekhvalnov/jose2go v1.6.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0= +github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= +github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= +github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= +github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk= +github.com/facebookgo/subset 
v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/getsentry/sentry-go v0.23.0 h1:dn+QRCeJv4pPt9OjVXiMcGIBIefaTJPw/h0bZWO05nE= +github.com/getsentry/sentry-go v0.23.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4= +github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs= +github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= +github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= +github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-test/deep v1.0.4 h1:u2CU3YKy9I2pmu9pX0eq50wCgjfGIt539SqR7FbHiho= +github.com/go-test/deep v1.0.4/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= +github.com/gogo/googleapis v1.4.1/go.mod 
h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= +github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= 
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us= +github.com/google/orderedcode v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= +github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= +github.com/gtank/merlin v0.1.1-0.20191105220539-8318aed1a79f/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= +github.com/gtank/merlin v0.1.1 h1:eQ90iG7K9pOhtereWsmyRJ6RAwcP4tHTDBHXNg+u5is= +github.com/gtank/merlin v0.1.1/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= +github.com/gtank/ristretto255 v0.1.2 h1:JEqUCPA1NvLq5DwYtuzigd7ss8fwbYay9fi4/5uMzcc= +github.com/gtank/ristretto255 v0.1.2/go.mod h1:Ph5OpO6c7xKUGROZfWVLiJf9icMDwUeIvY4OmlYW69o= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= +github.com/hashicorp/go-uuid 
v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs= +github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hdevalence/ed25519consensus v0.1.0 h1:jtBwzzcHuTmFrQN6xQZn6CQEO/V9f7HsjsjeEZ6auqU= +github.com/hdevalence/ed25519consensus v0.1.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/go-assert v1.1.5 h1:fjemmA7sSfYHJD7CUqs9qTwwfdNAx7/j2/ZlHXzNB3c= +github.com/huandu/go-assert v1.1.5/go.mod h1:yOLvuqZwmcHIC5rIzrBhT7D3Q9c3GFnd0JrPVhn/06U= +github.com/huandu/skiplist v1.2.0 h1:gox56QD77HzSC0w+Ws3MH3iie755GBJU1OER3h5VsYw= +github.com/huandu/skiplist v1.2.0/go.mod h1:7v3iFjLcSAzO4fN5B8dvebvo/qsfumiLiDXMrPiHF9w= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/improbable-eng/grpc-web v0.15.0 h1:BN+7z6uNXZ1tQGcNAuaU1YjsLTApzkjt2tzCixLaUPQ= +github.com/improbable-eng/grpc-web v0.15.0/go.mod h1:1sy9HKV4Jt9aEs9JSnkWlRJPuPtwNr0l57L4f878wP8= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 h1:L0QtFUgDarD7Fpv9jeVMgy/+Ec0mtnmYuImjTz6dtDA= +github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.6.0 h1:SWJzexBzPL5jb0GEsrPMLIsi/3jOo7RHlzTjcAeDrPY= +github.com/jackc/pgx/v5 v5.6.0/go.mod h1:DNZ/vlrUnhWCoFGxHAG8U2ljioxukquj7utPDgtQdTw= +github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk= +github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jedib0t/go-pretty/v6 v6.5.8 h1:8BCzJdSvUbaDuRba4YVh+SKMGcAAKdkcF3SVFbrHAtQ= +github.com/jedib0t/go-pretty/v6 v6.5.8/go.mod h1:zbn98qrYlh95FIhwwsbIip0LYpwSG8SUOScs+v9/t0E= +github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= +github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= +github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= +github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= +github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U= +github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go 
v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM= +github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= +github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= +github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= +github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= +github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= +github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= +github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= +github.com/mattn/go-sqlite3 
v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= +github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM= +github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 h1:QRUSJEgZn2Snx0EmT/QLXibWjSUDjKWvXIT19NBVp94= +github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM= +github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= +github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= +github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= +github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= +github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= +github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= +github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.16.4/go.mod 
h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/onsi/gomega v1.20.0 h1:8W0cWlwFkflGPLltQvLRB7ZVD5HuP6ng320w2IS245Q= +github.com/onsi/gomega v1.20.0/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= +github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= +github.com/opencontainers/runc v1.1.5 h1:L44KXEpKmfWDcS02aeGm8QNTFXTo2D+8MYGDIJ/GDEs= +github.com/opencontainers/runc v1.1.5/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/otiai10/copy v1.12.0 h1:cLMgSQnXBs1eehF0Wy/FAGsgDTDmAqFR7rQylBb1nDY= +github.com/otiai10/copy v1.12.0/go.mod h1:rSaLseMUsZFFbsFGc7wCJnnkTAvdc5L6VWxPE4308Ww= +github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks= +github.com/otiai10/mint v1.5.1/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= +github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= +github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 h1:hDSdbBuw3Lefr6R18ax0tZ2BJeNB3NehB3trOwYBsdU= +github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= +github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= +github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ= +github.com/rakyll/statik v0.1.7/go.mod h1:AlZONWzMtEnMs7W4e/1LURLiI49pIMmp6V9Unghqrcc= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= 
+github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.32.0 h1:keLypqrlIjaFsbmJOBdB/qvyF8KEtCWHwobLp5l/mQ0= +github.com/rs/zerolog v1.32.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= +github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0= +github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= +github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= +github.com/shirou/gopsutil/v3 v3.23.11 h1:i3jP9NjCPUz7FiZKxlMnODZkdSIp2gnzfrvsu9CuWEQ= +github.com/shirou/gopsutil/v3 v3.23.11/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= +github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= +github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= +github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= +github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/slack-go/slack v0.14.0 h1:6c0UTfbRnvRssZUsZ2qe0Iu07VAMPjRqOa6oX8ewF4k= +github.com/slack-go/slack v0.14.0/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra 
v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= +github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= +github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzHWCjJB1zZfXPIAaDpzXIEJ0eS6B5Ok= +github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= +github.com/tendermint/go-amino v0.16.0 h1:GyhmgQKvqF82e2oZeuMSp9JTN0N09emoSZlb2lyGa2E= +github.com/tendermint/go-amino v0.16.0/go.mod h1:TQU0M1i/ImAo+tYpZi73AU3V/dKeCoMC9Sphe2ZwGME= +github.com/testcontainers/testcontainers-go v0.27.0 h1:IeIrJN4twonTDuMuBNQdKZ+K97yd7VrmNGu+lDpYcDk= +github.com/testcontainers/testcontainers-go v0.27.0/go.mod 
h1:+HgYZcd17GshBUZv9b+jKFJ198heWPQq3KQIp2+N+7U= +github.com/tidwall/btree v1.6.0 h1:LDZfKfQIBHGHWSwckhXI0RPSXzlo+KYdjK7FWSqOzzg= +github.com/tidwall/btree v1.6.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= +github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/zondax/hid v0.9.2 h1:WCJFnEDMiqGF64nlZz28E9qLVZ0KSJ7xpc5DLEyma2U= +github.com/zondax/hid v0.9.2/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= +github.com/zondax/ledger-go v0.14.3 h1:wEpJt2CEcBJ428md/5MgSLsXLBos98sBOyxNmCjfUCw= +github.com/zondax/ledger-go v0.14.3/go.mod h1:IKKaoxupuB43g4NxeQmbLXv7T9AlQyie1UpHb342ycI= +go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= +go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= +go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto 
v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= +golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod 
h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= +golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools 
v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= +google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= +google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8= +google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:e7S5W7MGGLaSu8j3YjdezkZ+m1/Nm0uRVRMEMGk26Xs= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.33.1/go.mod 
h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= +google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gorm.io/driver/postgres v1.5.7 h1:8ptbNJTDbEmhdr62uReG5BGkdQyeasu/FZHxI0IMGnM= +gorm.io/driver/postgres v1.5.7/go.mod h1:3e019WlBaYI5o5LIdNV+LyxCMNtLOQETBXL2h4chKpA= +gorm.io/driver/sqlite v1.5.6 h1:fO/X46qn5NUEEOZtnjJRWRzZMe8nqJiQ9E+0hi+hKQE= +gorm.io/driver/sqlite v1.5.6/go.mod h1:U+J8craQU6Fzkcvu8oLeAQmi50TkwPEhHDEjQZXDah4= 
+gorm.io/gorm v1.25.10 h1:dQpO+33KalOA+aFYGlK+EfxcI5MbO7EP2yYygwh9h+s= +gorm.io/gorm v1.25.10/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= +gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +nhooyr.io/websocket v1.8.6 h1:s+C3xAMLwGmlI31Nyn/eAehUlZPwfYZu2JXM621Q5/k= +nhooyr.io/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +pgregory.net/rapid v1.1.0 h1:CMa0sjHSru3puNx+J0MIAuiiEV4N0qj8/cMWGBBCsjw= +pgregory.net/rapid v1.1.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/internal/pkg/chain_watcher/height_watcher.go b/internal/pkg/chain_watcher/height_watcher.go new file mode 100644 index 0000000..6d5a138 --- /dev/null +++ b/internal/pkg/chain_watcher/height_watcher.go @@ -0,0 +1,106 @@ +package chain_watcher + +import ( + "context" + "time" + + "blazar/internal/pkg/cosmos" + "blazar/internal/pkg/errors" + + ctypes "github.com/cometbft/cometbft/types" +) + +type NewHeight struct { + Height int64 + Error error +} + +type HeightWatcher struct { + Heights <-chan NewHeight + cancel chan<- struct{} +} + +func (hw *HeightWatcher) Cancel() { + hw.cancel <- struct{}{} +} + +func NewPeriodicHeightWatcher(ctx context.Context, cosmosClient *cosmos.Client, heightInterval time.Duration) *HeightWatcher { + ticker := time.NewTicker(heightInterval) + cancel := make(chan struct{}) + heights := make(chan NewHeight) + + go func() { + for { + select { + case <-ticker.C: + height, err := cosmosClient.GetLatestBlockHeight(ctx) + + select { + case heights <- NewHeight{ + Height: height, + Error: err, + }: + + // prevents deadlock with heights channel + case <-cancel: + return + } + // this isn't necessary since we exit in the above select statement + // but this will help in early exit in case cancel is called before the ticker fires + case <-cancel: + return + } + } + }() + + return &HeightWatcher{ + Heights: heights, + cancel: cancel, + } +} + +func NewStreamingHeightWatcher(ctx context.Context, cosmosClient *cosmos.Client) (*HeightWatcher, error) { + cancel := make(chan struct{}) + heights := make(chan NewHeight) + + // subscribe call hangs if the node is not running, this at least prevents + // the watcher from hanging at the start + if _, err := cosmosClient.GetCometbftClient().Status(ctx); err != nil { + return nil, errors.Wrapf(err, "failed to get cometbft status") + } + + // create some wiggle room in case blazar can't process the blocks fast enough + capacity := 10 + + txs, err := cosmosClient.GetCometbftClient().Subscribe(ctx, "blazar-client", "tm.event = 'NewBlock'", capacity) + if err != nil { + return nil, err + } + + go func() { + for { + select { + case tx := <-txs: + if data, ok := tx.Data.(ctypes.EventDataNewBlock); ok { + select { + case heights <- NewHeight{ + Height: data.Block.Header.Height, + Error: nil, + }: + // prevents deadlock with heights channel + case <-cancel: + return + } + } + // this isn't necessary since we exit in the above select statement + // but this will help in early exit in 
case cancel is called before the new height fires
+			case <-cancel:
+				return
+			}
+		}
+	}()
+	return &HeightWatcher{
+		Heights: heights,
+		cancel:  cancel,
+	}, nil
+}
diff --git a/internal/pkg/chain_watcher/upgrade_proposals_watcher.go b/internal/pkg/chain_watcher/upgrade_proposals_watcher.go
new file mode 100644
index 0000000..b824bc9
--- /dev/null
+++ b/internal/pkg/chain_watcher/upgrade_proposals_watcher.go
@@ -0,0 +1,87 @@
+package chain_watcher
+
+import (
+	"context"
+	"time"
+
+	"blazar/internal/pkg/cosmos"
+	"blazar/internal/pkg/log"
+	urproto "blazar/internal/pkg/proto/upgrades_registry"
+	registry "blazar/internal/pkg/upgrades_registry"
+)
+
+type UpgradeProposalsWatcher struct {
+	ur       *registry.UpgradeRegistry
+	interval time.Duration
+	Errors   <-chan error
+	cancel   chan<- struct{}
+}
+
+func NewUpgradeProposalsWatcher(ctx context.Context, cosmosClient *cosmos.Client, ur *registry.UpgradeRegistry, proposalInterval time.Duration) *UpgradeProposalsWatcher {
+	ticker := time.NewTicker(proposalInterval)
+	errors := make(chan error)
+	cancel := make(chan struct{})
+
+	logger := log.FromContext(ctx).With("package", "upgrade_proposals_watcher")
+	lastUpdateTime := time.Now()
+
+	go func() {
+		for {
+			select {
+			case <-ticker.C:
+				// we don't want to stop the watcher if there is an error
+				// since it could be a temporary error, like a network issue
+				// therefore we return the error to the channel and continue
+				logger.Infof("Attempting to fetch upgrade proposals, last attempt was %f seconds ago", time.Since(lastUpdateTime).Seconds())
+				lastUpdateTime = time.Now()
+
+				lastHeight, err := cosmosClient.GetLatestBlockHeight(ctx)
+				if err != nil {
+					select {
+					case errors <- err:
+					// to prevent deadlock with errors channel
+					case <-cancel:
+						return
+					}
+					continue
+				}
+
+				logger.Infof("Fetching the upgrade proposals at height %d", lastHeight)
+				_, _, upgrades, _, err := ur.Update(ctx, lastHeight, true)
+				if err != nil {
+					select {
+					case errors <- err:
+					// to prevent deadlock with errors channel
+					case <-cancel:
+						return
+					}
+					continue
+				}
+
+				upcomingUpgrades := ur.GetUpcomingUpgradesWithCache(lastHeight, urproto.UpgradeStatus_ACTIVE)
+				logger.Infof(
+					"Fetched %d upcoming upgrades in ACTIVE state, out of total %d resolved ones, next attempt in %f seconds",
+					len(upcomingUpgrades), len(upgrades), proposalInterval.Seconds(),
+				)
+
+			// we want to cancel the watcher when the chain upgrade is in progress
+			case <-cancel:
+				return
+			}
+		}
+	}()
+	return &UpgradeProposalsWatcher{
+		ur:       ur,
+		cancel:   cancel,
+		interval: proposalInterval,
+		Errors:   errors,
+	}
+}
+
+func (upw *UpgradeProposalsWatcher) GetInterval() time.Duration {
+	return upw.interval
+}
+
+func (upw *UpgradeProposalsWatcher) Cancel() {
+	upw.cancel <- struct{}{}
+}
diff --git a/internal/pkg/chain_watcher/upgrades_info_watcher.go b/internal/pkg/chain_watcher/upgrades_info_watcher.go
new file mode 100644
index 0000000..3f61466
--- /dev/null
+++ b/internal/pkg/chain_watcher/upgrades_info_watcher.go
@@ -0,0 +1,136 @@
+package chain_watcher
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"time"
+
+	"blazar/internal/pkg/errors"
+	"blazar/internal/pkg/file_watcher"
+
+	upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
+)
+
+type UpgradesInfoWatcher struct {
+	// full path to a watched file
+	filename string
+	lastInfo upgradetypes.Plan
+
+	Upgrades <-chan NewUpgradeInfo
+}
+
+type NewUpgradeInfo struct {
+	Plan  upgradetypes.Plan
+	Error error
+}
+
+func NewUpgradeInfoWatcher(upgradeInfoFilePath string, interval time.Duration) 
(*UpgradesInfoWatcher, error) {
+	exists, fw, err := file_watcher.NewFileWatcher(upgradeInfoFilePath, interval)
+	if err != nil {
+		return nil, errors.Wrapf(err, "error creating file watcher for %s", upgradeInfoFilePath)
+	}
+
+	// Default to an empty plan (height = 0) if the file doesn't exist
+	// Any upgrade will have height > 0
+	var info upgradetypes.Plan
+	if exists {
+		info, err = parseUpgradeInfoFile(upgradeInfoFilePath)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to parse upgrade-info.json file")
+		}
+	}
+
+	upgrades := make(chan NewUpgradeInfo)
+
+	uiw := &UpgradesInfoWatcher{
+		filename: upgradeInfoFilePath,
+		lastInfo: info,
+		Upgrades: upgrades,
+	}
+
+	go func() {
+		for {
+			newEvent := <-fw.ChangeEvents
+			if newEvent.Error != nil {
+				panic(errors.Wrapf(newEvent.Error, "upgrade info watcher's file watcher observed an error"))
+			}
+			if e := newEvent.Event; e == file_watcher.FileCreated || e == file_watcher.FileModified {
+				upgrade, err := uiw.checkIfUpdateIsNeeded()
+
+				// we don't want to stop the watcher if there is an error here,
+				// since it could be a temporary error
+				// eg: file created but not written to yet
+				// send the error to the channel for logging and continue
+				// we can export these errors as metrics later
+				var newUpgradeInfo NewUpgradeInfo
+				if err != nil {
+					newUpgradeInfo.Error = err
+					upgrades <- newUpgradeInfo
+				} else if upgrade != nil {
+					newUpgradeInfo.Plan = *upgrade
+					upgrades <- newUpgradeInfo
+					return
+				}
+			}
+		}
+	}()
+
+	return uiw, nil
+}
+
+// checkIfUpdateIsNeeded reads the update plan from upgrade-info.json
+// and returns the plan if a new upgrade height has been hit
+func (uiw *UpgradesInfoWatcher) checkIfUpdateIsNeeded() (*upgradetypes.Plan, error) {
+	info, err := parseUpgradeInfoFile(uiw.filename)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to parse upgrade-info.json file")
+	}
+
+	// The file is newer than what we last saw,
+	// so let's check whether the upgrade plan height
+	// differs from the one we last knew
+	//
+	// This breaks down in one edge case:
+	// Let's say the chain hits an upgrade at height 1000
+	// and the upgrade-info.json file is created.
+	// Whether the upgrade was successful or not doesn't matter.
+	// But for some reason the chain is restored to some height
+	// lower than 1000, say 900, without touching the upgrade-info.json
+	// file. Now when the upgrade block
+	// height is hit again, the upgrade will not be detected.
+	//
+	// The modTime of upgrade-info.json may change due to various
+	// reasons, hence it can't be used as a reliable check for
+	// an upgrade height being hit. Unfortunately, there is no
+	// way to detect this edge case; we can at most add a
+	// warning in the README.
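+	//
+	// To make the comparison rule concrete (illustrative values, not taken
+	// from the original code or testdata): with no upgrade-info.json present
+	// at startup, lastInfo.Height is 0, so a freshly written
+	// {"name": "v2", "height": 1234} yields a new plan; re-reading an
+	// unchanged file (height still 1234) yields nil, nil.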
+	if info.Height != uiw.lastInfo.Height {
+		uiw.lastInfo = info
+		return &info, nil
+	}
+
+	return nil, nil
+}
+
+func parseUpgradeInfoFile(filename string) (upgradetypes.Plan, error) {
+	var ui upgradetypes.Plan
+
+	f, err := os.Open(filename)
+	if err != nil {
+		return upgradetypes.Plan{}, err
+	}
+	defer f.Close()
+
+	d := json.NewDecoder(f)
+	if err := d.Decode(&ui); err != nil {
+		return upgradetypes.Plan{}, err
+	}
+
+	// required values must be set
+	if ui.Height <= 0 || ui.Name == "" {
+		return upgradetypes.Plan{}, fmt.Errorf("invalid upgrade-info.json content; name and height must not be empty; got: %v", ui)
+	}
+
+	return ui, nil
+}
diff --git a/internal/pkg/chain_watcher/upgrades_info_watcher_test.go b/internal/pkg/chain_watcher/upgrades_info_watcher_test.go
new file mode 100644
index 0000000..9bd2905
--- /dev/null
+++ b/internal/pkg/chain_watcher/upgrades_info_watcher_test.go
@@ -0,0 +1,138 @@
+package chain_watcher
+
+import (
+	"os"
+	"path/filepath"
+	"testing"
+	"time"
+
+	"blazar/internal/pkg/config"
+	"blazar/internal/pkg/testutils"
+
+	upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestParseUpgradeInfoFile(t *testing.T) {
+	cases := []struct {
+		filename      string
+		expectUpgrade upgradetypes.Plan
+		expectErr     bool
+	}{
+		{
+			filename:      "f1-good.json",
+			expectUpgrade: upgradetypes.Plan{Name: "upgrade1", Info: "some info", Height: 123},
+			expectErr:     false,
+		},
+		{
+			filename:      "f2-normalized-name.json",
+			expectUpgrade: upgradetypes.Plan{Name: "Upgrade2", Info: "some info", Height: 125},
+			expectErr:     false,
+		},
+		{
+			filename:      "f2-bad-type.json",
+			expectUpgrade: upgradetypes.Plan{},
+			expectErr:     true,
+		},
+		{
+			filename:      "f2-bad-type-2.json",
+			expectUpgrade: upgradetypes.Plan{},
+			expectErr:     true,
+		},
+		{
+			filename:      "f3-empty.json",
+			expectUpgrade: upgradetypes.Plan{},
+			expectErr:     true,
+		},
+		{
+			filename:      "f4-empty-obj.json",
+			expectUpgrade: upgradetypes.Plan{},
+			expectErr:     true,
+		},
+		{
+			filename:      "f5-partial-obj-1.json",
+			expectUpgrade: upgradetypes.Plan{},
+			expectErr:     true,
+		},
+		{
+			filename:      "f5-partial-obj-2.json",
+			expectUpgrade: upgradetypes.Plan{},
+			expectErr:     true,
+		},
+		{
+			filename:      "unknown.json",
+			expectUpgrade: upgradetypes.Plan{},
+			expectErr:     true,
+		},
+	}
+
+	for i := range cases {
+		tc := cases[i]
+		t.Run(tc.filename, func(t *testing.T) {
+			ui, err := parseUpgradeInfoFile(filepath.Join(testutils.TestdataDirPath, "upgrade-files", tc.filename))
+			if tc.expectErr {
+				assert.Error(t, err)
+			} else {
+				require.NoError(t, err)
+				assert.Equal(t, tc.expectUpgrade, ui)
+			}
+		})
+	}
+}
+
+func TestMonitorUpgrade(t *testing.T) {
+	t.Run("NoExistingFile", func(t *testing.T) {
+		chainHome := t.TempDir()
+		err := os.MkdirAll(filepath.Join(chainHome, "data"), 0755)
+		require.NoError(t, err)
+
+		cfg := &config.Config{
+			ChainHome: chainHome,
+			Watchers: config.Watchers{
+				UIInterval: 5 * time.Millisecond,
+			},
+		}
+		uiw, err := NewUpgradeInfoWatcher(cfg.UpgradeInfoFilePath(), cfg.Watchers.UIInterval)
+		require.NoError(t, err)
+
+		go func() {
+			time.Sleep(10 * time.Millisecond)
+			testutils.MustCopy(t, "upgrade-files/f1-good.json", cfg.UpgradeInfoFilePath())
+		}()
+
+		upgrade := <-uiw.Upgrades
+		require.NoError(t, upgrade.Error)
+		assert.Equal(t, upgradetypes.Plan{Name: "upgrade1", Info: "some info", Height: 123}, upgrade.Plan)
+	})
+
+	t.Run("UpgradesFileExists", func(t *testing.T) {
+		chainHome := t.TempDir()
+		err := 
os.MkdirAll(filepath.Join(chainHome, "data"), 0755)
+		require.NoError(t, err)
+
+		cfg := &config.Config{
+			ChainHome: chainHome,
+			Watchers: config.Watchers{
+				UIInterval: 5 * time.Millisecond,
+			},
+		}
+		// copy an upgrade file before creating the watcher
+		testutils.MustCopy(t, "upgrade-files/f1-good.json", cfg.UpgradeInfoFilePath())
+
+		uiw, err := NewUpgradeInfoWatcher(cfg.UpgradeInfoFilePath(), cfg.Watchers.UIInterval)
+		require.NoError(t, err)
+
+		// the old upgrade-info.json file should be loaded
+		assert.Equal(t, upgradetypes.Plan{Name: "upgrade1", Info: "some info", Height: 123}, uiw.lastInfo)
+
+		go func() {
+			time.Sleep(10 * time.Millisecond)
+			testutils.MustCopy(t, "upgrade-files/f2-normalized-name.json", cfg.UpgradeInfoFilePath())
+		}()
+
+		upgrade := <-uiw.Upgrades
+		require.NoError(t, upgrade.Error)
+		assert.Equal(t, upgradetypes.Plan{Name: "Upgrade2", Info: "some info", Height: 125}, upgrade.Plan)
+	})
+}
diff --git a/internal/pkg/cmd/cmd.go b/internal/pkg/cmd/cmd.go
new file mode 100644
index 0000000..edfaea4
--- /dev/null
+++ b/internal/pkg/cmd/cmd.go
@@ -0,0 +1,97 @@
+package cmd
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"os"
+	"os/exec"
+	"syscall"
+	"time"
+
+	"blazar/internal/pkg/errors"
+	"blazar/internal/pkg/log"
+)
+
+// cmdErr is a custom error that holds information about potential cancellation or context timeout
+// See: https://github.com/golang/go/issues/21880
+type cmdErr struct {
+	err    error
+	ctxErr error
+}
+
+func (e cmdErr) Is(target error) bool {
+	switch target {
+	case context.DeadlineExceeded, context.Canceled:
+		return e.ctxErr == context.DeadlineExceeded || e.ctxErr == context.Canceled
+	}
+	return false
+}
+
+func (e cmdErr) Error() string {
+	return e.err.Error()
+}
+
+// CheckOutputWithDeadline returns stdout, stderr, error
+func CheckOutputWithDeadline(ctx context.Context, deadline time.Duration, envVars []string, command string, args ...string) (*bytes.Buffer, *bytes.Buffer, error) {
+	ctx, cancel := context.WithTimeout(ctx, deadline)
+	defer cancel()
+
+	cmd := exec.CommandContext(ctx, command, args...)
+
+	var stdout, stderr bytes.Buffer
+	cmd.Stdout = &stdout
+	cmd.Stderr = &stderr
+	// Append custom env vars to current-process env vars;
+	// in case of conflict, the last entry wins.
+	cmd.Env = append(os.Environ(), envVars...)
+
+	// After cmd.Start(), when a deadline is triggered, cmd.Wait
+	// waits for the stdout and stderr pipes to close.
+	// If a grandchild subprocess is spawned that inherits this stdout and stderr
+	// and it gets stuck, cmd.Wait will wait forever. cmd.WaitDelay
+	// allows us to configure another deadline after which cmd.Wait
+	// will force-close the stdout and stderr pipes. However, this
+	// still doesn't kill the stuck subprocess. 
Hence we do the following:
+
+	// Set the process group ID so that we can kill the process and its children
+	cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
+	cmd.Cancel = func() error {
+		pid := cmd.Process.Pid
+		if pid <= 0 {
+			return fmt.Errorf("process group: kill argument %d is invalid", pid)
+		}
+
+		// a negative pid signals the whole process group (Setpgid above gave
+		// the child its own group), killing all child processes too
+		err := syscall.Kill(-pid, syscall.SIGKILL)
+		if err != nil {
+			return errors.Wrapf(err, "process group: kill syscall failed")
+		}
+		return nil
+	}
+
+	// Let's configure WaitDelay anyway
+	cmd.WaitDelay = time.Second
+
+	if err := cmd.Run(); err != nil {
+		return &stdout, &stderr, cmdErr{
+			err: errors.Wrapf(
+				err,
+				"command failed, cmd: %q, args: %s, deadline: %s, timed out: %t",
+				command, args, deadline.String(), ctx.Err() == context.DeadlineExceeded,
+			),
+			ctxErr: ctx.Err(),
+		}
+	}
+	return &stdout, &stderr, nil
+}
+
+func ExecuteWithDeadlineAndLog(ctx context.Context, deadline time.Duration, envVars []string, command string, args ...string) error {
+	logger := log.FromContext(ctx)
+	logger.Infof("Executing command %q %s", command, args)
+
+	stdout, stderr, err := CheckOutputWithDeadline(ctx, deadline, envVars, command, args...)
+	logger.Infof("Command %q args: %s stdout:\n%s", command, args, stdout.String())
+	logger.Infof("Command %q args: %s stderr:\n%s", command, args, stderr.String())
+	return err
+}
diff --git a/internal/pkg/config/config.go b/internal/pkg/config/config.go
new file mode 100644
index 0000000..252d2e9
--- /dev/null
+++ b/internal/pkg/config/config.go
@@ -0,0 +1,625 @@
+package config
+
+import (
+	"fmt"
+	"os"
+	"path"
+	"path/filepath"
+	"slices"
+	"strings"
+	"time"
+
+	"blazar/internal/pkg/errors"
+	checksproto "blazar/internal/pkg/proto/daemon"
+	urproto "blazar/internal/pkg/proto/upgrades_registry"
+
+	"github.com/BurntSushi/toml"
+	"golang.org/x/sys/unix"
+)
+
+type UpgradeMode string
+
+const (
+	UpgradeInComposeFile UpgradeMode = "compose-file"
+	UpgradeInEnvFile     UpgradeMode = "env-file"
+)
+
+var ValidUpgradeModes = []UpgradeMode{UpgradeInEnvFile, UpgradeInComposeFile}
+
+type SlackWebhookNotifier struct {
+	WebhookURL string `toml:"webhook-url"`
+}
+
+type SlackBotNotifier struct {
+	AuthToken     string `toml:"auth-token"`
+	Channel       string `toml:"channel"`
+	GroupMessages bool   `toml:"group-messages"`
+}
+
+type Slack struct {
+	WebhookNotifier *SlackWebhookNotifier `toml:"webhook-notifier"`
+	BotNotifier     *SlackBotNotifier     `toml:"bot-notifier"`
+}
+
+type DockerCredentialHelper struct {
+	Command string        `toml:"command"`
+	Timeout time.Duration `toml:"timeout"`
+}
+
+type Watchers struct {
+	UIInterval time.Duration `toml:"upgrade-info-interval"`
+	HInterval  time.Duration `toml:"height-interval"`
+	UPInterval time.Duration `toml:"upgrade-proposals-interval"`
+}
+
+type Clients struct {
+	Host         string        `toml:"host"`
+	GrpcPort     uint16        `toml:"grpc-port"`
+	CometbftPort uint16        `toml:"cometbft-port"`
+	Timeout      time.Duration `toml:"timeout"`
+}
+
+type ComposeCli struct {
+	DownTimeout time.Duration `toml:"down-timeout"`
+	UpDeadline  time.Duration `toml:"up-deadline"`
+	EnvPrefix   string        `toml:"env-prefix"`
+}
+
+type SslMode string
+
+const (
+	Disable    SslMode = "disable"
+	Allow      SslMode = "allow"
+	Prefer     SslMode = "prefer"
+	Require    SslMode = "require"
+	VerifyCa   SslMode = "verify-ca"
+	VerifyFull SslMode = "verify-full"
+)
+
+type ChainProvider struct {
+	DefaultPriority int32 `toml:"default-priority"`
+}
+
+type DatabaseProvider struct {
+	DefaultPriority int32  `toml:"default-priority"`
+	Host            string `toml:"host"`
+	Port 
uint16 `toml:"port"`
+	DB              string  `toml:"db"`
+	User            string  `toml:"user"`
+	Password        string  `toml:"password"`
+	PasswordFile    string  `toml:"password-file"`
+	SslMode         SslMode `toml:"ssl-mode"`
+	AutoMigrate     bool    `toml:"auto-migrate"`
+}
+
+type LocalProvider struct {
+	DefaultPriority int32  `toml:"default-priority"`
+	ConfigPath      string `toml:"config-path"`
+}
+
+type Provider struct {
+	Chain    *ChainProvider    `toml:"chain"`
+	Database *DatabaseProvider `toml:"database"`
+	Local    *LocalProvider    `toml:"local"`
+}
+
+type VersionResolvers struct {
+	Providers []string `toml:"providers"`
+}
+
+type StateMachine struct {
+	Provider string `toml:"provider"`
+}
+
+type PreUpgrade struct {
+	Enabled       []string       `toml:"enabled"`
+	Blocks        int64          `toml:"blocks"`
+	SetHaltHeight *SetHaltHeight `toml:"set-halt-height"`
+}
+
+type SetHaltHeight struct {
+	DelayBlocks int64 `toml:"delay-blocks"`
+}
+
+type GrpcResponsive struct {
+	PollInterval time.Duration `toml:"poll-interval"`
+	Timeout      time.Duration `toml:"timeout"`
+}
+
+type ChainHeightIncreased struct {
+	PollInterval  time.Duration `toml:"poll-interval"`
+	NotifInterval time.Duration `toml:"notif-interval"`
+	Timeout       time.Duration `toml:"timeout"`
+}
+
+type FirstBlockVoted struct {
+	PollInterval  time.Duration `toml:"poll-interval"`
+	NotifInterval time.Duration `toml:"notif-interval"`
+	Timeout       time.Duration `toml:"timeout"`
+}
+
+type PostUpgrade struct {
+	Enabled              []string              `toml:"enabled"`
+	GrpcResponsive       *GrpcResponsive       `toml:"grpc-responsive"`
+	ChainHeightIncreased *ChainHeightIncreased `toml:"chain-height-increased"`
+	FirstBlockVoted      *FirstBlockVoted      `toml:"first-block-voted"`
+}
+
+type Checks struct {
+	PreUpgrade  PreUpgrade  `toml:"pre-upgrade"`
+	PostUpgrade PostUpgrade `toml:"post-upgrade"`
+}
+
+type UpgradeRegistry struct {
+	Network           string            `toml:"network"`
+	Provider          Provider          `toml:"provider"`
+	SelectedProviders []string          `toml:"providers"`
+	VersionResolvers  *VersionResolvers `toml:"version-resolvers"`
+	StateMachine      StateMachine      `toml:"state-machine"`
+}
+
+// The validation of the config and the order of params in the sample
+// toml files follow a DFS traversal of the struct.
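+//
+// For illustration only, a hypothetical minimal blazar.toml fragment mapping
+// onto this struct (keys follow the toml tags above; all paths and values
+// here are made up, not taken from the repository's sample files):
+//
+//	compose-file = "/opt/chain/docker-compose.yml"
+//	compose-service = "node"
+//	upgrade-mode = "compose-file"
+//	chain-home = "/opt/chain/home"
+//	host = "0.0.0.0"
+//	grpc-port = 5678
+//	http-port = 5679
+//
+//	[upgrade-registry]
+//	network = "cosmoshub-4"
+//	providers = ["local"]
+//
+//	[upgrade-registry.provider.local]
+//	default-priority = 1
+//	config-path = "/opt/chain/upgrades.json"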
+// The validation of the config and the order of params in the sample
+// toml files follow a DFS traversal of the struct.
+type Config struct {
+	ComposeFile      string                  `toml:"compose-file"`
+	ComposeService   string                  `toml:"compose-service"`
+	VersionFile      string                  `toml:"version-file"`
+	UpgradeMode      UpgradeMode             `toml:"upgrade-mode"`
+	ChainHome        string                  `toml:"chain-home"`
+	LogLevel         int8                    `toml:"log-level"`
+	Host             string                  `toml:"host"`
+	GrpcPort         uint16                  `toml:"grpc-port"`
+	HTTPPort         uint16                  `toml:"http-port"`
+	Watchers         Watchers                `toml:"watchers"`
+	Clients          Clients                 `toml:"clients"`
+	Compose          ComposeCli              `toml:"compose-cli"`
+	Checks           Checks                  `toml:"checks"`
+	Slack            *Slack                  `toml:"slack"`
+	CredentialHelper *DockerCredentialHelper `toml:"docker-credential-helper"`
+	UpgradeRegistry  UpgradeRegistry         `toml:"upgrade-registry"`
+}
+
+func ReadEnvVar(key string) string {
+	return os.Getenv("BLAZAR_" + key)
+}
+
+func ReadConfig(cfgFile string) (*Config, error) {
+	var config Config
+	_, err := toml.DecodeFile(cfgFile, &config)
+	if err != nil {
+		return nil, errors.Wrapf(err, "could not decode config file")
+	}
+	return &config, nil
+}
+
+func (cfg *Config) UpgradeInfoFilePath() string {
+	return filepath.Join(cfg.ChainHome, "data", "upgrade-info.json")
+}
+
+func checkAccess(path string, permBits uint32) error {
+	err := unix.Access(path, permBits)
+	if err != nil {
+		return errors.Wrapf(err, "requested permission bits %03b not found on %q", permBits, path)
+	}
+	return nil
+}
+
+func validateDir(dir string, permBits uint32) error {
+	switch {
+	case !filepath.IsAbs(dir):
+		return fmt.Errorf("%q must be an absolute path", dir)
+	default:
+		switch dirStat, err := os.Stat(dir); {
+		case os.IsNotExist(err):
+			return errors.Wrapf(err, "directory not found")
+		case err != nil:
+			return errors.Wrapf(err, "could not stat directory")
+		case !dirStat.IsDir():
+			return fmt.Errorf("the path %q already exists but is not a directory", dir)
+		default:
+			return checkAccess(dir, permBits)
+		}
+	}
+}
+
+func validateFile(file string, permBits uint32) error {
+	switch {
+	case !filepath.IsAbs(file):
+		return fmt.Errorf("%q must be an absolute path", file)
+	default:
+		switch fileStat, err := os.Stat(file); {
+		case os.IsNotExist(err):
+			return errors.Wrapf(err, "file not found")
+		case err != nil:
+			return errors.Wrapf(err, "could not stat file")
+		case fileStat.IsDir():
+			return fmt.Errorf("the path %q already exists but is not a file", file)
+		default:
+			return checkAccess(file, permBits)
+		}
+	}
+}
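The %03b verb renders the requested permission bits as three binary digits, which is where the 110/100/101 strings in the tests further down come from. A quick illustration (the path here is made up): unix.R_OK = 4 (100), unix.W_OK = 2 (010) and unix.X_OK = 1 (001), so read+write renders as 110 and read+execute as 101:

	// Illustrative only: how the unix permission bits map to the error text.
	err := checkAccess("/tmp/example.toml", unix.R_OK|unix.W_OK) // hypothetical path
	// on failure:
	//   requested permission bits 110 not found on "/tmp/example.toml": permission denied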
+func (cfg *Config) ValidateVersionFile() error {
+	if err := validateFile(cfg.VersionFile, unix.R_OK|unix.W_OK); err != nil {
+		return errors.Wrapf(err, "error validating version-file")
+	}
+	if err := validateDir(path.Dir(cfg.VersionFile), unix.R_OK|unix.W_OK); err != nil {
+		return errors.Wrapf(err, "error validating version-file")
+	}
+	return nil
+}
+
+func (cfg *Config) ValidateComposeFile() error {
+	if err := validateFile(cfg.ComposeFile, unix.R_OK|unix.W_OK); err != nil {
+		return errors.Wrapf(err, "error validating compose-file")
+	}
+	if err := validateDir(path.Dir(cfg.ComposeFile), unix.R_OK|unix.W_OK); err != nil {
+		return errors.Wrapf(err, "error validating compose-file")
+	}
+	return nil
+}
+
+func (cfg *Config) ValidateChainHome() error {
+	if err := validateDir(cfg.ChainHome, unix.R_OK|unix.W_OK); err != nil {
+		return errors.Wrapf(err, "error validating chain-home")
+	}
+	// now check if the upgrade-info.json file is readable
+	// if present, and if it is not present check if the data
+	// dir is readable
+	if err := validateFile(cfg.UpgradeInfoFilePath(), unix.R_OK); err != nil {
+		if os.IsNotExist(errors.Unwrap(err)) {
+			if err := validateDir(filepath.Join(cfg.ChainHome, "data"), unix.R_OK); err != nil {
+				return errors.Wrapf(err, "error validating chain-home/data")
+			}
+		} else {
+			return errors.Wrapf(err, "error validating chain-home/data/upgrade-info.json")
+		}
+	}
+	return nil
+}
+
+func (cfg *Config) LoadWebhookURL() error {
+	url := cfg.Slack.WebhookNotifier.WebhookURL
+	if url[0] == '/' {
+		// it must be a path
+		contents, err := os.ReadFile(url)
+		if err != nil {
+			return errors.Wrapf(err, "failed reading %s file", url)
+		}
+		cfg.Slack.WebhookNotifier.WebhookURL = strings.TrimSpace(string(contents))
+	}
+	return nil
+}
+
+func (cfg *Config) LoadBotToken() error {
+	token := cfg.Slack.BotNotifier.AuthToken
+	if token[0] == '/' {
+		// it must be a path
+		contents, err := os.ReadFile(token)
+		if err != nil {
+			return errors.Wrapf(err, "failed reading %s file", token)
+		}
+		cfg.Slack.BotNotifier.AuthToken = strings.TrimSpace(string(contents))
+	}
+	return nil
+}
+
+func (cfg *Config) ValidateCredentialHelper() error {
+	if err := validateFile(cfg.CredentialHelper.Command, unix.R_OK|unix.X_OK); err != nil {
+		return errors.Wrapf(err, "error validating docker-credential-helper.command")
+	}
+	if cfg.CredentialHelper.Timeout <= 0 {
+		return errors.New("docker-credential-helper.timeout cannot be less than or equal to 0")
+	}
+	return nil
+}
+
+func (cfg *Config) checkProvider(provider string) error {
+	switch provider {
+	case urproto.ProviderType_name[int32(urproto.ProviderType_CHAIN)]:
+		if cfg.UpgradeRegistry.Provider.Chain == nil {
+			return errors.New("upgrade-registry.provider.chain cannot be nil")
+		}
+	case urproto.ProviderType_name[int32(urproto.ProviderType_DATABASE)]:
+		if cfg.UpgradeRegistry.Provider.Database == nil {
+			return errors.New("upgrade-registry.provider.database cannot be nil")
+		}
+	case urproto.ProviderType_name[int32(urproto.ProviderType_LOCAL)]:
+		if cfg.UpgradeRegistry.Provider.Local == nil {
+			return errors.New("upgrade-registry.provider.local cannot be nil")
+		}
+	default:
+		return fmt.Errorf("unknown provider: %s", provider)
+	}
+	return nil
+}
+
+func (cfg *Config) ValidateBlazarHostGrpcPort() error {
+	if cfg.Host == "" {
+		return errors.New("host cannot be empty")
+	}
+
+	if cfg.GrpcPort == 0 {
+		return errors.New("grpc-port cannot be 0")
+	}
+	return nil
+}
+
+func (cfg *Config) ValidateGrpcClient() error {
+	if cfg.Clients.Host == "" {
+		return errors.New("clients.host cannot be empty")
+	}
+
+	if cfg.Clients.GrpcPort == 0 {
+		return errors.New("clients.grpc-port cannot be 0")
+	}
+	return nil
+}
+
+func (cfg *Config) ValidatePreUpgradeChecks() error {
+	if cfg.Checks.PreUpgrade.Blocks <= 0 {
+		return errors.New("checks.pre-upgrade.blocks cannot be less than or equal to 0")
+	}
+	for _, check := range cfg.Checks.PreUpgrade.Enabled {
+		switch check {
+		case checksproto.PreCheck_name[int32(checksproto.PreCheck_SET_HALT_HEIGHT)]:
+			if cfg.Checks.PreUpgrade.SetHaltHeight == nil {
+				return errors.New("checks.pre-upgrade.set-halt-height cannot be nil")
+			}
+			if cfg.Checks.PreUpgrade.SetHaltHeight.DelayBlocks < 0 {
+				return errors.New("checks.pre-upgrade.set-halt-height.delay-blocks cannot be less than 0")
+			}
+		case checksproto.PreCheck_name[int32(checksproto.PreCheck_PULL_DOCKER_IMAGE)]:
+			// there is no config so nothing to check
+		default:
+			return fmt.Errorf("unknown value in checks.pre-upgrade.enabled: %s", check)
+		}
+	}
+
+	return nil
+}
+func (cfg *Config) ValidatePostUpgradeChecks() error {
+	for _, check := range cfg.Checks.PostUpgrade.Enabled {
+		switch check {
+		case checksproto.PostCheck_name[int32(checksproto.PostCheck_GRPC_RESPONSIVE)]:
+			if cfg.Checks.PostUpgrade.GrpcResponsive == nil {
+				return errors.New("checks.post-upgrade.grpc-responsive cannot be nil")
+			}
+			if cfg.Checks.PostUpgrade.GrpcResponsive.PollInterval <= 0 {
+				return errors.New("checks.post-upgrade.grpc-responsive.poll-interval cannot be less than or equal to 0")
+			}
+			if cfg.Checks.PostUpgrade.GrpcResponsive.Timeout <= 0 {
+				return errors.New("checks.post-upgrade.grpc-responsive.timeout cannot be less than or equal to 0")
+			}
+		case checksproto.PostCheck_name[int32(checksproto.PostCheck_CHAIN_HEIGHT_INCREASED)]:
+			if cfg.Checks.PostUpgrade.ChainHeightIncreased == nil {
+				return errors.New("checks.post-upgrade.chain-height-increased cannot be nil")
+			}
+			if cfg.Checks.PostUpgrade.ChainHeightIncreased.PollInterval <= 0 {
+				return errors.New("checks.post-upgrade.chain-height-increased.poll-interval cannot be less than or equal to 0")
+			}
+			if cfg.Checks.PostUpgrade.ChainHeightIncreased.NotifInterval <= 0 {
+				return errors.New("checks.post-upgrade.chain-height-increased.notif-interval cannot be less than or equal to 0")
+			}
+			if cfg.Checks.PostUpgrade.ChainHeightIncreased.Timeout <= 0 {
+				return errors.New("checks.post-upgrade.chain-height-increased.timeout cannot be less than or equal to 0")
+			}
+		case checksproto.PostCheck_name[int32(checksproto.PostCheck_FIRST_BLOCK_VOTED)]:
+			if cfg.Checks.PostUpgrade.FirstBlockVoted == nil {
+				return errors.New("checks.post-upgrade.first-block-voted cannot be nil")
+			}
+			if cfg.Checks.PostUpgrade.FirstBlockVoted.PollInterval <= 0 {
+				return errors.New("checks.post-upgrade.first-block-voted.poll-interval cannot be less than or equal to 0")
+			}
+			if cfg.Checks.PostUpgrade.FirstBlockVoted.NotifInterval <= 0 {
+				return errors.New("checks.post-upgrade.first-block-voted.notif-interval cannot be less than or equal to 0")
+			}
+			if cfg.Checks.PostUpgrade.FirstBlockVoted.Timeout <= 0 {
+				return errors.New("checks.post-upgrade.first-block-voted.timeout cannot be less than or equal to 0")
+			}
+		default:
+			return fmt.Errorf("unknown value in checks.post-upgrade.enabled: %s", check)
+		}
+	}
+
+	return nil
+}
errors.New("clients.timeout cannot be less than or equal to 0") + } + + if cfg.Compose.DownTimeout < 10*time.Second { + return errors.New("compose-cli.down-timeout cannot be less than 10s") + } + + if cfg.Compose.UpDeadline < 10*time.Second { + return errors.New("compose-cli.up-deadline cannot be less than 10s") + } + + if err := cfg.ValidatePreUpgradeChecks(); err != nil { + return err + } + + if err := cfg.ValidatePostUpgradeChecks(); err != nil { + return err + } + + // slack notifications are not mandatory + if cfg.Slack != nil { + if cfg.Slack.WebhookNotifier != nil && cfg.Slack.BotNotifier != nil { + return errors.New("there can only be one slack notifier, please choose one webhook or bot notifier") + } + + if cfg.Slack.WebhookNotifier != nil { + if cfg.Slack.WebhookNotifier.WebhookURL == "" { + return errors.New("slack.webhook-notifier.webhook-url cannot be empty") + } + if err := cfg.LoadWebhookURL(); err != nil { + return err + } + } + + if cfg.Slack.BotNotifier != nil { + if cfg.Slack.BotNotifier.AuthToken == "" { + return errors.New("slack.bot-notifier.auth-token cannot be empty") + } + if cfg.Slack.BotNotifier.Channel == "" { + return errors.New("slack.bot-notifier.channel cannot be empty") + } + if err := cfg.LoadBotToken(); err != nil { + return err + } + } + } + + // docker credential helper is not mandatory + if cfg.CredentialHelper != nil { + if err := cfg.ValidateCredentialHelper(); err != nil { + return err + } + } + + if len(cfg.UpgradeRegistry.SelectedProviders) == 0 { + return errors.New("upgrade-registry.providers cannot be empty") + } + for i, provider := range cfg.UpgradeRegistry.SelectedProviders { + provider = strings.ToUpper(provider) + cfg.UpgradeRegistry.SelectedProviders[i] = provider + + if err := cfg.checkProvider(provider); err != nil { + return errors.Wrapf(err, "error validating upgrade-registry.providers") + } + } + + if cfg.UpgradeRegistry.Network == "" { + return errors.New("upgrade-registry.network cannot be empty") + } + + if cfg.UpgradeRegistry.Provider.Chain != nil { + if cfg.UpgradeRegistry.Provider.Chain.DefaultPriority < 1 || cfg.UpgradeRegistry.Provider.Chain.DefaultPriority > 99 { + return errors.New("upgrade-registry.provider.chain.default-priority must be between 1 and 99") + } + } + + if cfg.UpgradeRegistry.Provider.Database != nil { + if cfg.UpgradeRegistry.Provider.Database.DefaultPriority < 1 || cfg.UpgradeRegistry.Provider.Database.DefaultPriority > 99 { + return errors.New("upgrade-registry.provider.database.priority must be between 1 and 99") + } + if cfg.UpgradeRegistry.Provider.Database.Host == "" { + return errors.New("upgrade-registry.provider.database.host cannot be empty") + } + if cfg.UpgradeRegistry.Provider.Database.Port == 0 { + return errors.New("upgrade-registry.provider.database.port cannot be empty") + } + if cfg.UpgradeRegistry.Provider.Database.DB == "" { + return errors.New("upgrade-registry.provider.database.db cannot be empty") + } + if cfg.UpgradeRegistry.Provider.Database.User == "" { + return errors.New("upgrade-registry.provider.database.user cannot be empty") + } + if cfg.UpgradeRegistry.Provider.Database.PasswordFile != "" { + if err := validateFile(cfg.UpgradeRegistry.Provider.Database.PasswordFile, unix.R_OK); err != nil { + return errors.Wrapf(err, "error validating upgrade-registry.provider.database.password-file") + } + contents, err := os.ReadFile(cfg.UpgradeRegistry.Provider.Database.PasswordFile) + if err != nil { + return errors.Wrapf(err, "failed to open file: %s", 
+			cfg.UpgradeRegistry.Provider.Database.Password = strings.TrimSpace(string(contents))
+		} else if cfg.UpgradeRegistry.Provider.Database.Password == "" {
+			return errors.New("upgrade-registry.provider.database.password cannot be empty")
+		}
+
+		switch cfg.UpgradeRegistry.Provider.Database.SslMode {
+		case Disable, Allow, Prefer, Require, VerifyCa, VerifyFull:
+		default:
+			return errors.New("upgrade-registry.provider.database.ssl-mode must be one of disable, allow, prefer, require, verify-ca, verify-full")
+		}
+	}
+
+	if cfg.UpgradeRegistry.Provider.Local != nil {
+		if cfg.UpgradeRegistry.Provider.Local.DefaultPriority < 1 || cfg.UpgradeRegistry.Provider.Local.DefaultPriority > 99 {
+			return errors.New("upgrade-registry.provider.local.default-priority must be between 1 and 99")
+		}
+		if cfg.UpgradeRegistry.Provider.Local.ConfigPath == "" {
+			return errors.New("upgrade-registry.provider.local.config-path cannot be empty")
+		}
+	}
+
+	// version resolver is optional
+	if cfg.UpgradeRegistry.VersionResolvers != nil {
+		if len(cfg.UpgradeRegistry.VersionResolvers.Providers) == 0 {
+			return errors.New("upgrade-registry.version-resolvers.providers cannot be empty")
+		}
+		for i, provider := range cfg.UpgradeRegistry.VersionResolvers.Providers {
+			provider = strings.ToUpper(provider)
+			cfg.UpgradeRegistry.VersionResolvers.Providers[i] = provider
+
+			if err := cfg.checkProvider(provider); err != nil {
+				return errors.Wrapf(err, "error validating upgrade-registry.version-resolvers.providers")
+			}
+		}
+	}
+
+	cfg.UpgradeRegistry.StateMachine.Provider = strings.ToUpper(cfg.UpgradeRegistry.StateMachine.Provider)
+	if err := cfg.checkProvider(cfg.UpgradeRegistry.StateMachine.Provider); err != nil {
+		return errors.Wrapf(err, "error validating upgrade-registry.state-machine.provider")
+	}
+
+	return nil
+}
diff --git a/internal/pkg/config/config_test.go b/internal/pkg/config/config_test.go
new file mode 100644
index 0000000..22176d7
--- /dev/null
+++ b/internal/pkg/config/config_test.go
@@ -0,0 +1,459 @@
+package config
+
+import (
+	"errors"
+	"fmt"
+	"io/fs"
+	"os"
+	"path/filepath"
+	"testing"
+	"time"
+
+	"blazar/internal/pkg/testutils"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// To ensure that the config file is read correctly
+// and blazar.sample.toml is properly formatted
+func TestReadConfigToml(t *testing.T) {
+	cfg, err := ReadConfig("../../../blazar.sample.toml")
+	require.NoError(t, err)
+	assert.Equal(t, &Config{
+		ComposeFile:    "",
+		ComposeService: "",
+		UpgradeMode:    UpgradeInComposeFile,
+		ChainHome:      "",
+		LogLevel:       1,
+		Host:           "0.0.0.0",
+		HTTPPort:       1234,
+		GrpcPort:       5678,
+		Watchers: Watchers{
+			UIInterval: 300 * time.Millisecond,
+			HInterval:  0,
+			UPInterval: 10 * time.Minute,
+		},
+		Clients: Clients{
+			Host:         "",
+			GrpcPort:     9090,
+			CometbftPort: 25567,
+			Timeout:      10 * time.Second,
+		},
+		Compose: ComposeCli{
+			DownTimeout: time.Minute,
+			UpDeadline:  time.Minute,
+			EnvPrefix:   "",
+		},
+		Checks: Checks{
+			PreUpgrade: PreUpgrade{
+				Enabled: []string{"PULL_DOCKER_IMAGE", "SET_HALT_HEIGHT"},
+				Blocks:  200,
+				SetHaltHeight: &SetHaltHeight{
+					DelayBlocks: 0,
+				},
+			},
+			PostUpgrade: PostUpgrade{
+				Enabled: []string{"GRPC_RESPONSIVE", "CHAIN_HEIGHT_INCREASED", "FIRST_BLOCK_VOTED"},
+				GrpcResponsive: &GrpcResponsive{
+					PollInterval: 1 * time.Second,
+					Timeout:      3 * time.Minute,
+				},
+				ChainHeightIncreased: &ChainHeightIncreased{
+					PollInterval:  1 * time.Second,
+					NotifInterval: 1 * time.Minute,
+					Timeout:       5 * time.Minute,
+				},
+				FirstBlockVoted: &FirstBlockVoted{
+					PollInterval:  1 * time.Second,
+					NotifInterval: 1 * time.Minute,
+					Timeout:       5 * time.Minute,
+				},
+			},
+		},
+		Slack: &Slack{
+			WebhookNotifier: &SlackWebhookNotifier{
+				WebhookURL: "",
+			},
+		},
+		CredentialHelper: &DockerCredentialHelper{
+			Command: "",
+			Timeout: 10 * time.Second,
+		},
+		UpgradeRegistry: UpgradeRegistry{
+			SelectedProviders: []string{"chain", "database", "local"},
+			Network:           "",
+			Provider: Provider{
+				Database: &DatabaseProvider{
+					DefaultPriority: int32(3),
+					Host:            "",
+					Port:            5432,
+					DB:              "",
+					User:            "",
+					Password:        "",
+					PasswordFile:    "",
+					SslMode:         Disable,
+					AutoMigrate:     false,
+				},
+				Local: &LocalProvider{
+					ConfigPath:      "./local-provider.db.json",
+					DefaultPriority: int32(2),
+				},
+				Chain: &ChainProvider{
+					DefaultPriority: int32(1),
+				},
+			},
+
+			VersionResolvers: &VersionResolvers{
+				Providers: []string{"local", "database"},
+			},
+			StateMachine: StateMachine{
+				Provider: "local",
+			},
+		},
+	}, cfg)
+
+	assert.NoError(t, cfg.ValidateGrpcClient())
+	assert.NoError(t, cfg.ValidatePreUpgradeChecks())
+	assert.NoError(t, cfg.ValidatePostUpgradeChecks())
+}
+
+// To ensure that compose file validation catches missing files,
+// paths that are not regular files, relative paths and permission problems
+func TestValidateComposeFile(t *testing.T) {
+	tempDir := testutils.PrepareTestData(t, "config", "validate-compose-file", "")
+
+	tests := []struct {
+		name        string
+		composeFile string
+		expectedErr error
+	}{
+		{
+			name:        "ValidFile",
+			composeFile: filepath.Join(tempDir, "valid-compose.yaml"),
+			expectedErr: nil,
+		},
+		{
+			name:        "FileNotFound",
+			composeFile: filepath.Join(tempDir, "nonexistent-compose.yaml"),
+			expectedErr: errors.New("error validating compose-file: file not found: stat " + filepath.Join(tempDir, "nonexistent-compose.yaml") + ": no such file or directory"),
+		},
+		{
+			name:        "InvalidPathIsDir",
+			composeFile: filepath.Join(tempDir, "some-directory"),
+			expectedErr: errors.New("error validating compose-file: the path \"" + filepath.Join(tempDir, "some-directory") + "\" already exists but is not a file"),
+		},
+		{
+			name:        "InvalidPathIsRelative",
+			composeFile: "valid-compose.yaml",
+			expectedErr: errors.New("error validating compose-file: \"valid-compose.yaml\" must be an absolute path"),
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			cfg := &Config{ComposeFile: test.composeFile}
+
+			if err := cfg.ValidateComposeFile(); test.expectedErr != nil {
+				assert.Equal(t, test.expectedErr.Error(), err.Error())
+			} else {
+				require.NoError(t, err)
+			}
+		})
+	}
+
+	permTests := []struct {
+		name        string
+		path        string
+		mode        fs.FileMode
+		expectedErr string
+	}{
+		{
+			name:        "ComposeFileNoWrite",
+			path:        filepath.Join(tempDir, "valid-compose.yaml"),
+			mode:        0555,
+			expectedErr: "error validating compose-file: 
requested permission bits 110 not found on \"" + tempDir + "\": permission denied", + }, + } + for _, test := range permTests { + t.Run(test.name, func(t *testing.T) { + stat, err := os.Stat(test.path) + require.NoError(t, err) + err = os.Chmod(test.path, test.mode) + require.NoError(t, err) + cfg := &Config{ComposeFile: filepath.Join(tempDir, "valid-compose.yaml")} + err = cfg.ValidateComposeFile() + assert.Equal(t, test.expectedErr, err.Error()) + err = os.Chmod(test.path, stat.Mode()) + require.NoError(t, err) + }) + } +} + +func TestValidateChainHome(t *testing.T) { + tempDir := testutils.PrepareTestData(t, "config", "validate-chain-home", "") + + tests := []struct { + name string + chainHome string + expectedErr error + }{ + { + name: "ValidDir", + chainHome: filepath.Join(tempDir, "chain-home-dir"), + expectedErr: nil, + }, + { + name: "DirNotFound", + chainHome: filepath.Join(tempDir, "nonexistent-dir"), + expectedErr: errors.New("error validating chain-home: directory not found: stat " + filepath.Join(tempDir, "nonexistent-dir") + ": no such file or directory"), + }, + { + name: "InvalidPathIsFile", + chainHome: filepath.Join(tempDir, "chain-home-file"), + expectedErr: errors.New("error validating chain-home: the path \"" + filepath.Join(tempDir, "chain-home-file") + "\" already exists but is not a directory"), + }, + { + name: "InvalidPathIsRelative", + chainHome: "chain-home-dir", + expectedErr: errors.New("error validating chain-home: \"chain-home-dir\" must be an absolute path"), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cfg := &Config{ChainHome: test.chainHome} + + if err := cfg.ValidateChainHome(); test.expectedErr != nil { + assert.Equal(t, test.expectedErr.Error(), err.Error()) + } else { + require.NoError(t, err) + } + }) + } + + permTests := []struct { + name string + path string + chainHome string + mode fs.FileMode + expectedErr string + }{ + { + name: "ChainHomeNoWrite", + path: filepath.Join(tempDir, "chain-home-dir"), + chainHome: filepath.Join(tempDir, "chain-home-dir"), + mode: 0555, + expectedErr: "error validating chain-home: requested permission bits 110 not found on \"" + filepath.Join(tempDir, "chain-home-dir") + "\": permission denied", + }, + { + name: "ChainHomeNoRead", + path: filepath.Join(tempDir, "chain-home-dir"), + chainHome: filepath.Join(tempDir, "chain-home-dir"), + mode: 0333, + expectedErr: "error validating chain-home: requested permission bits 110 not found on \"" + filepath.Join(tempDir, "chain-home-dir") + "\": permission denied", + }, + { + name: "DataDirNoRead", + path: filepath.Join(tempDir, "chain-home-dir/data"), + chainHome: filepath.Join(tempDir, "chain-home-dir"), + mode: 0333, + expectedErr: "error validating chain-home/data: requested permission bits 100 not found on \"" + filepath.Join(tempDir, "chain-home-dir/data") + "\": permission denied", + }, + { + name: "DataDirNoWrite", + path: filepath.Join(tempDir, "chain-home-dir/data"), + chainHome: filepath.Join(tempDir, "chain-home-dir"), + mode: 0555, + expectedErr: "", + }, + { + name: "UpgradeInfoFileNoRead", + path: filepath.Join(tempDir, "chain-home-with-upgrade-json/data/upgrade-info.json"), + chainHome: filepath.Join(tempDir, "chain-home-with-upgrade-json"), + mode: 0333, + expectedErr: "error validating chain-home/data/upgrade-info.json: requested permission bits 100 not found on \"" + filepath.Join(tempDir, "chain-home-with-upgrade-json/data/upgrade-info.json") + "\": permission denied", + }, + { + name: "UpgradeInfoFileNoWrite", + 
path: filepath.Join(tempDir, "chain-home-with-upgrade-json/data/upgrade-info.json"), + chainHome: filepath.Join(tempDir, "chain-home-with-upgrade-json"), + mode: 0555, + expectedErr: "", + }, + } + for _, test := range permTests { + t.Run(test.name, func(t *testing.T) { + stat, err := os.Stat(test.path) + require.NoError(t, err) + err = os.Chmod(test.path, test.mode) + require.NoError(t, err) + cfg := &Config{ChainHome: test.chainHome} + err = cfg.ValidateChainHome() + if test.expectedErr == "" { + require.NoError(t, err) + } else { + assert.Equal(t, test.expectedErr, err.Error()) + } + err = os.Chmod(test.path, stat.Mode()) + require.NoError(t, err) + }) + } +} + +func TestValidateDockerCredentialHelper(t *testing.T) { + tempDir := testutils.PrepareTestData(t, "config", "validate-docker-credential-helper", "") + + tests := []struct { + name string + dockerCredentialHelper string + expectedErr error + }{ + { + name: "ValidFile", + dockerCredentialHelper: filepath.Join(tempDir, "valid-docker-credential-helper"), + expectedErr: nil, + }, + { + name: "FileNotFound", + dockerCredentialHelper: filepath.Join(tempDir, "nonexistent-docker-credential-helper"), + expectedErr: errors.New("error validating docker-credential-helper.command: file not found: stat " + filepath.Join(tempDir, "nonexistent-docker-credential-helper") + ": no such file or directory"), + }, + { + name: "InvalidPathIsDir", + dockerCredentialHelper: filepath.Join(tempDir, "some-directory"), + expectedErr: errors.New("error validating docker-credential-helper.command: the path \"" + filepath.Join(tempDir, "some-directory") + "\" already exists but is not a file"), + }, + { + name: "InvalidPathIsRelative", + dockerCredentialHelper: "valid-docker-credential-helper", + expectedErr: errors.New("error validating docker-credential-helper.command: \"valid-docker-credential-helper\" must be an absolute path"), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cfg := &Config{ + CredentialHelper: &DockerCredentialHelper{ + Command: test.dockerCredentialHelper, + Timeout: time.Second, + }, + } + + if err := cfg.ValidateCredentialHelper(); test.expectedErr != nil { + assert.Equal(t, test.expectedErr.Error(), err.Error()) + } else { + require.NoError(t, err) + } + }) + } + + permTests := []struct { + name string + path string + mode fs.FileMode + expectedErr string + }{ + { + name: "FileNoExec", + mode: 0444, + expectedErr: "error validating docker-credential-helper.command: requested permission bits 101 not found on \"" + filepath.Join(tempDir, "valid-docker-credential-helper") + "\": permission denied", + }, + { + name: "FileNoRead", + mode: 0333, + expectedErr: "error validating docker-credential-helper.command: requested permission bits 101 not found on \"" + filepath.Join(tempDir, "valid-docker-credential-helper") + "\": permission denied", + }, + } + testPath := filepath.Join(tempDir, "valid-docker-credential-helper") + for _, test := range permTests { + t.Run(test.name, func(t *testing.T) { + stat, err := os.Stat(testPath) + require.NoError(t, err) + + err = os.Chmod(testPath, test.mode) + require.NoError(t, err) + + cfg := &Config{ + CredentialHelper: &DockerCredentialHelper{ + Command: testPath, + Timeout: time.Second, + }, + } + err = cfg.ValidateCredentialHelper() + assert.Equal(t, test.expectedErr, err.Error()) + + err = os.Chmod(testPath, stat.Mode()) + require.NoError(t, err) + }) + } +} + +func TestLoadWebhookUrl(t *testing.T) { + tempDir := testutils.PrepareTestData(t, "config", 
"load-webhook-url", "") + + doesntExist := filepath.Join(tempDir, "doesnt-exist") + + tests := []struct { + name string + val string + expectedVal string + expectedErr error + }{ + { + name: "Valid", + val: "1234", + expectedVal: "1234", + expectedErr: nil, + }, + { + name: "ValidFile", + val: filepath.Join(tempDir, "webhook"), + expectedVal: "abcd", + expectedErr: nil, + }, + { + name: "NonExistentFile", + val: doesntExist, + expectedVal: doesntExist, + expectedErr: fmt.Errorf("failed reading %s file: open %s: no such file or directory", doesntExist, doesntExist), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cfg := &Config{ + Slack: &Slack{ + WebhookNotifier: &SlackWebhookNotifier{ + WebhookURL: test.val, + }, + }, + } + + if err := cfg.LoadWebhookURL(); test.expectedErr != nil { + assert.Equal(t, test.expectedErr.Error(), err.Error()) + } else { + require.NoError(t, err) + } + assert.Equal(t, test.expectedVal, cfg.Slack.WebhookNotifier.WebhookURL) + }) + } +} diff --git a/internal/pkg/cosmos/client.go b/internal/pkg/cosmos/client.go new file mode 100644 index 0000000..3f38e8a --- /dev/null +++ b/internal/pkg/cosmos/client.go @@ -0,0 +1,309 @@ +package cosmos + +import ( + "context" + "fmt" + "math" + "net" + "strconv" + "strings" + "time" + + "blazar/internal/pkg/errors" + + cstypes "github.com/cometbft/cometbft/consensus/types" + cmtjson "github.com/cometbft/cometbft/libs/json" + cometbft "github.com/cometbft/cometbft/rpc/client/http" + ctypes "github.com/cometbft/cometbft/rpc/core/types" + "github.com/cosmos/cosmos-sdk/client/grpc/tmservice" + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/codec/types" + "github.com/cosmos/cosmos-sdk/std" + "github.com/cosmos/cosmos-sdk/types/query" + v1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1" + "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" +) + +const defaultPaginationLimit = query.DefaultLimit + +type Client struct { + tmClient tmservice.ServiceClient + v1Client v1.QueryClient + v1beta1Client v1beta1.QueryClient + cometbftClient *cometbft.HTTP + + isCometbftStarted bool + timeout time.Duration + paginationLimit uint64 + callOptions []grpc.CallOption +} + +func NewCosmosGrpcOnlyClient(host string, grpcPort uint16, timeout time.Duration) (*Client, error) { + grpcConn, err := createGrpcConn(host, grpcPort) + if err != nil { + return nil, err + } + return &Client{ + tmClient: tmservice.NewServiceClient(grpcConn), + v1Client: v1.NewQueryClient(grpcConn), + v1beta1Client: v1beta1.NewQueryClient(grpcConn), + timeout: timeout, + paginationLimit: defaultPaginationLimit, + // https://github.com/cosmos/cosmos-sdk/blob/a86c2a9980ffc4fed1f8c423889e0628193ffaab/server/config/config.go#L140 + callOptions: []grpc.CallOption{grpc.MaxCallRecvMsgSize(math.MaxInt32)}, + }, nil +} + +func NewClient(host string, grpcPort uint16, cometbftPort uint16, timeout time.Duration) (*Client, error) { + grpcConn, err := createGrpcConn(host, grpcPort) + if err != nil { + return nil, err + } + + cometbftClient, err := cometbft.New(fmt.Sprintf("tcp://%s", net.JoinHostPort(host, strconv.Itoa(int(cometbftPort)))), "/websocket") + if err != nil { + return nil, errors.Wrapf(err, "failed to create cometbft http client") + } + + return &Client{ + tmClient: tmservice.NewServiceClient(grpcConn), + v1Client: v1.NewQueryClient(grpcConn), + v1beta1Client: v1beta1.NewQueryClient(grpcConn), + cometbftClient: cometbftClient, + timeout: timeout, + 
paginationLimit: defaultPaginationLimit,
+		// https://github.com/cosmos/cosmos-sdk/blob/a86c2a9980ffc4fed1f8c423889e0628193ffaab/server/config/config.go#L140
+		callOptions: []grpc.CallOption{grpc.MaxCallRecvMsgSize(math.MaxInt32)},
+	}, nil
+}
+
+func (cc *Client) StartCometbftClient() error {
+	if cc.isCometbftStarted {
+		return nil
+	}
+
+	err := cc.cometbftClient.Start()
+	if err == nil {
+		cc.isCometbftStarted = true
+	}
+	return err
+}
+
+func (cc *Client) GetLatestBlockHeight(ctx context.Context) (int64, error) {
+	var cancel context.CancelFunc
+
+	ctx, cancel = context.WithTimeout(ctx, cc.timeout)
+	defer cancel()
+
+	res, err := cc.tmClient.GetLatestBlock(ctx, &tmservice.GetLatestBlockRequest{}, cc.callOptions...)
+	if err != nil {
+		return 0, errors.Wrapf(err, "failed to get latest block")
+	}
+
+	if res.SdkBlock != nil {
+		return res.SdkBlock.Header.Height, nil
+	}
+	// This is deprecated in sdk v0.47, but many chains don't return the
+	// alternative sdk_block structure.
+	return res.Block.Header.Height, nil
+}
+
+func (cc *Client) GetProposalsV1(ctx context.Context) (v1.Proposals, error) {
+	key := []byte{}
+	proposals := make(v1.Proposals, 0, 50)
+
+	for {
+		// use a fresh timeout for every page; deriving it from the parent ctx
+		// on each iteration (instead of reassigning ctx) avoids compounding
+		// the first page's deadline onto all later pages and avoids piling up
+		// deferred cancel funcs inside the loop
+		reqCtx, cancel := context.WithTimeout(ctx, cc.timeout)
+
+		res, err := cc.v1Client.Proposals(reqCtx, &v1.QueryProposalsRequest{
+			Pagination: &query.PageRequest{
+				Key:   key,
+				Limit: cc.paginationLimit,
+			},
+		}, cc.callOptions...)
+		cancel()
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to get proposals")
+		}
+
+		proposals = append(proposals, res.Proposals...)
+		if key = res.Pagination.NextKey; key == nil {
+			break
+		}
+	}
+	return proposals, nil
+}
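A minimal, hypothetical usage sketch of the paginated query above (host, ports and timeout are made up); GetProposalsV1 walks every NextKey page transparently and returns the accumulated list:

	// Hypothetical caller: fetch all v1 governance proposals and print their IDs.
	client, err := cosmos.NewClient("localhost", 9090, 26657, 10*time.Second)
	if err != nil {
		panic(err)
	}
	props, err := client.GetProposalsV1(context.Background())
	if err != nil {
		panic(err)
	}
	for _, p := range props {
		fmt.Println(p.Id, p.Status)
	}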
+func (cc *Client) GetProposalsV1beta1(ctx context.Context) (v1beta1.Proposals, error) {
+	key := []byte{}
+	proposals := make(v1beta1.Proposals, 0, 50)
+
+	for {
+		// fresh per-page timeout, same reasoning as in GetProposalsV1
+		reqCtx, cancel := context.WithTimeout(ctx, cc.timeout)
+
+		res, err := cc.v1beta1Client.Proposals(reqCtx, &v1beta1.QueryProposalsRequest{
+			Pagination: &query.PageRequest{
+				Key:   key,
+				Limit: cc.paginationLimit,
+			},
+		}, cc.callOptions...)
+		cancel()
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to get proposals")
+		}
+
+		proposals = append(proposals, res.Proposals...)
+		if key = res.Pagination.NextKey; key == nil {
+			break
+		}
+	}
+	return proposals, nil
+}
+
+func (cc *Client) GetStatus(ctx context.Context) (*ctypes.ResultStatus, error) {
+	response, err := cc.cometbftClient.Status(ctx)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to get status")
+	}
+	return response, nil
+}
+
+func (cc *Client) NodeInfo(ctx context.Context) (*tmservice.GetNodeInfoResponse, error) {
+	response, err := cc.tmClient.GetNodeInfo(ctx, &tmservice.GetNodeInfoRequest{}, cc.callOptions...)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to get node info")
+	}
+	return response, nil
+}
+
+func (cc *Client) GetCometbftClient() *cometbft.HTTP {
+	return cc.cometbftClient
+}
+
+type PrevoteInfo struct {
+	Height int64
+	Round  int32
+	Step   uint8
+	// these are int64 in cometbft
+	TotalVP  int64
+	OnlineVP int64
+}
+
+func (cc *Client) GetPrevoteInfo(ctx context.Context) (*PrevoteInfo, error) {
+	var cancel context.CancelFunc
+
+	ctx, cancel = context.WithTimeout(ctx, cc.timeout)
+	defer cancel()
+
+	// DumpConsensusState returns data in a more structured manner
+	// but unfortunately it is broken on certain versions of tendermint
+	// https://github.com/cometbft/cometbft/issues/863
+	// Also in my experiments, reloading the /dump_consensus_state route
+	// always shows nil-votes for all prevotes, hence I am not sure whether
+	// that is reliable. It is meant for debugging, so maybe it is updated
+	// only under certain conditions, or it is just too slow. On the other
+	// hand, the /consensus_state route shows prevote information even when
+	// the other route shows nil-votes for everyone, so it seems more reliable.
+	// try running:
+	// watch -n 0.2 "curl -s http://ip:port/dump_consensus_state | jq .result.round_state.votes[0].prevotes_bit_array"
+	// and
+	// watch -n 0.2 "curl -s http://ip:port/consensus_state | jq .result.round_state.height_vote_set[0].prevotes_bit_array"
+	// to see my observations
+	//
+	// Additionally, the actual vote information is serialised as a private
+	// struct with no unmarshaller, so we cannot deserialize it without writing our own
+	// https://github.com/cometbft/cometbft/blob/v0.37.2/consensus/types/height_vote_set.go#L261
+	// https://github.com/cometbft/cometbft/blob/v0.37.2/consensus/types/height_vote_set.go#L238
+
+	res, err := cc.cometbftClient.ConsensusState(ctx)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to get consensus state")
+	}
+	var roundState cstypes.RoundStateSimple
+	if err := cmtjson.Unmarshal(res.RoundState, &roundState); err != nil {
+		return nil, errors.Wrapf(err, "failed to unmarshal round_state")
+	}
+
+	parts := strings.Split(roundState.HeightRoundStep, "/")
+	if len(parts) != 3 {
+		return nil, fmt.Errorf("failed to parse height_round_step=%s", roundState.HeightRoundStep)
+	}
+	height, err := strconv.ParseInt(parts[0], 10, 64)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to parse height=%s", parts[0])
+	}
+	roundI64, err := strconv.ParseInt(parts[1], 10, 32)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to parse round=%s", parts[1])
+	}
+	stepU64, err := strconv.ParseUint(parts[2], 10, 8)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to parse step=%s", parts[2])
+	}
+	round, step := int32(roundI64), uint8(stepU64)
+
+	voteSets := []struct {
+		// we only need one field
+		PrevotesBitArray string `json:"prevotes_bit_array"`
+	}{}
+	if err := cmtjson.Unmarshal(roundState.Votes, &voteSets); err != nil {
+		return nil, errors.Wrapf(err, "failed to unmarshal height_vote_set")
+	}
+
+	if len(voteSets) <= int(round) {
+		// this should never be hit, but just in case
+		return nil, fmt.Errorf("len(height_vote_set)=%d <= round=%d", len(voteSets), round)
+	}
+	currPrevotes := voteSets[round].PrevotesBitArray
+	// structure for reference:
+	// "BA{100:____________________________________________________________________________________________________} 0/151215484 = 0.00"
+	// We want this part ->                                                                                           ^^^^^^^^^^^
+	parts = strings.Split(currPrevotes, " ")
+	if len(parts) != 4 {
+		return nil, fmt.Errorf("unrecognized prevotes_bit_array format: %s", currPrevotes)
+	}
+	parts = strings.Split(parts[1], "/")
+	if len(parts) != 2 {
+		return nil, fmt.Errorf("unrecognized prevotes_bit_array format: %s", currPrevotes)
+	}
+	onlineVP, err := strconv.ParseInt(parts[0], 10, 64)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to parse online vp=%s", parts[0])
+	}
+	totalVP, err := strconv.ParseInt(parts[1], 10, 64)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to parse total vp=%s", parts[1])
+	}
+	return &PrevoteInfo{
+		Height:   height,
+		Round:    round,
+		Step:     step,
+		TotalVP:  totalVP,
+		OnlineVP: onlineVP,
+	}, nil
+}
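To make the bit-array parsing above concrete, a worked example with made-up numbers:

	// The bit-array segment contains no spaces, so splitting on " " yields 4 parts.
	s := "BA{100:xxxx________} 26/151215484 = 0.00"
	parts := strings.Split(s, " ")     // ["BA{100:xxxx________}", "26/151215484", "=", "0.00"]
	vp := strings.Split(parts[1], "/") // ["26", "151215484"]
	// onlineVP = 26, totalVP = 151215484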
+func getCodec() *codec.ProtoCodec {
+	ir := types.NewInterfaceRegistry()
+	std.RegisterInterfaces(ir)
+
+	return codec.NewProtoCodec(ir)
+}
+
+func createGrpcConn(host string, grpcPort uint16) (*grpc.ClientConn, error) {
+	grpcConn, err := grpc.NewClient(
+		net.JoinHostPort(host, strconv.Itoa(int(grpcPort))),
+		grpc.WithTransportCredentials(insecure.NewCredentials()),
+		grpc.WithDefaultCallOptions(grpc.ForceCodec(getCodec().GRPCCodec())),
+	)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to connect to grpc")
+	}
+	return grpcConn, nil
+}
diff --git a/internal/pkg/daemon/checks.go b/internal/pkg/daemon/checks.go
new file mode 100644
index 0000000..8c1cf04
--- /dev/null
+++ b/internal/pkg/daemon/checks.go
@@ -0,0 +1,283 @@
+package daemon
+
+import (
+	"context"
+	"slices"
+	"time"
+
+	"blazar/internal/pkg/config"
+	"blazar/internal/pkg/daemon/checks"
+	"blazar/internal/pkg/docker"
+	"blazar/internal/pkg/errors"
+	"blazar/internal/pkg/log"
+	"blazar/internal/pkg/log/notification"
+	checksproto "blazar/internal/pkg/proto/daemon"
+	urproto "blazar/internal/pkg/proto/upgrades_registry"
+	"blazar/internal/pkg/state_machine"
+)
+
+func (d *Daemon) preUpgradeChecks(
+	ctx context.Context,
+	currHeight int64,
+	sm *state_machine.StateMachine,
+	dcc *docker.ComposeClient,
+	composeConfig *config.ComposeCli,
+	cfg *config.PreUpgrade,
+	serviceName string,
+	upgrade *urproto.Upgrade,
+) (int64, error) {
+	ctx = notification.WithUpgradeHeight(ctx, upgrade.Height)
+	logger := log.FromContext(ctx)
+
+	currStep := sm.GetStep(upgrade.Height)
+	if !(currStep == urproto.UpgradeStep_MONITORING || currStep == urproto.UpgradeStep_PRE_UPGRADE_CHECK) {
+		return 0, nil
+	}
+
+	// notify once about the upcoming upgrade
+	if currStep == urproto.UpgradeStep_MONITORING {
+		logger.Infof(
+			"Detected upcoming upgrade (type: %s, tag: %s, chain: %s) Current height: %d, upgrade height: %d",
+			upgrade.Type, upgrade.Tag, d.nodeInfo.DefaultNodeInfo.Network, currHeight, upgrade.Height,
+		).Notify(ctx)
+
+		if len(cfg.Enabled) == 0 {
+			logger.Info("No pre upgrade checks configured, skipping").Notify(ctx)
+		} else {
+			logger.Infof("Running pre upgrade checks: %v", cfg.Enabled).Notify(ctx)
+		}
+	}
+
+	if currStep != urproto.UpgradeStep_PRE_UPGRADE_CHECK {
+		// NOTE: a failed pre-check doesn't mean the upgrade is not possible in the future. The current strategy for blazar is to:
+		// 1. Notify the user on pre-check failure
+		// 2. Keep the upgrade status ACTIVE
+		// 3. When the upgrade hits, blazar will attempt to upgrade the service
+		//
+		// At the time of writing this comment, the only retriable pre-check is the docker image fetch,
+		// which is retried during the upgrade phase.
+		// Not setting FAILED status here is fine.
+		//
+		// Example: a pre-check failed because the network operator didn't register an image tag yet. They'll do it
+		// in the next 100 blocks, but if the status is FAILED the operator can't do anything.
+		d.stateMachine.MustSetStatusAndStep(upgrade.Height, urproto.UpgradeStatus_ACTIVE, urproto.UpgradeStep_PRE_UPGRADE_CHECK)
+	}
+
+	// no need to run checks if none are enabled
+	if len(cfg.Enabled) == 0 {
+		return 0, nil
+	}
+
+	if slices.Contains(cfg.Enabled, checksproto.PreCheck_PULL_DOCKER_IMAGE.String()) {
+		status := sm.GetPreCheckStatus(upgrade.Height, checksproto.PreCheck_PULL_DOCKER_IMAGE)
+		if status != checksproto.CheckStatus_FINISHED {
+			sm.SetPreCheckStatus(upgrade.Height, checksproto.PreCheck_PULL_DOCKER_IMAGE, checksproto.CheckStatus_RUNNING)
+
+			logger.Infof(
+				"Pre upgrade check: %s Checking if upgrade tag %s is available",
+				checksproto.PreCheck_PULL_DOCKER_IMAGE.String(), upgrade.Tag,
+			).Notify(ctx)
+
+			_, newImage, err := checks.PullDockerImage(ctx, d.dcc, serviceName, upgrade.Tag, upgrade.Height)
+			d.reportPreUpgradeRoutine(ctx, upgrade, newImage, err)
+
+			sm.SetPreCheckStatus(upgrade.Height, checksproto.PreCheck_PULL_DOCKER_IMAGE, checksproto.CheckStatus_FINISHED)
+		}
+	}
+
+	if slices.Contains(cfg.Enabled, checksproto.PreCheck_SET_HALT_HEIGHT.String()) {
+		status := sm.GetPreCheckStatus(upgrade.Height, checksproto.PreCheck_SET_HALT_HEIGHT)
+		shouldRun := upgrade.Height <= currHeight+(cfg.Blocks-cfg.SetHaltHeight.DelayBlocks)
+
+		if shouldRun && status != checksproto.CheckStatus_FINISHED {
+			if upgrade.Type == urproto.UpgradeType_NON_GOVERNANCE_COORDINATED {
+				sm.SetPreCheckStatus(upgrade.Height, checksproto.PreCheck_SET_HALT_HEIGHT, checksproto.CheckStatus_RUNNING)
+
+				logger.Infof(
+					"Pre upgrade step: %s restarting daemon with halt-height %d",
+					checksproto.PreCheck_SET_HALT_HEIGHT.String(), upgrade.Height,
+				).Notify(ctx)
+
+				err := dcc.RestartServiceWithHaltHeight(ctx, composeConfig, serviceName, upgrade.Height)
+				d.reportPreUpgradeHaltHeight(ctx, upgrade, err)
+			} else {
+				logger.Infof(
+					"Pre upgrade step: %s restarting daemon with halt-height skipped, as the upgrade is not %s",
+					checksproto.PreCheck_SET_HALT_HEIGHT.String(), urproto.UpgradeType_NON_GOVERNANCE_COORDINATED.String(),
+				).Notify(ctx)
+			}
+
+			sm.SetPreCheckStatus(upgrade.Height, checksproto.PreCheck_SET_HALT_HEIGHT, checksproto.CheckStatus_FINISHED)
+		}
+
+		// Once the halt-height setting is applied, the node will stop itself at the upgrade height.
+		// The trick is that blazar won't receive the block at the upgrade height, because the node
+		// will shut down (this depends on the cosmos-sdk version). Instead we wait for the block
+		// prior to the upgrade height and then try to assert whether the node is still running
+		if upgrade.Type == urproto.UpgradeType_NON_GOVERNANCE_COORDINATED && status == checksproto.CheckStatus_FINISHED && currHeight == upgrade.Height-1 {
+			ticker := time.NewTicker(time.Second)
+			start := time.Now()
+			countSameUpgradeHeights, countSameUpgradePlusHeights := 0, 0
+
+			logger.Infof("Got block %d, waiting for the service to stop itself due to active halt-height setting", currHeight).Notify(ctx)
+
+			for range ticker.C {
+				logger.Info("Checking if the service has stopped itself")
+
+				isRunning, err := dcc.IsServiceRunning(ctx, serviceName, 5*time.Second)
+				if err != nil {
+					return 0, err
+				}
+
+				if isRunning {
+					logger.Infof("Service is still running, waiting for the service to stop itself")
+					if lastHeight, err := d.cosmosClient.GetLatestBlockHeight(ctx); err == nil {
+						// some cosmos-sdk versions are supposed to halt at HALT_HEIGHT but in fact
+						// commit one more block before stopping; this has been fixed upstream, but
+						// we support the old behavior for backward compatibility
+						if lastHeight == upgrade.Height {
+							countSameUpgradeHeights++
+						} else if lastHeight == upgrade.Height+1 {
+							countSameUpgradePlusHeights++
+						}
+					}
+
+					// depending on the cosmos-sdk version, HALT_HEIGHT will either make the node exit
+					// or panic and wait. If we can fetch the upgrade block from the endpoint more than
+					// 5 times, then we are likely in that condition
+					//
+					// why 5 times? Most cosmos-sdk chains won't have block times higher than 5 seconds
+					//
+					// TODO: In future versions there is an endpoint `/cosmos/base/node/v1beta1/config` which returns the `halt-height`
+					if countSameUpgradeHeights > 5 || countSameUpgradePlusHeights > 5 {
+						logger.Warn("HALT_HEIGHT likely worked but didn't shut down the node, continuing").Notify(ctx)
+						return upgrade.Height, nil
+					}
+
+					if time.Since(start) > 2*time.Minute {
+						err := errors.New("The service didn't stop itself after 2 minutes")
+						logger.Err(err).Error("Pre upgrade step: SET_HALT_HEIGHT failed").Notify(ctx)
+						return 0, err
+					}
+					continue
+				}
+
+				logger.Info("The service has stopped itself, continuing with the upgrade")
+				return upgrade.Height, nil
+			}
+		}
+	}
+
+	return 0, nil
+}
+
+func (d *Daemon) reportPreUpgradeHaltHeight(ctx context.Context, upgrade *urproto.Upgrade, err error) {
+	ctx = notification.WithUpgradeHeight(ctx, upgrade.Height)
+	logger := log.FromContext(ctx)
+
+	if err != nil {
+		logger.Err(err).Warnf("Error setting halt height. Node will not stop itself at %d, requiring manual action", upgrade.Height).Notify(ctx)
+	} else {
+		logger.Infof("Halt-height has been set to %d, node will stop itself when it is time to upgrade", upgrade.Height).Notify(ctx)
+	}
+}
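For context, a sketch of what the halt-height restart amounts to. The exact wiring lives in the compose client, so the fragment below is only illustrative; cosmos-sdk nodes generally accept a halt height via app.toml or a start flag:

	# Illustrative only: after RestartServiceWithHaltHeight the node is expected
	# to run with a halt height equal to the upgrade height, e.g. via app.toml:
	#   halt-height = 4724086
	# or the equivalent start flag:
	#   appd start --halt-height 4724086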
+
+func (d *Daemon) reportPreUpgradeRoutine(ctx context.Context, upgrade *urproto.Upgrade, newImage string, err error) {
+	ctx = notification.WithUpgradeHeight(ctx, upgrade.Height)
+	logger := log.FromContext(ctx)
+
+	if err != nil {
+		msg := "Error performing pre upgrade check. I'll not be able to perform the upgrade, please "
+		if upgrade.Tag == "" {
+			msg += "register the image tag"
+		} else {
+			msg += "check why the image is not available on the host"
+		}
+		logger.Err(err).Warn(msg).Notify(ctx)
+	} else {
+		logger.Infof("Upgrade image: %s\nI'll attempt to upgrade when upgrade height is hit", newImage).Notify(ctx)
+	}
+}
+
+func (d *Daemon) postUpgradeChecks(ctx context.Context, sm *state_machine.StateMachine, cfg *config.PostUpgrade, upgradeHeight int64) (err error) {
+	defer func() {
+		// ensure we update the status to failed if any error was encountered
+		if err != nil {
+			d.stateMachine.MustSetStatus(upgradeHeight, urproto.UpgradeStatus_FAILED)
+		}
+	}()
+	ctx = notification.WithUpgradeHeight(ctx, upgradeHeight)
+	logger := log.FromContext(ctx)
+
+	currStep := sm.GetStep(upgradeHeight)
+	if !(currStep == urproto.UpgradeStep_COMPOSE_FILE_UPGRADE || currStep == urproto.UpgradeStep_POST_UPGRADE_CHECK) {
+		return nil
+	}
+
+	// notify once about the post checks
+	if currStep == urproto.UpgradeStep_COMPOSE_FILE_UPGRADE {
+		if len(cfg.Enabled) == 0 {
+			logger.Info("No post upgrade checks configured, skipping").Notify(ctx)
+		} else {
+			logger.Infof("Running post upgrade checks: %v", cfg.Enabled).Notify(ctx)
+		}
+	}
+
+	if currStep != urproto.UpgradeStep_POST_UPGRADE_CHECK {
+		d.stateMachine.SetStep(upgradeHeight, urproto.UpgradeStep_POST_UPGRADE_CHECK)
+	}
+
+	// no need to run checks if none are enabled
+	if len(cfg.Enabled) == 0 {
+		return nil
+	}
+
+	if slices.Contains(cfg.Enabled, checksproto.PostCheck_GRPC_RESPONSIVE.String()) {
+		status := sm.GetPostCheckStatus(upgradeHeight, checksproto.PostCheck_GRPC_RESPONSIVE)
+		if status != checksproto.CheckStatus_FINISHED {
+			sm.SetPostCheckStatus(upgradeHeight, checksproto.PostCheck_GRPC_RESPONSIVE, checksproto.CheckStatus_RUNNING)
+
+			logger.Infof("Post upgrade check: %s Waiting for the grpc and cometbft services to be responsive", checksproto.PostCheck_GRPC_RESPONSIVE.String()).Notify(ctx)
+
+			_, err = checks.GrpcResponsive(ctx, d.cosmosClient, cfg.GrpcResponsive)
+			sm.SetPostCheckStatus(upgradeHeight, checksproto.PostCheck_GRPC_RESPONSIVE, checksproto.CheckStatus_FINISHED)
+
+			if err != nil {
+				return errors.Wrapf(err, "post upgrade grpc-responsive check failed")
+			}
+		}
+	}
+
+	if slices.Contains(cfg.Enabled, checksproto.PostCheck_FIRST_BLOCK_VOTED.String()) {
+		status := sm.GetPostCheckStatus(upgradeHeight, checksproto.PostCheck_FIRST_BLOCK_VOTED)
+		if status != checksproto.CheckStatus_FINISHED {
+			sm.SetPostCheckStatus(upgradeHeight, checksproto.PostCheck_FIRST_BLOCK_VOTED, checksproto.CheckStatus_RUNNING)
+
+			logger.Infof("Post upgrade check: %s Waiting for the on-chain block at upgrade height=%d to be signed by us", checksproto.PostCheck_FIRST_BLOCK_VOTED.String(), upgradeHeight).Notify(ctx)
+
+			err = checks.NextBlockSignedPostCheck(ctx, d.cosmosClient, cfg.FirstBlockVoted, upgradeHeight)
+			sm.SetPostCheckStatus(upgradeHeight, checksproto.PostCheck_FIRST_BLOCK_VOTED, checksproto.CheckStatus_FINISHED)
+
+			if err != nil {
+				return errors.Wrapf(err, "post upgrade first-block-voted check failed")
+			}
+		}
+	}
+
+	if slices.Contains(cfg.Enabled, checksproto.PostCheck_CHAIN_HEIGHT_INCREASED.String()) {
+		status := sm.GetPostCheckStatus(upgradeHeight, checksproto.PostCheck_CHAIN_HEIGHT_INCREASED)
+		if status != checksproto.CheckStatus_FINISHED {
+			sm.SetPostCheckStatus(upgradeHeight, checksproto.PostCheck_CHAIN_HEIGHT_INCREASED, checksproto.CheckStatus_RUNNING)
+
+			logger.Infof(
+				"Post upgrade check: %s Waiting for the on-chain latest block height to be > upgrade height=%d",
+				checksproto.PostCheck_CHAIN_HEIGHT_INCREASED.String(), upgradeHeight,
+			).Notify(ctx)
+
+			err = checks.ChainHeightIncreased(ctx, d.cosmosClient, cfg.ChainHeightIncreased, upgradeHeight)
+			sm.SetPostCheckStatus(upgradeHeight, checksproto.PostCheck_CHAIN_HEIGHT_INCREASED, checksproto.CheckStatus_FINISHED)
+
+			if err != nil {
+				return errors.Wrapf(err, "post upgrade chain-height-increased check failed")
+			}
+		}
+	}
+	return nil
+}
diff --git a/internal/pkg/daemon/checks/post.go b/internal/pkg/daemon/checks/post.go
new file mode 100644
index 0000000..625d073
--- /dev/null
+++ b/internal/pkg/daemon/checks/post.go
@@ -0,0 +1,273 @@
+package checks
+
+import (
+	"context"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	"blazar/internal/pkg/config"
+	"blazar/internal/pkg/cosmos"
+	"blazar/internal/pkg/errors"
+	"blazar/internal/pkg/log"
+
+	"github.com/cometbft/cometbft/libs/bytes"
+)
+
+type CheckBlockStatus int
+
+const (
+	// Should not be possible - but prevents usage of a default value on a CheckBlockStatus
+	InvalidBlockState CheckBlockStatus = iota
+	// Blazar could not observe whether we voted in time (chain passed upgrade height before the check)
+	BlockSkipped
+	// Blazar observed the validator's signature in the pre-vote
+	BlockSigned
+	// Blazar has not yet observed the validator's signature in the pre-vote
+	BlockNotSignedYet
+)
+
+func GrpcResponsive(ctx context.Context, cosmosClient *cosmos.Client, cfg *config.GrpcResponsive) (int64, error) {
+	logger := log.FromContext(ctx)
+
+	ticker := time.NewTicker(cfg.PollInterval)
+	height := int64(0)
+	timeout := time.NewTimer(cfg.Timeout)
+
+	grpcResponsive, cometbftResponsive := false, false
+
+	for {
+		select {
+		case <-ticker.C:
+			if !grpcResponsive {
+				// let's test if the status endpoint is working
+				var err error
+				height, err = cosmosClient.GetLatestBlockHeight(ctx)
+				if err != nil {
+					logger.Err(err).Warn("Grpc endpoint gives an error, will retry")
+				} else {
+					if height > 0 {
+						grpcResponsive = true
+					} else {
+						// this should never be reached, but just in case
+						return 0, fmt.Errorf("grpc endpoint is now responsive but observed chain height=%d <= 0, assuming upgrade failed", height)
+					}
+				}
+			}
+			if !cometbftResponsive {
+				// let's test if the /consensus_state endpoint is working
+				pvp, err := cosmosClient.GetPrevoteInfo(ctx)
+				if err != nil {
+					logger.Err(err).Warn("Cometbft endpoint gives an error, will retry")
+				} else {
+					if pvp.TotalVP > 0 {
+						cometbftResponsive = true
+					} else {
+						// this should never be reached, but just in case
+						return 0, fmt.Errorf("cometbft endpoint is now responsive but observed total VP=%d <= 0, assuming upgrade failed", pvp.TotalVP)
+					}
+				}
+			}
+			if cometbftResponsive && grpcResponsive {
+				logger.Infof("Post upgrade check passed, grpc and cometbft services are now responsive, observed chain height: %d.", height).Notify(ctx)
+				return height, nil
+			}
+		case <-timeout.C:
+			return 0, fmt.Errorf("services responsiveness post-upgrade check timed out after %s with status grpc responsive=%t cometbft responsive=%t, assuming upgrade failed", cfg.Timeout.String(), grpcResponsive, cometbftResponsive)
+		case <-ctx.Done():
+			return 0, errors.Wrapf(ctx.Err(), "grpc responsiveness post-upgrade check cancelled due to context timeout")
+		}
+	}
+}
+
+func ChainHeightIncreased(ctx context.Context, cosmosClient *cosmos.Client, cfg *config.ChainHeightIncreased, upgradeHeight int64) error {
+	logger := log.FromContext(ctx)
+
+	ticker := time.NewTicker(cfg.PollInterval)
+	vpReportTicker := time.NewTicker(cfg.NotifInterval)
+	timeout := time.NewTimer(cfg.Timeout)
+
+	for {
+		select {
+		case <-vpReportTicker.C:
+			pvp, err := cosmosClient.GetPrevoteInfo(ctx)
+			if err != nil {
+				logger.Err(err).Warn("Error in getting prevote vp, will retry")
+				continue
+			}
+
+			switch {
+			case pvp.Height == upgradeHeight+1:
+				logger.Infof("Post upgrade check: height did not increase yet. Prevote status: online VP=%d total VP=%d 2/3 VP=%f online VP ratio=%f", pvp.OnlineVP, pvp.TotalVP, (2.0*float32(pvp.TotalVP))/3.0, float32(pvp.OnlineVP)/float32(pvp.TotalVP)).Notify(ctx)
+			case pvp.Height > upgradeHeight+1:
+				logger.Infof("Queried for prevote VP but height observed=%d > upgrade height=%d, skipping notification as this post upgrade check should pass soon", pvp.Height, upgradeHeight)
+			default:
+				// this should never be hit
+				return fmt.Errorf("height decreased while querying for prevote vp: %d, assuming upgrade failed", pvp.Height)
+			}
+		case <-ticker.C:
+			// we rely on another endpoint for height, because I don't
+			// really trust the /consensus_state endpoint yet.
+			newHeight, err := cosmosClient.GetLatestBlockHeight(ctx)
+			if err != nil {
+				logger.Err(err).Warn("Grpc endpoint gives an error, will retry")
+				continue
+			}
+			if newHeight > upgradeHeight {
+				logger.Infof("Post upgrade check passed, chain height increased, newly observed chain height: %d. All Post upgrade checks passed.", newHeight).Notify(ctx)
+				return nil
+			}
+
+			if newHeight == upgradeHeight {
+				logger.Info("Height didn't increase yet, will retry")
+			} else {
+				// this should never be reached, but just in case
+				return fmt.Errorf("height decreased after grpc endpoint became responsive: %d, assuming upgrade failed", newHeight)
+			}
+		case <-timeout.C:
+			return fmt.Errorf("height increase post-upgrade check timed out after %s, assuming upgrade failed", cfg.Timeout.String())
+		case <-ctx.Done():
+			return errors.Wrapf(ctx.Err(), "height increase post-upgrade check cancelled due to context timeout")
+		}
+	}
+}
+
+type RoundState struct {
+	HeightRoundStep   string    `json:"height/round/step"`
+	StartTime         time.Time `json:"start_time"`
+	ProposalBlockHash string    `json:"proposal_block_hash"`
+	LockedBlockHash   string    `json:"locked_block_hash"`
+	ValidBlockHash    string    `json:"valid_block_hash"`
+	HeightVoteSet     []struct {
+		Round              int      `json:"round"`
+		Prevotes           []string `json:"prevotes"`
+		PrevotesBitArray   string   `json:"prevotes_bit_array"`
+		Precommits         []string `json:"precommits"`
+		PrecommitsBitArray string   `json:"precommits_bit_array"`
+	} `json:"height_vote_set"`
+	Proposer struct {
+		Address string `json:"address"`
+		Index   int    `json:"index"`
+	} `json:"proposer"`
+}
+
+type PreVote struct {
+	SignaturePrefix string
+}
+
+var prevoteRegex = regexp.MustCompile(`Vote\{\d+:([A-Fa-f0-9]+)\s`)
+
+func ParsePreVote(s string) (PreVote, error) {
+	matches := prevoteRegex.FindStringSubmatch(s)
+	if len(matches) < 2 {
+		return PreVote{}, errors.New("signature not found in prevote string")
+	}
+
+	return PreVote{
+		SignaturePrefix: matches[1],
+	}, nil
+}
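To make the matching below concrete: a prevote line only renders a truncated hex prefix of the validator address after the vote index, which is why HasAddressSigned can only compare prefixes. A worked example with a made-up vote string:

	// Hypothetical prevote string, shaped like cometbft's Vote rendering:
	s := "Vote{43:ABCDEF012345 22107464/00/SIGNED_MSG_TYPE_PREVOTE(Prevote) ...}"
	pv, _ := ParsePreVote(s)
	// pv.SignaturePrefix == "ABCDEF012345": a prefix of the validator address,
	// hence the strings.HasPrefix comparison in HasAddressSigned below.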
preVoteStr) + } + upperPrefix := strings.ToUpper(preVote.SignaturePrefix) + if strings.HasPrefix(validatorAddress, upperPrefix) { + return true, nil + } + } + } + return false, nil +} + +func CheckBlockSignedBy(address bytes.HexBytes, height int64, consensusState json.RawMessage) (CheckBlockStatus, error) { + var rs RoundState + if err := json.Unmarshal(consensusState, &rs); err != nil { + return InvalidBlockState, errors.Wrapf(err, "error parsing consensus state") + } + parts := strings.Split(rs.HeightRoundStep, "/") + currentHeight, err := strconv.ParseInt(parts[0], 10, 64) + if err != nil { + return InvalidBlockState, errors.Wrapf(err, "error parsing height from consensus state") + } + + if currentHeight > height { + return BlockSkipped, nil + } + + signed, err := HasAddressSigned(address, rs) + + if err != nil { + return InvalidBlockState, err + } + if signed { + return BlockSigned, nil + } + return BlockNotSignedYet, nil +} + +func NextBlockSignedPostCheck(ctx context.Context, cosmosClient *cosmos.Client, postUpgradeChecks *config.FirstBlockVoted, observedHeight int64) error { + logger := log.FromContext(ctx) + + ticker := time.NewTicker(postUpgradeChecks.PollInterval) + defer ticker.Stop() + notifTicker := time.NewTicker(postUpgradeChecks.NotifInterval) + defer notifTicker.Stop() + timeout := time.NewTimer(postUpgradeChecks.Timeout) + defer timeout.Stop() + + status, err := cosmosClient.GetCometbftClient().Status(ctx) + if err != nil { + return errors.Wrapf(err, "could not get node status") + } + + nodeAddress := status.ValidatorInfo.Address + logger.Infof("Post upgrade check 2: Waiting to sign the first block after upgrade=%d, address=%s", observedHeight, nodeAddress.String()).Notify(ctx) + if status.ValidatorInfo.VotingPower == 0 { + logger.Info("Post upgrade check 2: skipping signature check, as VP is 0").Notify(ctx) + return nil + } + for { + select { + case <-notifTicker.C: + logger.Info("Post upgrade check: block not signed yet.").Notify(ctx) + case <-ticker.C: + consensusState, err := cosmosClient.GetCometbftClient().ConsensusState(ctx) + if err != nil { + logger.Err(err).Warn("Error in getting consensus state, will retry") + continue + } + state, err := CheckBlockSignedBy(nodeAddress, observedHeight, consensusState.RoundState) + if err != nil { + logger.Err(err).Warn("Error checking if we voted, will retry") + continue + } + switch state { + case BlockSkipped: + logger.Info("Post upgrade check 2 inconclusive, height increased before we could observe our own vote").Notify(ctx) + return nil + case BlockSigned: + logger.Info("Post upgrade check 2 successful, observed our own signature on the upgrade block").Notify(ctx) + return nil + case BlockNotSignedYet: + continue + default: + panic(fmt.Sprintf("programming error: state from block at %d was %d, which is illegal", observedHeight, state)) + } + case <-timeout.C: + return fmt.Errorf("post-upgrade check for first block signature timed out after %s, assuming upgrade failed", postUpgradeChecks.Timeout.String()) + case <-ctx.Done(): + return errors.Wrapf(ctx.Err(), "post-upgrade check for first block signature cancelled due to context timeout") + } + } +} diff --git a/internal/pkg/daemon/checks/post_test.go b/internal/pkg/daemon/checks/post_test.go new file mode 100644 index 0000000..ec4f9c5 --- /dev/null +++ b/internal/pkg/daemon/checks/post_test.go @@ -0,0 +1,63 @@ +package checks + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/cometbft/cometbft/libs/bytes" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) +
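+// TestBlockSignedOneSigner drives CheckBlockSignedBy with a canned /consensus_state payload and covers the three reachable outcomes: the chain already moved past the target height (BlockSkipped), our address prefix is absent from the prevotes (BlockNotSignedYet), and our address prefix matches (BlockSigned). +func 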
TestBlockSignedOneSigner(t *testing.T) { + tests := []struct { + name string + stateHeight int + signedBy string + expectedState CheckBlockStatus + }{ + { + name: "Checked Too Late", + stateHeight: 101, + signedBy: "DEADBEEF", + expectedState: BlockSkipped, + }, + { + name: "Did not sign yet", + stateHeight: 100, + signedBy: "DEADBEEF", + expectedState: BlockNotSignedYet, + }, + { + name: "Signed", + stateHeight: 100, + signedBy: "ABCDEF", + expectedState: BlockSigned, + }, + } + + nodeAddress := bytes.HexBytes{0xAB, 0xCD, 0xEF} + var expectSignatureOnHeight int64 = 100 + + for _, test := range tests { + // response from client.GetConsensusState() + consensusState := fmt.Sprintf(`{ + "height/round/step": "%d/0/1", + "height_vote_set": [ + { + "round": 0, + "prevotes": [ + "nil-Vote", + "Vote{43:%s 22107464/00/SIGNED_MSG_TYPE_PREVOTE(Prevote) 000000000000 8CB949D3858C 000000000000 @ 2024-09-09T12:25:51.227378426Z}" + ] + } + ] +}`, test.stateHeight, test.signedBy) + t.Run(test.name, func(t *testing.T) { + state, err := CheckBlockSignedBy(nodeAddress, expectSignatureOnHeight, json.RawMessage(consensusState)) + require.NoError(t, err) + assert.Equal(t, test.expectedState, state) + }) + } +} diff --git a/internal/pkg/daemon/checks/pre.go b/internal/pkg/daemon/checks/pre.go new file mode 100644 index 0000000..2fd2f4c --- /dev/null +++ b/internal/pkg/daemon/checks/pre.go @@ -0,0 +1,41 @@ +package checks + +import ( + "context" + "fmt" + + "blazar/internal/pkg/daemon/util" + "blazar/internal/pkg/docker" + "blazar/internal/pkg/errors" +) + +// return current image, upgrade image, error +func PullDockerImage(ctx context.Context, dcc *docker.ComposeClient, serviceName, upgradeTag string, upgradeHeight int64) (string, string, error) { + if upgradeTag == "" { + return "", "", fmt.Errorf("failed to check docker image, upgrade tag is empty, for upgrade height: %d", upgradeHeight) + } + + currImage, newImage, err := util.GetCurrImageUpgradeImage(dcc, serviceName, upgradeTag) + if err != nil { + return "", "", errors.Wrapf(err, "failed to get new upgrade image for height: %d, tag: %s", upgradeHeight, upgradeTag) + } + + isImagePresent, err := dcc.DockerClient().IsImagePresent(ctx, newImage) + if err != nil { + return "", "", errors.Wrapf(err, "failed to check if new image %s is present", newImage) + } + + if !isImagePresent { + // let's try to pull once + platform, err := dcc.GetPlatform(serviceName) + if err != nil { + return "", "", errors.Wrapf(err, "new image %s is not present on host and failed to get platform from compose file", newImage) + } + + if err := dcc.DockerClient().PullImage(ctx, newImage, platform); err != nil { + return "", "", errors.Wrapf(err, "new image %s is not present on host and pull failed", newImage) + } + } + + return currImage, newImage, nil +} diff --git a/internal/pkg/daemon/daemon.go b/internal/pkg/daemon/daemon.go new file mode 100644 index 0000000..9ecdfde --- /dev/null +++ b/internal/pkg/daemon/daemon.go @@ -0,0 +1,546 @@ +package daemon + +import ( + "context" + "fmt" + "net" + "net/http" + "slices" + "strconv" + "strings" + "time" + + "blazar/internal/pkg/chain_watcher" + "blazar/internal/pkg/config" + "blazar/internal/pkg/cosmos" + "blazar/internal/pkg/daemon/checks" + "blazar/internal/pkg/docker" + "blazar/internal/pkg/errors" + "blazar/internal/pkg/log" + "blazar/internal/pkg/log/notification" + "blazar/internal/pkg/metrics" + blazarproto "blazar/internal/pkg/proto/blazar" + checksproto "blazar/internal/pkg/proto/daemon" + urproto 
"blazar/internal/pkg/proto/upgrades_registry" + vrproto "blazar/internal/pkg/proto/version_resolver" + sm "blazar/internal/pkg/state_machine" + "blazar/internal/pkg/upgrades_registry" + + "github.com/cometbft/cometbft/libs/bytes" + "github.com/cosmos/cosmos-sdk/client/grpc/tmservice" + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" +) + +type Daemon struct { + // extenral clients + dcc *docker.ComposeClient + dc *docker.Client + cosmosClient *cosmos.Client + + // internal state handling + ur *upgrades_registry.UpgradeRegistry + stateMachine *sm.StateMachine + + // telemetry + metrics *metrics.Metrics + + // initial counters + startupHeight int64 + nodeAddress bytes.HexBytes + nodeInfo *tmservice.GetNodeInfoResponse + + // tracking current height + currHeight int64 + currHeightTime time.Time + observedBlockSpeeds []time.Duration + currBlockSpeed time.Duration +} + +func NewDaemon(ctx context.Context, cfg *config.Config, m *metrics.Metrics) (*Daemon, error) { + if _, err := docker.LoadComposeFile(cfg.ComposeFile); err != nil { + return nil, errors.Wrapf(err, "failed to parse docker compose file") + } + + // setup updates registry + ur, err := upgrades_registry.NewUpgradesRegistryFromConfig(cfg) + if err != nil { + return nil, errors.Wrapf(err, "failed to load upgrade registry") + } + + // setup docker compose client + dc, err := docker.NewClientWithConfig(ctx, cfg.CredentialHelper) + if err != nil { + return nil, errors.Wrapf(err, "failed to create docker client") + } + dcc, err := docker.NewComposeClient(dc, cfg.VersionFile, cfg.ComposeFile, cfg.UpgradeMode) + if err != nil { + return nil, errors.Wrapf(err, "failed to create docker compose client") + } + + // setup new cosmos client + cosmosClient, err := cosmos.NewClient(cfg.Clients.Host, cfg.Clients.GrpcPort, cfg.Clients.CometbftPort, cfg.Clients.Timeout) + if err != nil { + return nil, errors.Wrapf(err, "failed to create cosmos client") + } + + if err := cosmosClient.StartCometbftClient(); err != nil { + return nil, errors.Wrapf(err, "failed to start cometbft client") + } + + return &Daemon{ + dcc: dcc, + dc: dc, + cosmosClient: cosmosClient, + metrics: m, + + // setup by Init() + startupHeight: 0, + currHeight: 0, + currHeightTime: time.Time{}, + observedBlockSpeeds: make([]time.Duration, 5), + currBlockSpeed: 0, + + ur: ur, + stateMachine: ur.GetStateMachine(), + }, nil +} + +func (d *Daemon) Init(ctx context.Context, cfg *config.Config) error { + logger := log.FromContext(ctx).With("package", "daemon") + logger.Info("Starting up blazar daemon...") + + // mark the daemon is up + d.metrics.Up.Set(1) + + // test docker and docker compose + logger.Info("Setting up docker and docker compose clients") + if _, err := d.dcc.DockerClient().ContainerList(ctx, true); err != nil { + return errors.Wrapf(err, "failed to fetch list of containers from docker client") + } + + if _, err := d.dcc.Version(ctx); err != nil { + return errors.Wrapf(err, "could not find docker compose plugin") + } + + // test cosmos client + logger.Info("Attempting to get data from /status endpoint with Cosmos RPC client") + status, err := d.cosmosClient.GetStatus(ctx) + if err != nil { + return errors.Wrapf(err, "failed to get status response") + } + + // display information about the node + d.nodeInfo, err = d.cosmosClient.NodeInfo(ctx) + if err != nil { + return errors.Wrapf(err, "failed to get node info") + } + logger.Infof("Connected to the %s node ID: %s", 
d.nodeInfo.ApplicationVersion.Name, d.nodeInfo.DefaultNodeInfo.DefaultNodeID) + + // if the env prefix is not set, we default it to the uppercased app name plus "_" (e.g. "GAIAD_") + if cfg.Compose.EnvPrefix == "" { + cfg.Compose.EnvPrefix = strings.ToUpper(d.nodeInfo.ApplicationVersion.AppName) + "_" + } + logger.Infof("Using env prefix: %s", cfg.Compose.EnvPrefix) + + // ensure required settings and flags are present in the compose file + if err := validateComposeSettings(cfg); err != nil { + return errors.Wrapf(err, "failed to validate docker compose settings") + } + + logger.Infof("Observed latest block height: %d", status.SyncInfo.LatestBlockHeight) + d.currHeight = status.SyncInfo.LatestBlockHeight + d.currHeightTime = status.SyncInfo.LatestBlockTime + d.startupHeight = d.currHeight + + logger.Infof("Observed node address: %s", status.ValidatorInfo.Address.String()) + d.nodeAddress = status.ValidatorInfo.Address + + // test consensus state endpoint + logger.Info("Attempting to get consensus state") + pvp, err := d.cosmosClient.GetPrevoteInfo(ctx) + if err != nil { + return errors.Wrapf(err, "failed to get consensus state") + } + logger.Infof( + "Total VP: %d, Node VP: %d, Node share: %.2f", pvp.TotalVP, status.ValidatorInfo.VotingPower, + (float64(status.ValidatorInfo.VotingPower)/float64(pvp.TotalVP))*100, + ) + + // fetch future upgrades + logger.Info("Attempting to fetch upgrades from all providers") + if _, _, _, _, err := d.ur.Update(ctx, d.currHeight, true); err != nil { + return errors.Wrapf(err, "failed getting upgrades from all providers") + } + + totalUpgrades := d.ur.GetAllUpgradesWithCache() + logger.Infof("Total %d resolved upgrades from all providers", len(totalUpgrades)) + + overriddenUpgrades := d.ur.GetOverriddenUpgradesWithCache() + logger.Infof("Total %d upgrades had more than one entry, blazar picked the one with the lowest priority", len(overriddenUpgrades)) + + upgrades := d.ur.GetUpcomingUpgradesWithCache(d.currHeight, urproto.UpgradeStatus_ACTIVE) + logger.Infof("Found %d future upgrades with status ACTIVE (waiting for execution)", len(upgrades)) + + for _, upgrade := range upgrades { + if _, ok := overriddenUpgrades[upgrade.Height]; ok { + logger.Infof("[height=%d] The proposal from: %s with name '%s' and tag '%s' won by priority (%d) over %d other entries", upgrade.Height, upgrade.Source, upgrade.Name, upgrade.Tag, upgrade.Priority, len(overriddenUpgrades[upgrade.Height])) + } + } + + // export metrics related to all future proposals + d.updateMetrics() + + return nil +} + +func (d *Daemon) ListenAndServe(ctx context.Context, cfg *config.Config) error { + httpAddr := net.JoinHostPort(cfg.Host, strconv.Itoa(int(cfg.HTTPPort))) + grpcAddr := net.JoinHostPort(cfg.Host, strconv.Itoa(int(cfg.GrpcPort))) + + grpcListener, err := net.Listen("tcp", grpcAddr) + if err != nil { + return errors.Wrapf(err, "error listening on grpc address") + } + + server := grpc.NewServer() + urServer := NewServer(cfg, d.ur) + urproto.RegisterUpgradeRegistryServer(server, urServer) + vrproto.RegisterVersionResolverServer(server, urServer) + blazarproto.RegisterBlazarServer(server, urServer) + + go func() { + logger := log.FromContext(ctx) + if err = server.Serve(grpcListener); err != nil { + logger.Err(err).Error("error serving grpc server") + panic(err) + } + }() + + // let's wait for the server to start + time.Sleep(time.Second) + + grpcConn, err := grpc.NewClient(grpcAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + return errors.Wrapf(err, "couldn't dial to self grpc address") + } +
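+ // the HTTP API is served through grpc-gateway: REST requests hit the mux below and are proxied over the loopback gRPC connection dialed above, so both interfaces share a single server implementation + mux := 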
runtime.NewServeMux() + + err = urproto.RegisterUpgradeRegistryHandler(ctx, mux, grpcConn) + if err != nil { + return errors.Wrapf(err, "failed registering upgrades registry handler") + } + + err = vrproto.RegisterVersionResolverHandler(ctx, mux, grpcConn) + if err != nil { + return errors.Wrapf(err, "failed registering versions resolver handler") + } + + err = blazarproto.RegisterBlazarHandler(ctx, mux, grpcConn) + if err != nil { + return errors.Wrapf(err, "failed registering blazar handler") + } + + if err = metrics.RegisterHandler(mux); err != nil { + return errors.Wrapf(err, "failed registering metrics handler") + } + + if err = RegisterIndexHandler(mux, d, cfg.Watchers.UPInterval); err != nil { + return errors.Wrapf(err, "failed registering status handler") + } + + // start the http server + // this is used by metrics and upgrades registry + go func() { + if err := http.ListenAndServe(httpAddr, mux); err != nil { + fmt.Println("error serving http server", err) + panic(err) + } + }() + return nil +} + +func (d *Daemon) Run(ctx context.Context, cfg *config.Config) error { + logger := log.FromContext(ctx).With("compose file", cfg.ComposeFile) + ctx = logger.WithContext(ctx) + + for { + // step 0: wait for upgrade height + upgradeHeight, err := d.waitForUpgrade(ctx, cfg) + if err != nil { + // failure to wait for the upgrade is a critical error, therefore we stop the daemon + logger.Err(err).Error("Monitor routine failed").Notify(ctx) + return errors.Wrapf(err, "monitor routine failed") + } + + // step 0a: setup the context with the upgrade height for all further notifications + ctxWithHeight := notification.WithUpgradeHeight(ctx, upgradeHeight) + + // step 1: perform upgrade + err = d.performUpgrade(ctxWithHeight, &cfg.Compose, cfg.ComposeService, upgradeHeight) + d.updateMetrics() + + if err != nil { + ctxWithHeight := notification.WithUpgradeHeight(ctx, upgradeHeight) + logger.Err(err).Error("Upgrade routine failed").Notify(ctxWithHeight) + + // failure to perform the upgrade is not a critical error, therefore we let the daemon continue to run + continue + } + + // step 2: wait for post-upgrade checks + err = d.postUpgradeChecks(ctxWithHeight, d.stateMachine, &cfg.Checks.PostUpgrade, upgradeHeight) + d.updateMetrics() + + if err != nil { + ctxWithHeight := notification.WithUpgradeHeight(ctx, upgradeHeight) + logger.Err(err).Error("Post-upgrade check failed").Notify(ctxWithHeight) + + // failure of a post-upgrade check is not a critical error, therefore we let the daemon continue to run + continue + } + + // step 3: mark upgrade as completed + d.stateMachine.MustSetStatus(upgradeHeight, urproto.UpgradeStatus_COMPLETED) + d.updateMetrics() + } +} + +func (d *Daemon) waitForUpgrade(ctx context.Context, cfg *config.Config) (int64, error) { + logger := log.FromContext(ctx) + + logger.Infof("Monitoring %s for new upgrades", cfg.UpgradeInfoFilePath()) + uiw, err := chain_watcher.NewUpgradeInfoWatcher(cfg.UpgradeInfoFilePath(), cfg.Watchers.UIInterval) + if err != nil { + return 0, errors.Wrapf(err, "failed to start upgrade-info.json poller") + } + + logger.Info("Monitoring on-chain latest block height") + var hw *chain_watcher.HeightWatcher + if cfg.Watchers.HInterval > 0 { + hw = chain_watcher.NewPeriodicHeightWatcher(ctx, d.cosmosClient, cfg.Watchers.HInterval) + } else { + hw, err = chain_watcher.NewStreamingHeightWatcher(ctx, d.cosmosClient) + if err != nil { + return 0, errors.Wrapf(err, "failed to start streaming height watcher") + } + } +
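+ // watch on-chain governance proposals so that newly scheduled upgrades land in the registry + logger.Info("Monitoring on-chain upgrade 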
proposals") + upw := chain_watcher.NewUpgradeProposalsWatcher(ctx, d.cosmosClient, d.ur, cfg.Watchers.UPInterval) + + // blockDelta is used to print the current block height every 10 blocks + blockDelta := int64(0) + + for { + select { + case newHeight := <-hw.Heights: + if newHeight.Error != nil { + d.metrics.HwErrs.Inc() + logger.Err(err).Error("Error received from HeightWatcher") + continue + } + currBlockHeight := newHeight.Height + lastBlockHeight := d.currHeight + + // update the block speed and height + d.updateHeightAndBlockSpeed(currBlockHeight) + + // display the current block height every 10 blocks (unless in debug mode) + blockDelta += (currBlockHeight - lastBlockHeight) + if blockDelta >= 10 { + blockDelta = 0 + + logger.Infof("Current observed height: %d", currBlockHeight) + } else { + logger.Debugf("Current observed height: %d", currBlockHeight) + } + + // move to core logic + d.updateMetrics() + + upcomingUpgrades := d.ur.GetUpcomingUpgradesWithCache(d.currHeight, urproto.UpgradeStatus_ACTIVE) + if len(upcomingUpgrades) > 0 { + futureUpgrade := upcomingUpgrades[0] + + // this case should ideally not happen, unless the upgrade status is not persisted and blazar is restarted + if d.startupHeight > futureUpgrade.Height { + logger.Warnf("Skipping upgrade at height %d since it is before daemon startup height %d", futureUpgrade.Height, d.startupHeight) + continue + } + + // let the user know that blazar sees the upcoming upgrade + if d.stateMachine.GetStep(futureUpgrade.Height) == urproto.UpgradeStep_NONE { + d.stateMachine.SetStep(futureUpgrade.Height, urproto.UpgradeStep_MONITORING) + } + + // perform pre upgrade upgrade checks if we are close to the upgrade height + if futureUpgrade.Height < d.currHeight+cfg.Checks.PreUpgrade.Blocks { + newHeight, preErr := d.preUpgradeChecks(ctx, d.currHeight, d.stateMachine, d.dcc, &cfg.Compose, &cfg.Checks.PreUpgrade, cfg.ComposeService, futureUpgrade) + if preErr != nil { + d.stateMachine.MustSetStatus(futureUpgrade.Height, urproto.UpgradeStatus_FAILED) + } + d.updateMetrics() + + // cheat and update the height if we have a new height + if newHeight != 0 { + logger.Infof("Setting observed height to: %d", futureUpgrade.Height) + d.currHeight = newHeight + } + } + + // perform upgrade if we have hit the upgrade height + // NOTE: Governance coordinated upgrades are triggered by the upgrade info watcher (upgrade-info.json) + if futureUpgrade.Height <= d.currHeight && slices.Contains([]urproto.UpgradeType{ + urproto.UpgradeType_NON_GOVERNANCE_COORDINATED, + urproto.UpgradeType_NON_GOVERNANCE_UNCOORDINATED, + }, futureUpgrade.Type) { + // cancel existing watchers + hw.Cancel() + upw.Cancel() + + logger.Infof("Received upgrade height from the chain rpc: %v", futureUpgrade.Height) + return futureUpgrade.Height, nil + } + } + case upgrade := <-uiw.Upgrades: + if upgrade.Error != nil { + d.metrics.UiwErrs.Inc() + logger.Err(err).Error("Error received from UpgradesInfoWatcher") + } + d.updateMetrics() + + upgradeHeight := upgrade.Plan.Height + + // cancel existing watchers + hw.Cancel() + upw.Cancel() + + logger.Infof("Received upgrade data from upgrade-info.json: %v", upgrade) + return upgradeHeight, nil + case err := <-upw.Errors: + d.metrics.UpwErrs.Inc() + logger.Err(err).Error("Error received from UpgradesProposalsWatcher") + } + } +} + +func (d *Daemon) performUpgrade( + ctx context.Context, + compose *config.ComposeCli, + serviceName string, + upgradeHeight int64, +) (err error) { + defer func() { + // ensure we update the status to failed if 
any error was encountered + if err != nil { + d.stateMachine.MustSetStatus(upgradeHeight, urproto.UpgradeStatus_FAILED) + } + }() + ctx = notification.WithUpgradeHeight(ctx, upgradeHeight) + + d.stateMachine.MustSetStatus(upgradeHeight, urproto.UpgradeStatus_EXECUTING) + + // ensure the upgrade is still valid + upgrade := d.ur.GetUpgradeWithCache(upgradeHeight) + if upgrade == nil { + return fmt.Errorf("upgrade with height %d not found", upgradeHeight) + } + + // sanity check to ensure we are not performing upgrades at wrong times + if upgradeHeight < d.currHeight { + return fmt.Errorf("upgrade height %d is less than last observed height %d", upgradeHeight, d.currHeight) + } + + // ensure the docker image is present on the host (this should be done in a pre-check phase though). Better safe than sorry + var currImage, newImage string + currImage, newImage, err = checks.PullDockerImage(ctx, d.dcc, serviceName, upgrade.Tag, upgrade.Height) + if err != nil { + return err + } + + logger := log.FromContext(ctx) + + logger.Infof("Current image: %s. New image: %s found on the host", currImage, newImage) + d.stateMachine.MustSetStatusAndStep(upgradeHeight, urproto.UpgradeStatus_EXECUTING, urproto.UpgradeStep_COMPOSE_FILE_UPGRADE) + + // take container down or check if it is down already + isRunning, err := d.dcc.IsServiceRunning(ctx, serviceName, compose.DownTimeout) + if err != nil { + return errors.Wrapf(err, "failed to check if service is running") + } + + if isRunning { + logger.Info("Executing compose down").Notifyf(ctx, "Shutting down chain to perform upgrade. Current image: %s, new image: %s", currImage, newImage) + if err = d.dcc.Down(ctx, serviceName, compose.DownTimeout); err != nil { + return errors.Wrapf(err, "failed to down compose") + } + } + + logger.Info("Changing image in compose file") + if err = d.dcc.UpgradeImage(ctx, serviceName, upgrade.Tag); err != nil { + return errors.Wrapf(err, "failed to upgrade image") + } + + logger.Info("Executing compose up") + if err = d.dcc.Up(ctx, serviceName, compose.UpDeadline); err != nil { + return errors.Wrapf(err, "failed to up compose") + } + + msg := fmt.Sprintf("Upgrade completed. New image: %s. 
Now waiting for post-upgrade check to pass", newImage) + logger.Info(msg).Notify(ctx) + + return nil +} + +func (d *Daemon) updateHeightAndBlockSpeed(newHeight int64) { + // calculate block speed based on the last few observed blocks + lastBlockHeight := d.currHeight + lastHeightTime := d.currHeightTime + d.currHeight = newHeight + d.currHeightTime = time.Now() + + // when polling, the same height may be observed twice; only update the speed buffer when the height changed, to avoid division by zero + if d.currHeight != lastBlockHeight { + n := newHeight % int64(cap(d.observedBlockSpeeds)) + d.observedBlockSpeeds[n] = time.Millisecond * time.Duration(d.currHeightTime.Sub(lastHeightTime).Milliseconds()/(d.currHeight-lastBlockHeight)) + } + + sum, cnt := 0.0, 0 + for _, blockSpeed := range d.observedBlockSpeeds { + if blockSpeed != 0 { + sum += blockSpeed.Seconds() + cnt++ + } + } + if cnt != 0 { + d.currBlockSpeed = time.Duration(sum / float64(cnt) * float64(time.Second)) + } +} + +func validateComposeSettings(cfg *config.Config) error { + composeFile, err := docker.LoadComposeFile(cfg.ComposeFile) + if err != nil { + return errors.Wrapf(err, "failed to parse docker compose file") + } + + if slices.Contains(cfg.Checks.PreUpgrade.Enabled, checksproto.PreCheck_SET_HALT_HEIGHT.String()) { + prefix := cfg.Compose.EnvPrefix + "HALT_HEIGHT" + service, err := composeFile.GetService(cfg.ComposeService) + if err != nil { + return errors.Wrapf(err, "failed to get service %s from compose file", cfg.ComposeService) + } + + if _, ok := service.Environment[prefix]; !ok { + return fmt.Errorf("please add '%s=${HALT_HEIGHT}' to services.%s.environment docker compose section", prefix, cfg.ComposeService) + } + + if !slices.Contains([]string{"no", ""}, service.Restart) { + return errors.New("SET_HALT_HEIGHT precheck won't work with a restart policy set, please remove it") + } + } + + return nil +} diff --git a/internal/pkg/daemon/daemon_test.go b/internal/pkg/daemon/daemon_test.go new file mode 100644 index 0000000..040a1c6 --- /dev/null +++ b/internal/pkg/daemon/daemon_test.go @@ -0,0 +1,482 @@ +package daemon + +import ( + "bytes" + "context" + "fmt" + "math/rand" + "net" + "os" + "os/exec" + "os/user" + "path" + "path/filepath" + "strings" + "sync" + "testing" + "time" + + "blazar/internal/pkg/cmd" + "blazar/internal/pkg/config" + "blazar/internal/pkg/cosmos" + "blazar/internal/pkg/docker" + "blazar/internal/pkg/errors" + "blazar/internal/pkg/log/logger" + "blazar/internal/pkg/log/notification" + "blazar/internal/pkg/metrics" + urproto "blazar/internal/pkg/proto/upgrades_registry" + vrproto "blazar/internal/pkg/proto/version_resolver" + "blazar/internal/pkg/provider" + "blazar/internal/pkg/provider/database" + "blazar/internal/pkg/provider/local" + "blazar/internal/pkg/state_machine" + "blazar/internal/pkg/testutils" + "blazar/internal/pkg/upgrades_registry" + + "github.com/cometbft/cometbft/proto/tendermint/p2p" + "github.com/cosmos/cosmos-sdk/client/grpc/tmservice" + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + "github.com/testcontainers/testcontainers-go" + "gorm.io/driver/sqlite" + "gorm.io/gorm" +) + +var ( + simd1RepoTag string + simd2RepoTag string +) + +func TestMain(m *testing.M) { + dockerProvider, err := testcontainers.NewDockerProvider() + if err != nil { + fmt.Println("failed to create docker provider") + os.Exit(1) + } + + // build test simapp images (v0.0.1 and v0.0.2) + simd1RepoTag, simd2RepoTag = testutils.BuildTestImages(context.Background(), dockerProvider) + + os.Exit(m.Run()) +} + +// Blazar end-to-end integration test for LOCAL and DATABASE providers. 
+// +// The simd v0.0.1 image is configured to perform upgrade at height 10. +// The target v0.0.2 image has an upgrade handler compiled in to simulate a real upgrade process. +func TestIntegrationDaemon(t *testing.T) { + defer func() { + if t.Failed() { + yellow, reset := "\033[33m", "\033[0m" + t.Logf("%sWARNING: Test failed, please check docker ps for any stray containers and kill them%s", yellow, reset) + } + }() + + // we can't register the metrics twice, but sharing them should cause no problems + metrics, err := metrics.NewMetrics("/path/to/docker-compose.yml", "dummy", "test") + require.NoError(t, err) + + ports := getFreePorts(t, 4) + + t.Run("LocalProvider", func(t *testing.T) { + name := fmt.Sprintf("blazar-e2e-test-local-simapp-%d", rand.Uint64()) + t.Parallel() + tempDir := testutils.PrepareTestData(t, "", "daemon", name) + + provider, err := local.NewProvider( + path.Join(tempDir, "blazar", "local.db.json"), + "test", + 1, + ) + if err != nil { + t.Fatalf("failed to create local provider: %v", err) + } + + run(t, metrics, provider, urproto.ProviderType_LOCAL, tempDir, name, ports[0], ports[1]) + }) + + t.Run("DatabaseProvider", func(t *testing.T) { + name := fmt.Sprintf("blazar-e2e-test-db-simapp-%d", rand.Uint64()) + t.Parallel() + tempDir := testutils.PrepareTestData(t, "", "daemon", name) + + provider, err := prepareMockDatabaseProvider() + if err != nil { + t.Fatalf("failed to create database provider: %v", err) + } + + run(t, metrics, provider, urproto.ProviderType_DATABASE, tempDir, name, ports[2], ports[3]) + }) +} + +// The integration test for the daemon asserts that all 3 types of upgrades are successfully executed (for a given provider), namely: +// 1. GOVERNANCE +// 2. NON_GOVERNANCE_UNCOORDINATED +// 3. NON_GOVERNANCE_COORDINATED +func run(t *testing.T, metrics *metrics.Metrics, prvdr provider.UpgradeProvider, source urproto.ProviderType, tempDir, serviceName string, grpcPort, cometbftPort int) { + // ------ PREPARE ENVIRONMENT ------ // + cfg := generateConfig(t, tempDir, serviceName, grpcPort, cometbftPort) + + // inject test logger + outBuffer := &threadSafeBuffer{} + output := zerolog.ConsoleWriter{Out: outBuffer, TimeFormat: time.Kitchen, NoColor: true} + log := zerolog.New(output).With().Str("module", "blazar").Timestamp().Logger() + + ctx := logger.WithContext(context.Background(), &log) + fallbackNotifier := notification.NewFallbackNotifier(cfg, nil, &log, "test") + ctx = notification.WithContextFallback(ctx, fallbackNotifier) + + // compose client with logger + dcc, err := docker.NewDefaultComposeClient(ctx, nil, cfg.VersionFile, cfg.ComposeFile, cfg.UpgradeMode) + require.NoError(t, err) + + // ensure we run the container as the current user (not root!) 
+ currentUser, err := user.Current() + require.NoError(t, err) + err = os.Setenv("MY_UID", currentUser.Uid) + require.NoError(t, err) + + // initialize new upgrade registry + sm := state_machine.NewStateMachine(nil) + ur := upgrades_registry.NewUpgradeRegistry( + map[urproto.ProviderType]provider.UpgradeProvider{source: prvdr}, + []urproto.ProviderType{source}, + sm, + "test", + ) + + // add test upgrades + require.NoError(t, ur.AddUpgrade(ctx, &urproto.Upgrade{ + Height: 10, + Tag: strings.Split(simd2RepoTag, ":")[1], + Network: "test", + Name: "test", + Type: urproto.UpgradeType_GOVERNANCE, + Status: urproto.UpgradeStatus_UNKNOWN, + Step: urproto.UpgradeStep_NONE, + Source: source, + Priority: 1, + ProposalId: nil, + }, false)) + + require.NoError(t, ur.AddUpgrade(ctx, &urproto.Upgrade{ + // this fails with height 11/12, as the post-upgrade checks finish when height 11 + // (and sometimes 12, when six instances of this test run in parallel) has been hit, + // and the next height detected by the height watcher is then 12/13 + // + // So GetUpcomingUpgradesWithCache would skip it + // + // maybe we should not allow users to register upgrades for gov upgrade height + 1 + // as they are guaranteed to be skipped + Height: 13, + Tag: strings.Split(simd2RepoTag, ":")[1], + Network: "test", + Name: "test_uncoordinated", + Type: urproto.UpgradeType_NON_GOVERNANCE_UNCOORDINATED, + Status: urproto.UpgradeStatus_UNKNOWN, + Step: urproto.UpgradeStep_NONE, + Source: source, + Priority: 1, + ProposalId: nil, + }, false)) + + require.NoError(t, ur.AddUpgrade(ctx, &urproto.Upgrade{ + // Similar reasoning as above height + Height: 19, + Tag: strings.Split(simd2RepoTag, ":")[1], + Network: "test", + Name: "test_coordinated", + Type: urproto.UpgradeType_NON_GOVERNANCE_COORDINATED, + Status: urproto.UpgradeStatus_UNKNOWN, + Step: urproto.UpgradeStep_NONE, + Source: source, + Priority: 1, + ProposalId: nil, + }, false)) + + // refresh the upgrade registry cache + _, _, _, _, err = ur.Update(ctx, 0, true) + require.NoError(t, err) + + daemon := Daemon{ + dcc: dcc, + ur: ur, + stateMachine: sm, + metrics: metrics, + observedBlockSpeeds: make([]time.Duration, 5), + nodeInfo: &tmservice.GetNodeInfoResponse{ + DefaultNodeInfo: &p2p.DefaultNodeInfo{ + Network: "test", + }, + }, + } + require.NoError(t, err) + + // start the simapp node + _, _, err = cmd.CheckOutputWithDeadline(ctx, 5*time.Second, nil, "docker", "compose", "-f", cfg.ComposeFile, "up", "-d", "--force-recreate") + require.NoError(t, err) + + // start cosmos client and wait for it to be ready + cosmosClient, err := cosmos.NewClient(cfg.Clients.Host, cfg.Clients.GrpcPort, cfg.Clients.CometbftPort, cfg.Clients.Timeout) + require.NoError(t, err) + + // retry for up to 10 seconds, breaking out on the first success + for range 20 { + if err = cosmosClient.StartCometbftClient(); err == nil { + break + } + time.Sleep(500 * time.Millisecond) + } + + require.NoError(t, err) + daemon.cosmosClient = cosmosClient + + // wait just in case the rpc is not responsive yet + time.Sleep(2 * time.Second) + + // ------ TEST GOVERNANCE UPGRADE ------ // + // we expect the chain to upgrade to simd2RepoTag at height 10 // + latestHeight, err := cosmosClient.GetLatestBlockHeight(ctx) + require.NoError(t, err) + require.LessOrEqual(t, latestHeight, int64(8), "the test is faulty, the chain is already at height > 8") + + height, err := daemon.waitForUpgrade(ctx, cfg) + require.NoError(t, err) + require.Equal(t, int64(10), height) + + // get simapp container logs + var stdout bytes.Buffer + cmd := exec.Command("docker", "compose", "-f", cfg.ComposeFile, "logs") +
cmd.Stdout = &stdout + err = cmd.Run() + require.NoError(t, err) + + // the chain process must have logged the upgrade height being hit + require.Contains(t, stdout.String(), "UPGRADE \"test1\" NEEDED at height: 10") + + // perform the upgrade + err = daemon.performUpgrade(ctx, &cfg.Compose, cfg.ComposeService, height) + require.NoError(t, err) + + // ensure the upgrade was successful + isImageContainerRunning, err := dcc.IsServiceRunning(ctx, cfg.ComposeService, time.Second*2) + require.NoError(t, err) + require.True(t, isImageContainerRunning) + + // blazar should have logged all this + require.Contains(t, outBuffer.String(), fmt.Sprintf("Monitoring %s for new upgrades", cfg.UpgradeInfoFilePath())) + require.Contains(t, outBuffer.String(), "Received upgrade data from upgrade-info.json") + require.Contains(t, outBuffer.String(), "Executing compose up") + require.Contains(t, outBuffer.String(), fmt.Sprintf("Upgrade completed. New image: %s", simd2RepoTag)) + + // let's see if the post-upgrade checks pass + err = daemon.postUpgradeChecks(ctx, sm, &cfg.Checks.PostUpgrade, height) + require.NoError(t, err) + + outBuffer.Reset() + + // ------ TEST NON_GOVERNANCE_UNCOORDINATED UPGRADE ------ // + // we expect the chain to upgrade to simd2RepoTag at height 13 // + latestHeight, err = cosmosClient.GetLatestBlockHeight(ctx) + require.NoError(t, err) + require.LessOrEqual(t, latestHeight, int64(11), "the test is faulty, the chain is already at height > 11") + + upgrades, err := ur.GetUpcomingUpgrades(ctx, false, 11, urproto.UpgradeStatus_SCHEDULED, urproto.UpgradeStatus_ACTIVE, urproto.UpgradeStatus_EXECUTING) + require.NoError(t, err) + require.Len(t, upgrades, 2) + + height, err = daemon.waitForUpgrade(ctx, cfg) + require.NoError(t, err) + require.Equal(t, int64(13), height) + + require.Contains(t, outBuffer.String(), fmt.Sprintf("Monitoring %s for new upgrades", cfg.UpgradeInfoFilePath())) + require.Contains(t, outBuffer.String(), "Received upgrade height from the chain rpc") + + err = daemon.performUpgrade(ctx, &cfg.Compose, cfg.ComposeService, height) + require.NoError(t, err) + + require.Contains(t, outBuffer.String(), "Executing compose up") + require.Contains(t, outBuffer.String(), fmt.Sprintf("Upgrade completed. 
New image: %s", simd2RepoTag)) + + // Lets see if post upgrade checks pass + err = daemon.postUpgradeChecks(ctx, sm, &cfg.Checks.PostUpgrade, height) + require.NoError(t, err) + + outBuffer.Reset() + + // ------ TEST NON_GOVERNANCE_COORDINATED UPGRADE (with HALT_HEIGHT) ------ // + // we expect the chain to upgrade to simd2RepoTag at height 19 // + latestHeight, err = cosmosClient.GetLatestBlockHeight(ctx) + require.NoError(t, err) + require.LessOrEqual(t, latestHeight, int64(14), "the test is faulty, the chain is already at height > 14") + + upgrades, err = ur.GetUpcomingUpgrades(ctx, false, 14, urproto.UpgradeStatus_SCHEDULED, urproto.UpgradeStatus_ACTIVE, urproto.UpgradeStatus_EXECUTING) + require.NoError(t, err) + require.Len(t, upgrades, 1) + + height, err = daemon.waitForUpgrade(ctx, cfg) + require.NoError(t, err) + require.Equal(t, int64(19), height) + + // get container logs + stdout.Reset() + cmd = exec.Command("docker", "compose", "-f", cfg.ComposeFile, "logs") + cmd.Stdout = &stdout + err = cmd.Run() + require.NoError(t, err) + + require.Contains(t, stdout.String(), "halt per configuration height 19") + + require.Contains(t, outBuffer.String(), fmt.Sprintf("Monitoring %s for new upgrades", cfg.UpgradeInfoFilePath())) + require.Contains(t, outBuffer.String(), "Received upgrade height from the chain rpc") + + // older cosmos-sdk versions exit the node while the newer ones don't + // in this case simapp will halt at height 19 but won't exit + // we want to be sure the pre-check worked + require.Contains(t, outBuffer.String(), "HALT_HEIGHT likely worked but didn't shut down the node") + + err = daemon.performUpgrade(ctx, &cfg.Compose, cfg.ComposeService, height) + require.NoError(t, err) + + // lets see if post upgrade checks pass + err = daemon.postUpgradeChecks(ctx, sm, &cfg.Checks.PostUpgrade, height) + require.NoError(t, err) + + // cleanup + err = dcc.Down(ctx, cfg.ComposeService, cfg.Compose.DownTimeout) + require.NoError(t, err) +} + +type threadSafeBuffer struct { + buf bytes.Buffer + mu sync.Mutex +} + +func (b *threadSafeBuffer) Write(p []byte) (n int, err error) { + b.mu.Lock() + defer b.mu.Unlock() + return b.buf.Write(p) +} + +func (b *threadSafeBuffer) String() string { + b.mu.Lock() + defer b.mu.Unlock() + return b.buf.String() +} + +func (b *threadSafeBuffer) Reset() { + b.mu.Lock() + defer b.mu.Unlock() + b.buf.Reset() +} + +func generateConfig(t *testing.T, tempDir, serviceName string, grpcPort, cometbftPort int) *config.Config { + err := testutils.WriteTmpl(filepath.Join(tempDir, "docker-compose.yml.tmpl"), struct { + ServiceName string + Image string + GrpcPort int + CometbftPort int + }{ + ServiceName: serviceName, + Image: simd1RepoTag, + GrpcPort: grpcPort, + CometbftPort: cometbftPort, + }) + require.NoError(t, err) + + return &config.Config{ + ChainHome: filepath.Join(tempDir, "chain-home"), + ComposeFile: filepath.Join(tempDir, "docker-compose.yml"), + ComposeService: serviceName, + VersionFile: "", + UpgradeMode: config.UpgradeInComposeFile, + Host: "dummy", + Watchers: config.Watchers{ + UIInterval: time.Millisecond * 5, + HInterval: time.Second * 0, + UPInterval: time.Minute * 5, + }, + Clients: config.Clients{ + Host: "localhost", + GrpcPort: uint16(grpcPort), + CometbftPort: uint16(cometbftPort), + Timeout: 10 * time.Second, + }, + Checks: config.Checks{ + PreUpgrade: config.PreUpgrade{ + Enabled: []string{"SET_HALT_HEIGHT"}, + // as soon as possible + Blocks: 100, + SetHaltHeight: &config.SetHaltHeight{ + DelayBlocks: 0, + }, + }, + PostUpgrade: 
config.PostUpgrade{ + // cannot enable FIRST_BLOCK_VOTED here as the test validator has + // prevotes_bit_array "BA{1:_} 0/1 = 0.00" + Enabled: []string{"GRPC_RESPONSIVE", "CHAIN_HEIGHT_INCREASED"}, + GrpcResponsive: &config.GrpcResponsive{ + PollInterval: 300 * time.Millisecond, + Timeout: 20 * time.Second, + }, + ChainHeightIncreased: &config.ChainHeightIncreased{ + PollInterval: 300 * time.Millisecond, + Timeout: 20 * time.Second, + NotifInterval: 10 * time.Minute, + }, + FirstBlockVoted: &config.FirstBlockVoted{ + PollInterval: 300 * time.Millisecond, + Timeout: 20 * time.Second, + NotifInterval: 10 * time.Minute, + }, + }, + }, + Compose: config.ComposeCli{ + DownTimeout: time.Second * 30, + UpDeadline: time.Second * 30, + EnvPrefix: "SIMD_", + }, + } +} + +func getFreePorts(t *testing.T, n int) []int { + var ports []int + var listeners []net.Listener + + // create n listeners + for range n { + listener, err := net.Listen("tcp", "localhost:0") + require.NoError(t, err) + + // get the assigned port from the listener + addr := listener.Addr().(*net.TCPAddr) + ports = append(ports, addr.Port) + + // add listener to slice for later closing + listeners = append(listeners, listener) + } + + // close all listeners after getting ports + for _, listener := range listeners { + err := listener.Close() + require.NoError(t, err) + } + + return ports +} + +func prepareMockDatabaseProvider() (*database.Provider, error) { + db, err := gorm.Open(sqlite.Open("file::memory:"), &gorm.Config{}) + if err != nil { + return nil, errors.Wrapf(err, "failed to connect database") + } + err = db.AutoMigrate(&urproto.Upgrade{}) + if err != nil { + return nil, errors.Wrapf(err, "database migration failed for upgrades table") + } + + err = db.AutoMigrate(&vrproto.Version{}) + if err != nil { + return nil, errors.Wrapf(err, "database migration failed for versions table") + } + return database.NewDatabaseProviderWithDB(db, "test", 1), nil +} diff --git a/internal/pkg/daemon/grpc.go b/internal/pkg/daemon/grpc.go new file mode 100644 index 0000000..0edee61 --- /dev/null +++ b/internal/pkg/daemon/grpc.go @@ -0,0 +1,222 @@ +package daemon + +import ( + "cmp" + "context" + "slices" + "strings" + + "blazar/internal/pkg/config" + "blazar/internal/pkg/cosmos" + "blazar/internal/pkg/errors" + blazarproto "blazar/internal/pkg/proto/blazar" + urproto "blazar/internal/pkg/proto/upgrades_registry" + vrproto "blazar/internal/pkg/proto/version_resolver" + "blazar/internal/pkg/upgrades_registry" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type Server struct { + urproto.UnimplementedUpgradeRegistryServer + vrproto.UnimplementedVersionResolverServer + blazarproto.UnimplementedBlazarServer + + cfg *config.Config + ur *upgrades_registry.UpgradeRegistry +} + +// Need to supply logger explicitly because grpc creates its own context +func NewServer(cfg *config.Config, ur *upgrades_registry.UpgradeRegistry) *Server { + return &Server{ + cfg: cfg, + ur: ur, + } +} + +func (s *Server) AddUpgrade(ctx context.Context, in *urproto.AddUpgradeRequest) (*urproto.AddUpgradeResponse, error) { + if in == nil || in.Upgrade == nil { + return nil, status.Errorf(codes.Internal, "request is empty") + } + + in.Upgrade.Tag = strings.TrimSpace(in.Upgrade.Tag) + in.Upgrade.Network = s.cfg.UpgradeRegistry.Network + + err := s.ur.AddUpgrade(ctx, in.Upgrade, in.GetOverwrite()) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to add upgrade: %v", err) + } + + // It is confusing for users having to wait for the 
upgrades list to refresh in X seconds, so we force update here + if _, err := s.forceUpdate(ctx); err != nil { + return nil, status.Errorf(codes.Internal, "failed to force update: %v", err) + } + + return &urproto.AddUpgradeResponse{}, nil +} + +func (s *Server) CancelUpgrade(ctx context.Context, in *urproto.CancelUpgradeRequest) (*urproto.CancelUpgradeResponse, error) { + if in == nil || in.Height == 0 { + return nil, status.Errorf(codes.Internal, "request is empty") + } + + err := s.ur.CancelUpgrade(ctx, in.Height, in.Source, s.cfg.UpgradeRegistry.Network, in.Force) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to cancel upgrade: %v", err) + } + + // It is confusing for users having to wait for the upgrades list to refresh in X seconds, so we force update here + if _, err := s.forceUpdate(ctx); err != nil { + return nil, status.Errorf(codes.Internal, "failed to force update: %v", err) + } + + return &urproto.CancelUpgradeResponse{}, nil +} + +func (s *Server) ListUpgrades(ctx context.Context, in *urproto.ListUpgradesRequest) (*urproto.ListUpgradesResponse, error) { + all, err := s.ur.GetAllUpgrades(ctx, !in.GetDisableCache()) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to list upgrades: %v", err) + } + + stateMachine := s.ur.GetStateMachine() + upgrades := []*urproto.Upgrade{} + for _, upgrade := range all { + // filter out upgrades that don't match the request + if in.Height != nil && in.GetHeight() != upgrade.Height { + continue + } + if in.Type != nil && in.GetType() != upgrade.Type { + continue + } + if in.Source != nil && in.GetSource() != upgrade.Source { + continue + } + + upgrade.Status = stateMachine.GetStatus(upgrade.Height) + upgrade.Step = stateMachine.GetStep(upgrade.Height) + + if len(in.Status) > 0 && !slices.Contains(in.Status, upgrade.Status) { + continue + } + + upgrades = append(upgrades, upgrade) + } + + slices.SortFunc(upgrades, func(i, j *urproto.Upgrade) int { + return cmp.Compare(j.Height, i.Height) + }) + + if in.Limit != nil && int64(len(upgrades)) > *in.Limit { + upgrades = upgrades[:*in.Limit] + } + + return &urproto.ListUpgradesResponse{Upgrades: upgrades}, nil +} + +func (s *Server) AddVersion(ctx context.Context, in *vrproto.RegisterVersionRequest) (*vrproto.RegisterVersionResponse, error) { + if in == nil || in.Version == nil { + return nil, status.Errorf(codes.Internal, "request is empty") + } + + in.Version.Network = s.cfg.UpgradeRegistry.Network + in.Version.Tag = strings.TrimSpace(in.Version.Tag) + + err := s.ur.RegisterVersion(ctx, in.Version, in.GetOverwrite()) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to register version: %v", err) + } + + // It is confusing for users having to wait for the upgrades list to refresh in X seconds, so we force update here + if _, err := s.forceUpdate(ctx); err != nil { + return nil, status.Errorf(codes.Internal, "failed to force update: %v", err) + } + + return &vrproto.RegisterVersionResponse{}, nil +} + +func (s *Server) ListVersions(ctx context.Context, in *vrproto.ListVersionsRequest) (*vrproto.ListVersionsResponse, error) { + all, err := s.ur.GetAllVersions(ctx, !in.GetDisableCache()) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to list versions: %v", err) + } + + versions := []*vrproto.Version{} + for _, version := range all { + // filter out versions that don't match the request + if in.Height != nil && in.GetHeight() != version.Height { + continue + } + if in.Source != nil && in.GetSource() != version.Source { 
+ continue + } + + versions = append(versions, version) + } + + slices.SortFunc(versions, func(i, j *vrproto.Version) int { + return cmp.Compare(i.Height, j.Height) + }) + + return &vrproto.ListVersionsResponse{Versions: versions}, nil +} + +func (s *Server) GetVersion(ctx context.Context, in *vrproto.GetVersionRequest) (*vrproto.GetVersionResponse, error) { + version, err := s.ur.GetVersion(ctx, !in.GetDisableCache(), in.Height) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to get version: %v", err) + } + + return &vrproto.GetVersionResponse{ + Version: version, + }, nil +} + +func (s *Server) GetLastestHeight(ctx context.Context, _ *blazarproto.GetLatestHeightRequest) (*blazarproto.GetLatestHeightResponse, error) { + cosmosClient, err := cosmos.NewClient(s.cfg.Clients.Host, s.cfg.Clients.GrpcPort, s.cfg.Clients.CometbftPort, s.cfg.Clients.Timeout) + if err != nil { + return nil, errors.Wrapf(err, "failed to create cosmos client") + } + + if err := cosmosClient.StartCometbftClient(); err != nil { + return nil, errors.Wrapf(err, "failed to start cometbft client") + } + + height, err := cosmosClient.GetLatestBlockHeight(ctx) + if err != nil { + return nil, errors.Wrapf(err, "failed to get latest block height") + } + + // TODO: we should add some sanity checks here to make sure the returned height is not stale (due to the node not being synced etc) + return &blazarproto.GetLatestHeightResponse{ + Height: height, + Network: s.cfg.UpgradeRegistry.Network, + }, nil +} + +func (s *Server) ForceSync(ctx context.Context, _ *urproto.ForceSyncRequest) (*urproto.ForceSyncResponse, error) { + syncHeight, err := s.forceUpdate(ctx) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to force update: %v", err) + } + + return &urproto.ForceSyncResponse{ + Height: syncHeight, + }, nil +} + +func (s *Server) forceUpdate(ctx context.Context) (int64, error) { + cosmosClient, err := cosmos.NewClient(s.cfg.Clients.Host, s.cfg.Clients.GrpcPort, s.cfg.Clients.CometbftPort, s.cfg.Clients.Timeout) + if err != nil { + return 0, err + } + + lastHeight, err := cosmosClient.GetLatestBlockHeight(ctx) + if err != nil { + return 0, err + } + + _, _, _, _, err = s.ur.Update(ctx, lastHeight, true) + return lastHeight, err +} diff --git a/internal/pkg/daemon/index.go b/internal/pkg/daemon/index.go new file mode 100644 index 0000000..4373ee9 --- /dev/null +++ b/internal/pkg/daemon/index.go @@ -0,0 +1,166 @@ +package daemon + +import ( + "cmp" + "encoding/base64" + "fmt" + "math" + "net/http" + "slices" + "strconv" + "strings" + "text/template" + "time" + + "blazar/internal/pkg/daemon/util" + urproto "blazar/internal/pkg/proto/upgrades_registry" + "blazar/internal/pkg/static" + + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" +) + +func RegisterIndexHandler(mux *runtime.ServeMux, d *Daemon, upInterval time.Duration) error { + return mux.HandlePath("GET", "/", func(w http.ResponseWriter, r *http.Request, _ map[string]string) { + t, err := template.ParseFS(static.Templates, "templates/index/index-blazar.html") + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + _, _ = w.Write([]byte(err.Error())) + return + } + + logoData, err := static.Templates.ReadFile("templates/index/logo.png") + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + _, _ = w.Write([]byte(err.Error())) + return + } + + disableCache := false + value := r.FormValue("disable_cache") + if value == "true" { + disableCache = true + } + + all, err := d.ur.GetAllUpgrades(r.Context(), 
!disableCache) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + _, _ = w.Write([]byte(err.Error())) + return + } + + warning := "" + latestHeight, err := d.cosmosClient.GetLatestBlockHeight(r.Context()) + if err != nil { + warning = "Failed to get latest block height from Cosmos: " + err.Error() + } + + syncInfo := d.ur.SyncInfo() + stateMachine := d.ur.GetStateMachine() + + blockSpeed := d.currBlockSpeed + + blocksToUpgradeMap := make(map[int64]string) + blocksToETAMap := make(map[int64]string) + upgrades, i := make([]*urproto.Upgrade, len(all)), 0 + + for _, upgrade := range all { + upgrade.Status = stateMachine.GetStatus(upgrade.Height) + upgrade.Step = stateMachine.GetStep(upgrade.Height) + + blocksToUpgrade := "" + if latestHeight != 0 { + blocksToUpgrade = strconv.FormatInt(upgrade.GetHeight()-latestHeight, 10) + } + + blocksToUpgradeMap[upgrade.Height] = blocksToUpgrade + eta := time.Duration((upgrade.GetHeight()-latestHeight)*blockSpeed.Milliseconds()) * time.Millisecond + blocksToETAMap[upgrade.Height] = formatRelativeTime(time.Now().Add(eta)) + upgrades[i] = upgrade + i++ + } + + // sort descending by height, because we humans like to have the upcoming upgrades at the top + slices.SortFunc(upgrades, func(i, j *urproto.Upgrade) int { + return cmp.Compare(j.Height, i.Height) + }) + + if syncInfo.LastUpdateTime.IsZero() { + warning = "Blazar hasn't synced with the Cosmos network yet. Please wait for the first sync to complete." + } + + err = t.Execute(w, struct { + LastUpdateTime string + LastUpdateDiff string + SecondsToNextUpdate int64 + LastBlockHeight int64 + CurrentBlockHeight int64 + BlockSpeed float64 + Upgrades []*urproto.Upgrade + BlocksToUpgrade map[int64]string + BlocksToETA map[int64]string + UpgradeProgress map[int64]string + Hostname string + Providers map[int32]string + UpgradeTypes map[int32]string + DefaultNetwork string + LogoBase64 string + Warning string + }{ + LastUpdateTime: syncInfo.LastUpdateTime.UTC().Format(time.RFC3339), + LastUpdateDiff: time.Since(syncInfo.LastUpdateTime).Truncate(time.Second).String(), + SecondsToNextUpdate: int64(time.Until(syncInfo.LastUpdateTime.Add(upInterval)).Seconds()), + LastBlockHeight: syncInfo.LastBlockHeight, + CurrentBlockHeight: latestHeight, + BlockSpeed: blockSpeed.Seconds(), + Upgrades: upgrades, + BlocksToUpgrade: blocksToUpgradeMap, + BlocksToETA: blocksToETAMap, + Hostname: util.GetHostname(), + Providers: map[int32]string{ + urproto.ProviderType_value["LOCAL"]: "LOCAL", + urproto.ProviderType_value["DATABASE"]: "DATABASE", + }, + UpgradeTypes: urproto.UpgradeType_name, + DefaultNetwork: d.ur.Network(), + LogoBase64: base64.StdEncoding.EncodeToString(logoData), + Warning: warning, + }) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + _, _ = w.Write([]byte(err.Error())) + return + } + }) +} + +// formatRelativeTime renders a future timestamp relative to now, e.g. "2d 4h", "3h 15m", "45m" or "now"; past timestamps yield an empty string +func formatRelativeTime(t time.Time) string { + now := time.Now() + diff := t.Sub(now) + + if diff < 0 { + return "" + } + + days := int(diff.Hours() / 24) + hours := int(math.Mod(diff.Hours(), 24)) + minutes := int(math.Mod(diff.Minutes(), 60)) + + if days == 0 && hours == 0 && minutes <= 1 { + return "now" + } + parts := make([]string, 0, 3) + + if days > 0 { + parts = append(parts, fmt.Sprintf("%dd", days)) + } + + if hours > 0 { + parts = append(parts, fmt.Sprintf("%dh", hours)) + } + + if days == 0 && minutes > 0 { + parts = append(parts, fmt.Sprintf("%dm", minutes)) + } + + return strings.Join(parts, " ") +} diff --git a/internal/pkg/daemon/metrics.go b/internal/pkg/daemon/metrics.go 
new file mode 100644 index 0000000..bc82525 --- /dev/null +++ b/internal/pkg/daemon/metrics.go @@ -0,0 +1,20 @@ +package daemon + +import ( + "strconv" +) + +func (d *Daemon) updateMetrics() { + // the upgrade state may change, we don't want to persist the metric with the old status + d.metrics.State.Reset() + d.metrics.BlocksToUpgrade.Reset() + + upcomingUpgrades := d.ur.GetUpcomingUpgradesWithCache(d.currHeight) + for _, upgrade := range upcomingUpgrades { + upgradeHeight := strconv.FormatInt(upgrade.Height, 10) + status := d.stateMachine.GetStatus(upgrade.Height) + + d.metrics.State.WithLabelValues(upgradeHeight, upgrade.Name, status.String()).Set(float64(d.stateMachine.GetStep(upgrade.Height))) + d.metrics.BlocksToUpgrade.WithLabelValues(upgradeHeight, upgrade.Name, status.String()).Set(float64(upgrade.Height - d.currHeight)) + } +} diff --git a/internal/pkg/daemon/util/util.go b/internal/pkg/daemon/util/util.go new file mode 100644 index 0000000..a415580 --- /dev/null +++ b/internal/pkg/daemon/util/util.go @@ -0,0 +1,31 @@ +package util + +import ( + "fmt" + "os" + + "blazar/internal/pkg/docker" + "blazar/internal/pkg/errors" +) + +func GetHostname() string { + hostname, err := os.Hostname() + if err != nil { + hostname = "unknown" + } + + return hostname +} + +// Get the current image from compose file and the image corresponding to the +// upgradeTag +func GetCurrImageUpgradeImage(dcc *docker.ComposeClient, serviceName, upgradeTag string) (string, string, error) { + currImage, currVersion, err := dcc.GetImageAndVersionFromCompose(serviceName) + if err != nil { + return "", "", errors.Wrapf(err, "failed to get image for service %s", serviceName) + } + currComposeImage := fmt.Sprintf("%s:%s", currImage, currVersion) + newImage := fmt.Sprintf("%s:%s", currImage, upgradeTag) + + return currComposeImage, newImage, nil +} diff --git a/internal/pkg/docker/compose.go b/internal/pkg/docker/compose.go new file mode 100644 index 0000000..8b16353 --- /dev/null +++ b/internal/pkg/docker/compose.go @@ -0,0 +1,482 @@ +package docker + +import ( + "context" + "fmt" + "math" + "os" + "path/filepath" + "slices" + "strconv" + "strings" + "time" + + "blazar/internal/pkg/cmd" + "blazar/internal/pkg/config" + "blazar/internal/pkg/errors" + "blazar/internal/pkg/log" + + compose "github.com/compose-spec/compose-go/cli" + composeTypes "github.com/compose-spec/compose-go/types" +) + +var ( + ErrContainerRunning = errors.New("container running") + ErrContainerNotRunning = errors.New("container not running") +) + +type ComposeClient struct { + client *Client + versionFile string + composeFile string + upgradeMode config.UpgradeMode +} + +func NewDefaultComposeClient(ctx context.Context, ch CredentialHelper, versionFile, composeFile string, upgradeMode config.UpgradeMode) (*ComposeClient, error) { + dc, err := NewClient(ctx, ch) + if err != nil { + return nil, err + } + + return NewComposeClient(dc, versionFile, composeFile, upgradeMode) +} + +func NewComposeClient(dockerClient *Client, versionFile, composeFile string, upgradeMode config.UpgradeMode) (*ComposeClient, error) { + if !slices.Contains(config.ValidUpgradeModes, upgradeMode) { + return nil, fmt.Errorf("invalid upgradeMode '%s', pick one of %+v", upgradeMode, config.ValidUpgradeModes) + } + + return &ComposeClient{ + client: dockerClient, + versionFile: versionFile, + composeFile: composeFile, + upgradeMode: upgradeMode, + }, nil +} + +func (dcc *ComposeClient) DockerClient() *Client { + return dcc.client +} + +func (dcc *ComposeClient) 
GetImageAndVersionFromCompose(serviceName string) (string, string, error) { + project, err := LoadComposeFile(dcc.composeFile) + if err != nil { + return "", "", errors.Wrapf(err, "compose file loading failed") + } + + service, err := getServiceFromProject(serviceName, project) + if err != nil { + return "", "", errors.Wrapf(err, "failed to get service from project") + } + image, version, err := ParseImageName(service.Image) + if err != nil { + return "", "", fmt.Errorf("invalid image - missing tag: %s", service.Image) + } + + return image, version, nil +} + +func (dcc *ComposeClient) getVersionFromEnv(serviceName, versionFile string) (string, error) { + version, err := LoadServiceVersionFile(versionFile, serviceName) + if err != nil { + return "", errors.Wrapf(err, "version file loading failed") + } + + return version, nil +} + +func (dcc *ComposeClient) GetVersionForService(serviceName string) (string, error) { + if dcc.upgradeMode == config.UpgradeInEnvFile { + version, err := dcc.getVersionFromEnv(serviceName, dcc.versionFile) + if err != nil { + return "", err + } + return version, nil + } else if dcc.upgradeMode == config.UpgradeInComposeFile { + _, composeVersion, err := dcc.GetImageAndVersionFromCompose(serviceName) + if err != nil { + return "", err + } + return composeVersion, err + } + return "", fmt.Errorf("invalid upgrade mode %+v", dcc.upgradeMode) +} + +func (dcc *ComposeClient) GetPlatform(serviceName string) (string, error) { + project, err := LoadComposeFile(dcc.composeFile) + if err != nil { + return "", errors.Wrapf(err, "compose file loading failed") + } + + service, err := getServiceFromProject(serviceName, project) + if err != nil { + return "", errors.Wrapf(err, "failed to get service from project") + } + + return service.Platform, nil +} + +func (dcc *ComposeClient) UpgradeImage(ctx context.Context, serviceName, newVersion string) error { + switch dcc.upgradeMode { + case config.UpgradeInEnvFile: + return dcc.upgradeImageInEnvFile(ctx, serviceName, newVersion) + case config.UpgradeInComposeFile: + return dcc.upgradeImageInComposeFile(ctx, serviceName, newVersion) + } + return fmt.Errorf("invalid upgrade mode %+v", dcc.upgradeMode) +} + +// Updates the `version` field in the version file, which is used in docker-compose +// to determine the image version to run +func (dcc *ComposeClient) upgradeImageInEnvFile(ctx context.Context, serviceName, newVersion string) error { + oldVersion, err := LoadServiceVersionFile(dcc.versionFile, serviceName) + if err != nil { + return errors.Wrapf(err, "loading the service version file failed") + } + log.FromContext(ctx).Infof("Updating version on %s from %s to %s", dcc.versionFile, oldVersion, newVersion) + + return dcc.updateVersionFile(serviceName, newVersion) +} + +// Updates version in the `image` field in docker compose +// +// This method does not write the parsed config into yaml, but instead uses simple string replacement +// to preserve user formatting +func (dcc *ComposeClient) upgradeImageInComposeFile(ctx context.Context, serviceName, newVersion string) error { + project, err := LoadComposeFile(dcc.composeFile) + if err != nil { + return errors.Wrapf(err, "compose file loading failed") + } + + currService, err := getServiceFromProject(serviceName, project) + if err != nil { + return err + } + + image, _, err := ParseImageName(currService.Image) + if err != nil { + return err + } + + newImage := fmt.Sprintf("%s:%s", image, newVersion) + if currService.Image == newImage { + log.FromContext(ctx).Warnf("image %s 
+ +// Updates version in the `image` field in docker compose +// +// This method does not write the parsed config into yaml, but instead uses simple string replacement +// to preserve user formatting +func (dcc *ComposeClient) upgradeImageInComposeFile(ctx context.Context, serviceName, newVersion string) error { + project, err := LoadComposeFile(dcc.composeFile) + if err != nil { + return errors.Wrapf(err, "compose file loading failed") + } + + currService, err := getServiceFromProject(serviceName, project) + if err != nil { + return err + } + + image, _, err := ParseImageName(currService.Image) + if err != nil { + return err + } + + newImage := fmt.Sprintf("%s:%s", image, newVersion) + if currService.Image == newImage { + log.FromContext(ctx).Warnf("image %s already set in compose file", image) + return nil + } + + isImagePresent, err := dcc.client.IsImagePresent(ctx, newImage) + if err != nil { + return errors.Wrapf(err, "check for docker image present failed") + } + + if !isImagePresent { + return fmt.Errorf("image %s not present on the system", newImage) + } + + updatedContent, err := readAndReplace(dcc.composeFile, currService.Image, newImage) + if err != nil { + return err + } + + return updateComposeFile(dcc.composeFile, updatedContent) +} + +func (dcc *ComposeClient) Down(ctx context.Context, serviceName string, timeout time.Duration) error { + isImageContainerRunning, err := dcc.IsServiceRunning(ctx, serviceName, timeout) + if err != nil { + return errors.Wrapf(err, "check for container running failed") + } + if !isImageContainerRunning { + return errors.Wrapf(ErrContainerNotRunning, "expected the container to run before calling docker compose down") + } + + // 5 second buffer to handle the case when the docker timeout (-t) + // takes slightly longer than the defined timeout (thus delaying context cancellation) + deadline := timeout + 5*time.Second + timeoutSeconds := int(math.Round(timeout.Seconds())) + + err = cmd.ExecuteWithDeadlineAndLog(ctx, deadline, []string{}, "docker", "compose", "-f", dcc.composeFile, "down", "--remove-orphans", "-t", strconv.Itoa(timeoutSeconds)) + if err != nil { + return errors.Wrapf(err, "docker compose down failed") + } + + // verify via the docker API that the container is down + isImageContainerRunning, err = dcc.IsServiceRunning(ctx, serviceName, timeout) + if err != nil { + return errors.Wrapf(err, "check for container running failed") + } + if isImageContainerRunning { + return errors.Wrapf(ErrContainerRunning, "compose down didn't stop the container") + } + + return nil +} + +func (dcc *ComposeClient) Up(ctx context.Context, serviceName string, timeout time.Duration, ephemeralEnvVars ...string) error { + isImageContainerRunning, err := dcc.IsServiceRunning(ctx, serviceName, timeout) + if err != nil { + return errors.Wrapf(err, "check for container running failed") + } + if isImageContainerRunning { + return errors.Wrapf(ErrContainerRunning, "expected the container to be down before calling docker compose up") + } + + // docker compose up supports the -t flag, but it only applies when + // containers are already running and need to be shut down before being + // started again; we ensure containers are not running at this point, + // so we don't need the flag + err = cmd.ExecuteWithDeadlineAndLog(ctx, timeout, ephemeralEnvVars, "docker", "compose", "-f", dcc.composeFile, "up", "-d", "--force-recreate") + if err != nil { + return errors.Wrapf(err, "docker compose up failed") + } + + // verify via the docker API that the container is up + isImageContainerRunning, err = dcc.IsServiceRunning(ctx, serviceName, timeout) + if err != nil { + return errors.Wrapf(err, "check for container running failed") + } + if !isImageContainerRunning { + return errors.Wrapf(ErrContainerNotRunning, "compose up didn't start container") + } + + return nil +} + +func (dcc *ComposeClient) RestartServiceWithHaltHeight(ctx context.Context, composeConfig *config.ComposeCli, serviceName string, upgradeHeight int64) error { + err := dcc.Down(ctx, serviceName, composeConfig.DownTimeout) + if err != nil && !errors.Is(err, ErrContainerNotRunning) { + return errors.Wrapf(err, "docker compose down failed") + } + + return dcc.Up(ctx, serviceName, composeConfig.UpDeadline, fmt.Sprintf("HALT_HEIGHT=%d", upgradeHeight)) +}
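+
+// Example sketch (config values are hypothetical): restart the node so it
+// halts at the upgrade height, relying on the HALT_HEIGHT variable injected
+// by RestartServiceWithHaltHeight above:
+//
+//	composeCfg := config.ComposeCli{DownTimeout: 30 * time.Second, UpDeadline: time.Minute}
+//	if err := dcc.RestartServiceWithHaltHeight(ctx, &composeCfg, "node", 123456); err != nil {
+//		log.FromContext(ctx).Err(err).Error("restart with HALT_HEIGHT failed")
+//	}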
+ +func (dcc *ComposeClient) IsServiceRunning(ctx context.Context, serviceName string, timeout time.Duration) (bool, error) { + // +1s to give some wiggle room for the docker compose cli to respond + containerID, err := dcc.GetContainerID(ctx, serviceName, timeout+time.Second) + if err != nil { + return false, errors.Wrapf(err, "failed to check if service is running") + } + + if containerID == "" { + return false, nil + } + + return dcc.client.IsContainerRunning(ctx, containerID) +} + +func (dcc *ComposeClient) GetContainerID(ctx context.Context, serviceName string, timeout time.Duration) (string, error) { + stdout, stderr, err := cmd.CheckOutputWithDeadline( + ctx, timeout, []string{}, "docker", "compose", "-f", dcc.composeFile, "ps", "-a", "-q", + // we are only interested in finding the running container + // --status stringArray Filter services by status. Values: [paused | restarting | removing | running | dead | created | exited] + "--status", "restarting", + "--status", "removing", + "--status", "running", + "--status", "created", + serviceName, + ) + if err != nil { + return "", errors.Wrapf(err, "docker compose ps failed: %s", stderr.String()) + } + + // the cli returns one container ID per line; we expect only one container + containers := []string{} + for _, line := range strings.Split(stdout.String(), "\n") { + // we expect the container ID to be 64 characters long + trimmed := strings.TrimSpace(line) + if trimmed != "" && len(trimmed) == 64 { + containers = append(containers, trimmed) + } + } + + if len(containers) > 1 { + return "", fmt.Errorf("multiple containers found for service: %s, IDs: %s", serviceName, containers) + } + + // no container most likely means the service is down; this is not an error + if len(containers) == 0 { + return "", nil + } + + return containers[0], nil +} + +func (dcc *ComposeClient) Version(ctx context.Context) (string, error) { + stdout, _, err := cmd.CheckOutputWithDeadline(ctx, 2*time.Second, []string{}, "docker", "compose", "version", "--short") + + return strings.ReplaceAll(stdout.String(), "\n", ""), err +} + +func (dcc *ComposeClient) updateVersionFile(serviceName, newContent string) error { + versions, err := GetServiceVersions(dcc.versionFile) + if err != nil { + return err + } + + found := false + lines := []string{} + // note: the file is rewritten from the parsed entries, so comments and + // unrecognized lines are dropped on update + for _, service := range versions { + if service.Name == serviceName { + found = true + service.Version = newContent + } + lines = append(lines, fmt.Sprintf("VERSION_%s=%s", service.Name, service.Version)) + } + + if !found { + return fmt.Errorf("could not find VERSION_%s in %s", serviceName, dcc.versionFile) + } + + err = os.WriteFile(dcc.versionFile, []byte(strings.Join(lines, "\n")), 0600) + if err != nil { + return errors.Wrapf(err, "failed to update the version file %s", dcc.versionFile) + } + + return nil +} + +type ServiceVersionLine struct { + Name string + Version string +}
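+
+// GetServiceVersions (below) parses a plain env file with one
+// VERSION_<service>=<tag> entry per line; a hypothetical example:
+//
+//	# managed by blazar
+//	VERSION_node=v17.2.0
+//	VERSION_sidecar=1.4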
+func GetServiceVersions(filename string) ([]ServiceVersionLine, error) { + content, err := os.ReadFile(filename) + if err != nil { + return nil, err + } + + var services []ServiceVersionLine + lines := strings.Split(string(content), "\n") + + for _, line := range lines { + line = strings.TrimSpace(line) + if line == "" || strings.HasPrefix(line, "#") { + continue // Skip empty lines and comments + } + + parts := strings.SplitN(line, "=", 2) + if len(parts) != 2 || !strings.HasPrefix(parts[0], "VERSION_") { + continue // Skip invalid lines + } + + serviceName := strings.TrimPrefix(parts[0], "VERSION_") + services = append(services, ServiceVersionLine{Name: serviceName, Version: parts[1]}) + } + + return services, nil +} + +func LoadServiceVersionFile(versionFile, serviceName string) (string, error) { + versions, err := GetServiceVersions(versionFile) + if err != nil { + return "", err + } + + for _, ver := range versions { + if ver.Name == serviceName { + return ver.Version, nil + } + } + + return "", fmt.Errorf("could not find VERSION_%s in %s", serviceName, versionFile) +} + +func LoadComposeFile(composeFile string) (*composeTypes.Project, error) { + opts, err := compose.NewProjectOptions([]string{composeFile}, + compose.WithDiscardEnvFile, + compose.WithOsEnv, + compose.WithDotEnv, + compose.WithInterpolation(true), + ) + if err != nil { + return nil, err + } + + project, err := compose.ProjectFromOptions(opts) + if err != nil { + return nil, err + } + + // this should not happen, but just in case + if svcs := len(project.Services); svcs == 0 { + return nil, errors.New("no services found in docker compose file") + } + + for _, service := range project.Services { + if service.Image == "" { + return nil, fmt.Errorf("service %s has no image defined", service.Name) + } + } + + return project, nil +} + +func verifyCompose(baseDir string, content string) error { + // We need to create the temp file in the same dir as the compose file, + // since it may have an env_file section and loading will fail if + // those files aren't found + f, err := os.CreateTemp(baseDir, "docker-compose-upgraded.*.blazar") + if err != nil { + return err + } + defer os.Remove(f.Name()) + + if err := os.WriteFile(f.Name(), []byte(content), 0600); err != nil { + return err + } + + _, err = LoadComposeFile(f.Name()) + if err != nil { + return errors.Wrapf(err, "compose validation failed") + } + + return nil +} + +func readAndReplace(path, from, to string) (string, error) { + contentBytes, err := os.ReadFile(path) + if err != nil { + return "", errors.Wrapf(err, "failed to read file") + } + content := string(contentBytes) + + if n := strings.Count(content, from); n != 1 { + return "", fmt.Errorf("expected exactly one occurrence of %s in file, found %d", from, n) + } + + return strings.Replace(content, from, to, 1), nil +} + +func updateComposeFile(composeFile, newContent string) error { + composeDir := filepath.Dir(composeFile) + composeFilePath := filepath.Base(composeFile) + + if err := verifyCompose(composeDir, newContent); err != nil { + return err + } + + // Back up the old file before replacing it + filename := fmt.Sprintf("%s.%s.blazar.bkp", composeFilePath, time.Now().UTC().Format(time.RFC3339)) + err := os.Rename(composeFile, filepath.Join(composeDir, filename)) + if err != nil { + return errors.Wrapf(err, "backup of %s failed", composeFile) + } + + // Replace the old file with the new one + err = os.WriteFile(composeFile, []byte(newContent), 0600) + if err != nil { + return errors.Wrapf(err, "failed to update compose file %s", composeFile) + } + + return nil +} + +func getServiceFromProject(serviceName string, project *composeTypes.Project) (*composeTypes.ServiceConfig, error) { + for _, service := range project.Services { + if service.Name == serviceName { + return &service, nil + } + } + return nil, fmt.Errorf("service %s not found in compose file", serviceName) +}
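Worth noting: updateComposeFile never edits the compose file in place; it validates the candidate content, renames the current file to a timestamped backup, and only then writes the replacement. A sketch of the backup name it produces (timestamp hypothetical):

    // e.g. "docker-compose.yml.2024-11-05T12:00:00Z.blazar.bkp"
    backup := fmt.Sprintf("%s.%s.blazar.bkp", "docker-compose.yml", time.Now().UTC().Format(time.RFC3339))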
"strings" + "testing" + "time" + + "blazar/internal/pkg/config" + "blazar/internal/pkg/testutils" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestValidate(t *testing.T) { + // not checking malformed docker-compose files + // since that should be taken care by the compose-go package + tests := []struct { + name string + dir string + expectedErr error + }{ + { + name: "InvalidComposeNoImage", + dir: "compose-no-image", + expectedErr: errors.New("service s1 has no image defined"), + }, + { + name: "ValidCompose", + dir: "compose-valid", + expectedErr: nil, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + tempDir := testutils.PrepareTestData(t, "docker", test.dir, test.dir) + composePath := filepath.Join(tempDir, "docker-compose.yml") + + if _, err := LoadComposeFile(composePath); test.expectedErr != nil { + assert.Equal(t, test.expectedErr.Error(), err.Error()) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestGetServiceVersionsMultiple(t *testing.T) { + tempDir := testutils.PrepareTestData(t, "docker", "envfile", "envfile") + envPath := filepath.Join(tempDir, "env-with-multiple-services") + versions, err := GetServiceVersions(envPath) + expected := []ServiceVersionLine{ + { + Name: "s1", + Version: "5.3", + }, + { + Name: "S1", + Version: "5.4", + }, + } + require.NoError(t, err) + assert.Equal(t, expected, versions) +} + +func TestGetImageVersionFile(t *testing.T) { + tempDir := testutils.PrepareTestData(t, "docker", "envfile", "envfile") + envPath := filepath.Join(tempDir, "env") + composeTempDir := testutils.PrepareTestData(t, "docker", "compose-valid", "compose-valid") + composePath := filepath.Join(composeTempDir, "docker-compose.yml") + + _, dcc := newDockerComposeClientWithCtx(t, envPath, composePath, config.UpgradeInEnvFile) + + version, err := dcc.GetVersionForService("s1") + require.NoError(t, err) + assert.Equal(t, "5.3", version) +} + +func TestGetImageComposeFile(t *testing.T) { + tempDir := testutils.PrepareTestData(t, "docker", "compose-valid", "compose-valid") + composePath := filepath.Join(tempDir, "docker-compose.yml") + + _, dcc := newDockerComposeClientWithCtx(t, "", composePath, config.UpgradeInComposeFile) + + version, err := dcc.GetVersionForService("s1") + require.NoError(t, err) + assert.Equal(t, "latest", version) +} + +func TestUpgradeImageVersionFile(t *testing.T) { + tempDir := testutils.PrepareTestData(t, "docker", "compose-upgrade-test", "compose-upgrade-test") + composePath := filepath.Join(tempDir, "docker-compose.yml") + + envDir := testutils.PrepareTestData(t, "docker", "envfile", "envfile") + envPath := filepath.Join(envDir, "env") + + ctx, dcc := newDockerComposeClientWithCtx(t, envPath, composePath, config.UpgradeInEnvFile) + originalVersion := "5.3" + testutils.MakeImageWith(t, "abcd/efgh", originalVersion, dockerProvider) + // step 1: assert image name + version, err := dcc.GetVersionForService("s1") + require.NoError(t, err) + assert.Equal(t, originalVersion, version) + + // step 2: upgrade image + testVersion := "unset" + err = dcc.UpgradeImage(ctx, "s1", testVersion) + require.NoError(t, err) + + // step 3: assert that docker-compose.yaml was updated + version, err = dcc.GetVersionForService("s1") + require.NoError(t, err) + assert.Equal(t, testVersion, version) +} + +func TestUpgradeImageMultipleVersionFile(t *testing.T) { + tempDir := testutils.PrepareTestData(t, "docker", "compose-upgrade-test", "compose-upgrade-test") + composePath := 
filepath.Join(tempDir, "docker-compose.yml") + + envDir := testutils.PrepareTestData(t, "docker", "envfile", "envfile") + envPath := filepath.Join(envDir, "env-with-multiple-services") + + ctx, dcc := newDockerComposeClientWithCtx(t, envPath, composePath, config.UpgradeInEnvFile) + originalVersion := "5.3" + testutils.MakeImageWith(t, "abcd/efgh", originalVersion, dockerProvider) + // step 1: assert image name + version, err := dcc.GetVersionForService("s1") + require.NoError(t, err) + assert.Equal(t, originalVersion, version) + + // and assert unrelated service + version, err = dcc.GetVersionForService("S1") + require.NoError(t, err) + assert.Equal(t, "5.4", version) + + // step 2: upgrade image + testVersion := "unset" + err = dcc.UpgradeImage(ctx, "s1", testVersion) + require.NoError(t, err) + + // step 3: assert that the version file was updated + version, err = dcc.GetVersionForService("s1") + require.NoError(t, err) + assert.Equal(t, testVersion, version) + + // step 4: assert that the other service was NOT updated + version, err = dcc.GetVersionForService("S1") + require.NoError(t, err) + assert.Equal(t, "5.4", version) +} +func TestUpgradeImageComposeFile(t *testing.T) { + tempDir := testutils.PrepareTestData(t, "docker", "compose-upgrade-test", "compose-upgrade-test") + composePath := filepath.Join(tempDir, "docker-compose.yml") + + ctx, dcc := newDockerComposeClientWithCtx(t, "", composePath, config.UpgradeInComposeFile) + testVersion := "new-version" + testutils.MakeImageWith(t, "abcd/efgh", testVersion, dockerProvider) + + // step 1: assert image name + // The version is parsed from the compose file, not from the running image + version, err := dcc.GetVersionForService("s1") + require.NoError(t, err) + assert.Equal(t, "ijkl", version) + + // step 2: upgrade image + err = dcc.UpgradeImage(ctx, "s1", testVersion) + require.NoError(t, err) + + // step 3: assert that docker-compose.yaml was updated + version, err = dcc.GetVersionForService("s1") + require.NoError(t, err) + + assert.Equal(t, testVersion, version) +} + +func TestUpDownCompose(t *testing.T) { + tempDir := testutils.PrepareTestData(t, "docker", "compose-valid-template", "compose-valid-template") + composePath := filepath.Join(tempDir, "docker-compose.yml") + + ctx, dcc := newDockerComposeClientWithCtx(t, "", composePath, config.UpgradeInComposeFile) + testutils.MakeImageWith(t, "image", "version", dockerProvider) + + err := testutils.WriteTmpl( + filepath.Join(tempDir, "docker-compose.yml.tmpl"), + struct{ Image string }{Image: "image:version"}, + ) + require.NoError(t, err) + + tests := []struct { + name string + testFn func(t *testing.T, composePath string) + }{ + { + name: "simple down test with docker compose", + testFn: func(t *testing.T, composePath string) { + cmd := exec.Command("docker", "compose", "-f", composePath, "up", "-d") + err := cmd.Run() + require.NoError(t, err) + + err = dcc.Down(ctx, "s1", 0) + require.NoError(t, err) + + isRunning, err := dcc.IsServiceRunning(ctx, "s1", 5*time.Second) + require.NoError(t, err) + assert.False(t, isRunning) + }, + }, + { + name: "simulate case where docker compose down didn't kill the container", + testFn: func(t *testing.T, composePath string) { + // step 1: start test container + err = exec.Command("docker", "compose", "-f", composePath, "up", "-d").Run() + require.NoError(t, err) + + // step 2: get docker binary path + cmd := exec.Command("which", "docker") + buf := new(bytes.Buffer) + cmd.Stdout = buf + require.NoError(t, cmd.Run()) + dockerBinaryPath := 
strings.ReplaceAll(buf.String(), "\n", "") + + // step 3: fake docker binary to simulate docker compose down timeout (container isn't stopped) + err = os.WriteFile(filepath.Join(tempDir, "docker"), []byte(fmt.Sprintf( + `#!/bin/sh + # this is to allow compose client to check if container is running + # we expect format such as: + # compose -f /tmp/.../docker-compose.yml ps -a -q s1 + if [ "$4" = "ps" ]; then %s "$@"; else exit 0; fi + `, dockerBinaryPath, + )), 0700) + require.NoError(t, err) + + oldPath := os.Getenv("PATH") + os.Setenv("PATH", tempDir+":"+oldPath) + + // step 4: with docker compose client try to stop container and expect it still running + err = dcc.Down(ctx, "s1", 0) + require.ErrorIs(t, err, ErrContainerRunning) + + // step 5: restore docker binary, and see the docker client take down the container + os.Setenv("PATH", oldPath) + err = dcc.Down(ctx, "s1", 0) + require.NoError(t, err) + }, + }, + { + name: "simple up test with docker compose", + testFn: func(t *testing.T, composePath string) { + isRunning, err := dcc.IsServiceRunning(ctx, "s1", 5*time.Second) + require.NoError(t, err) + assert.False(t, isRunning) + + err = dcc.Up(ctx, "s1", 10*time.Second) + require.NoError(t, err) + + isRunning, err = dcc.IsServiceRunning(ctx, "s1", 5*time.Second) + require.NoError(t, err) + assert.True(t, isRunning) + + // cleanup + err = exec.Command("docker", "compose", "-f", composePath, "down").Run() + require.NoError(t, err) + }, + }, + { + name: "simulate case where docker compose up didn't start the container", + testFn: func(t *testing.T, _ string) { + // step 1: fake docker binary (must be executable) to simulate docker compose up timeout (container isn't running) + err = os.WriteFile(filepath.Join(tempDir, "docker"), []byte("#!/bin/sh\nexit 0\n"), 0700) + require.NoError(t, err) + + oldPath := os.Getenv("PATH") + os.Setenv("PATH", tempDir+":"+oldPath) + + // step 2: with docker compose client try to start container and expect it to fail + err = dcc.Up(ctx, "s1", 5*time.Second) + require.ErrorIs(t, err, ErrContainerNotRunning) + + os.Setenv("PATH", oldPath) + }, + }, + } + + for _, test := range tests { + test.testFn(t, composePath) + } +} + +func TestRestartEnvCompose(t *testing.T) { + tempDir := testutils.PrepareTestData(t, "docker", "compose-env-echo", "compose-env-echo") + composePath := filepath.Join(tempDir, "docker-compose.yml") + + ctx, dcc := newDockerComposeClientWithCtx(t, "", composePath, config.UpgradeInComposeFile) + testutils.MakeEnvEchoImageWith(t, dockerProvider) + err := dcc.Up(ctx, "s1", 5*time.Second) + require.NoError(t, err) + + // First run without the env var, we get back an empty HALT_HEIGHT + resp, err := http.Get("http://127.0.0.1:4444") + require.NoError(t, err) + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + assert.True(t, strings.Contains(string(body), "HALT_HEIGHT=\n")) + + // Upon restart, we get back a populated HALT_HEIGHT + composeConfig := config.ComposeCli{ + DownTimeout: 5 * time.Second, + UpDeadline: 5 * time.Second, + } + err = dcc.RestartServiceWithHaltHeight(ctx, &composeConfig, "s1", 1234) + require.NoError(t, err) + resp, err = http.Get("http://127.0.0.1:4444") + require.NoError(t, err) + body, err = io.ReadAll(resp.Body) + require.NoError(t, err) + assert.True(t, strings.Contains(string(body), "HALT_HEIGHT=1234\n")) + + // this Down is not meaningful for the test, it only cleans up the test environment + err = dcc.Down(ctx, "s1", 2*time.Second) + require.NoError(t, err) +} diff --git a/internal/pkg/docker/credential_helper.go 
b/internal/pkg/docker/credential_helper.go new file mode 100644 index 0000000..4ab7666 --- /dev/null +++ b/internal/pkg/docker/credential_helper.go @@ -0,0 +1,61 @@ +package docker + +import ( + "context" + "encoding/base64" + "encoding/json" + "time" + + "blazar/internal/pkg/cmd" + "blazar/internal/pkg/errors" + + "github.com/docker/docker-credential-helpers/credentials" + "github.com/docker/docker/api/types/registry" +) + +var ( + ErrCredHelperEmpty = errors.New("docker credential helper returned empty username or password") +) + +type CredentialHelper interface { + GetRegistryAuth(ctx context.Context) (string, error) +} + +type commandCredentialHelper struct { + command string + timeout time.Duration +} + +func (cch *commandCredentialHelper) GetRegistryAuth(ctx context.Context) (string, error) { + stdout, _, err := cmd.CheckOutputWithDeadline(ctx, cch.timeout, []string{}, cch.command, "get") + if err != nil { + return "", errors.Wrapf(err, "error running docker credential helper") + } + + var creds credentials.Credentials + if err := json.Unmarshal(stdout.Bytes(), &creds); err != nil { + return "", errors.Wrapf(err, "error unmarshalling docker credential helper output") + } + if creds.Username == "" || creds.Secret == "" { + return "", ErrCredHelperEmpty + } + + auth := registry.AuthConfig{ + Username: creds.Username, + Password: creds.Secret, + } + + jsonAuth, err := json.Marshal(auth) + if err != nil { + return "", errors.Wrapf(err, "error marshalling docker auth config") + } + + return base64.URLEncoding.EncodeToString(jsonAuth), nil +} + +func NewCredentialHelper(command string, timeout time.Duration) CredentialHelper { + return &commandCredentialHelper{ + command: command, + timeout: timeout, + } +} diff --git a/internal/pkg/docker/credential_helper_test.go b/internal/pkg/docker/credential_helper_test.go new file mode 100644 index 0000000..b493110 --- /dev/null +++ b/internal/pkg/docker/credential_helper_test.go @@ -0,0 +1,65 @@ +package docker + +import ( + "context" + "path/filepath" + "testing" + "time" + + "blazar/internal/pkg/testutils" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestCommandCredentialHelper(t *testing.T) { + tempDir := testutils.PrepareTestData(t, "docker", "docker-credential-helper", "") + + tests := []struct { + name string + file string + assertFn func(error) + }{ + { + name: "Valid", + file: "/valid.sh", + assertFn: func(err error) { + require.NoError(t, err) + }, + }, + { + name: "WrongJson", + file: "/wrong-json.sh", + assertFn: func(err error) { + assert.ErrorIs(t, err, ErrCredHelperEmpty) + }, + }, + { + name: "Exit1", + file: "/exit-1.sh", + assertFn: func(err error) { + assert.ErrorContains(t, err, "exit status 1") + }, + }, + { + name: "Sleep", + file: "/sleep.sh", + assertFn: func(err error) { + require.ErrorIs(t, err, context.DeadlineExceeded) + assert.ErrorContains(t, err, "signal: killed") + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(_ *testing.T) { + credHelper := commandCredentialHelper{ + command: filepath.Join(tempDir, test.file), + timeout: time.Second, + } + + _, err := credHelper.GetRegistryAuth(context.Background()) + test.assertFn(err) + }) + } +} diff --git a/internal/pkg/docker/docker.go b/internal/pkg/docker/docker.go new file mode 100644 index 0000000..565cf8d --- /dev/null +++ b/internal/pkg/docker/docker.go @@ -0,0 +1,133 @@ +package docker + +import ( + "context" + "fmt" + "io" + "strings" + "time" + + "blazar/internal/pkg/config" + 
"blazar/internal/pkg/errors" + "blazar/internal/pkg/log" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" +) + +type Client struct { + client *client.Client + credentialHelper CredentialHelper +} + +func NewClientWithConfig(ctx context.Context, cfg *config.DockerCredentialHelper) (*Client, error) { + if cfg != nil { + return NewClientWithCredsHelper(ctx, cfg.Command, cfg.Timeout) + } + return NewClient(ctx, nil) +} + +func NewClientWithCredsHelper(ctx context.Context, cmd string, timeout time.Duration) (*Client, error) { + return NewClient(ctx, NewCredentialHelper(cmd, timeout)) +} + +func NewClient(ctx context.Context, ch CredentialHelper) (*Client, error) { + dc, err := client.NewClientWithOpts(client.FromEnv) + if err != nil { + return nil, err + } + dc.NegotiateAPIVersion(ctx) + + return &Client{ + client: dc, + credentialHelper: ch, + }, nil +} + +func (dc *Client) IsImagePresent(ctx context.Context, name string) (bool, error) { + images, err := dc.client.ImageList(ctx, types.ImageListOptions{}) + if err != nil { + return false, err + } + + for _, image := range images { + for _, repoTag := range image.RepoTags { + if repoTag == name { + return true, nil + } + } + } + + return false, nil +} + +func (dc *Client) PullImage(ctx context.Context, name string, platform string) error { + imagePullOptions := types.ImagePullOptions{ + Platform: platform, + } + + if dc.credentialHelper != nil { + creds, err := dc.credentialHelper.GetRegistryAuth(ctx) + if err != nil { + return errors.Wrapf(err, "failed to get authorization token using credential helper") + } + imagePullOptions.RegistryAuth = creds + } + + reader, err := dc.client.ImagePull(ctx, name, imagePullOptions) + if err != nil { + return err + } + defer reader.Close() + + logger := log.FromContext(ctx) + + // read and log pull response + buf := make([]byte, 8*1024) + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + n, err := reader.Read(buf) + if err == io.EOF { + return nil + } else if err != nil { + return errors.Wrapf(err, "failed to read from image pull response") + } + logger.Infof("%s", string(buf[:n])) + } + } +} + +func (dc *Client) IsContainerRunning(ctx context.Context, containerID string) (bool, error) { + if containerID == "" { + return false, errors.New("containerId is empty") + } + + containers, err := dc.client.ContainerList(ctx, types.ContainerListOptions{}) + if err != nil { + return false, err + } + + for _, container := range containers { + if container.ID == containerID { + return true, nil + } + } + + return false, nil +} + +func (dc *Client) ContainerList(ctx context.Context, all bool) ([]types.Container, error) { + return dc.client.ContainerList(ctx, types.ContainerListOptions{All: all}) +} + +func ParseImageName(imageName string) (string, string, error) { + parts := strings.Split(imageName, ":") + if len(parts) != 2 { + return "", "", fmt.Errorf("invalid image name: %s", imageName) + } + + return parts[0], parts[1], nil +} diff --git a/internal/pkg/docker/docker_test.go b/internal/pkg/docker/docker_test.go new file mode 100644 index 0000000..16e37b6 --- /dev/null +++ b/internal/pkg/docker/docker_test.go @@ -0,0 +1,84 @@ +package docker + +import ( + "fmt" + "testing" + + "blazar/internal/pkg/errors" + "blazar/internal/pkg/testutils" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestIsImagePresent(t *testing.T) { + img, ver := "img", "tag" + testutils.MakeImageWith(t, img, ver, dockerProvider) + tests := []struct { + 
image string + isPresent bool + }{ + { + image: fmt.Sprintf("%s:%s", img, ver), + isPresent: true, + }, + { + image: fmt.Sprintf("%s:%sinvalidtag", img, ver), + isPresent: false, + }, + } + + ctx, dc := newDockerClientWithCtx(t) + + for _, test := range tests { + isPresent, err := dc.IsImagePresent(ctx, test.image) + require.NoError(t, err) + assert.Equal(t, test.isPresent, isPresent) + } +} + +func TestParseImageName(t *testing.T) { + tests := []struct { + imageName string + image string + tag string + err error + }{ + { + imageName: "testrepo/testimage:latest", + image: "testrepo/testimage", + tag: "latest", + err: nil, + }, + { + imageName: "testrepo/testimage:latest:invalid", + image: "", + tag: "", + err: errors.New("invalid image name: testrepo/testimage:latest:invalid"), + }, + { + imageName: "testrepo/testimage", + image: "", + tag: "", + err: errors.New("invalid image name: testrepo/testimage"), + }, + } + + for _, test := range tests { + image, tag, err := ParseImageName(test.imageName) + assert.Equal(t, test.image, image) + assert.Equal(t, test.tag, tag) + assert.Equal(t, test.err, err) + } +} + +func TestPullImage(t *testing.T) { + ctx, dc := newDockerClientWithCtx(t) + + // We need to use an existing image for this test + err := dc.PullImage(ctx, "luca3m/sleep:latest", "linux/amd64") + require.NoError(t, err) + + err = dc.PullImage(ctx, "luca3m/sleep:invalidtag", "linux/amd64") + assert.Error(t, err) +} diff --git a/internal/pkg/docker/utils_test.go b/internal/pkg/docker/utils_test.go new file mode 100644 index 0000000..f69da21 --- /dev/null +++ b/internal/pkg/docker/utils_test.go @@ -0,0 +1,46 @@ +package docker + +import ( + "context" + "fmt" + "os" + "testing" + + "blazar/internal/pkg/config" + "blazar/internal/pkg/testutils" + + "github.com/stretchr/testify/require" + "github.com/testcontainers/testcontainers-go" +) + +var ( + dockerProvider *testcontainers.DockerProvider +) + +func TestMain(m *testing.M) { + var err error + dockerProvider, err = testcontainers.NewDockerProvider() + if err != nil { + fmt.Printf("failed to create docker provider: %v\n", err) + os.Exit(1) + } + + code := m.Run() + os.Exit(code) +} + +func newDockerClientWithCtx(t *testing.T) (context.Context, *Client) { + ctx := testutils.NewContext() + dc, err := NewClient(ctx, nil) + require.NoError(t, err) + + return ctx, dc +} + +func newDockerComposeClientWithCtx(t *testing.T, versionFile, composeFile string, upgradeMode config.UpgradeMode) (context.Context, *ComposeClient) { + ctx := testutils.NewContext() + dcc, err := NewDefaultComposeClient(ctx, nil, versionFile, composeFile, upgradeMode) + require.NoError(t, err) + + return ctx, dcc +} diff --git a/internal/pkg/errors/errors.go b/internal/pkg/errors/errors.go new file mode 100644 index 0000000..9541db6 --- /dev/null +++ b/internal/pkg/errors/errors.go @@ -0,0 +1,73 @@ +package errors + +import ( + "errors" + "fmt" + "unsafe" +) + +// Wrapf is a convenience function to wrap errors +func Wrapf(err error, text string, args ...interface{}) error { + return fmt.Errorf(text+": %w", append(args, err)...) +}
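+
+// Example sketch (as seen from a caller; doWork and name are hypothetical):
+// wrapping with Wrapf preserves the chain for errors.Is:
+//
+//	if err := doWork(); err != nil {
+//		wrapped := errors.Wrapf(err, "work failed for %s", name)
+//		_ = errors.Is(wrapped, err) // true
+//	}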
+ +// New is equivalent of errors.New +func New(text string) error { + return errors.New(text) +} + +// Is is equivalent of errors.Is +func Is(err, target error) bool { + return errors.Is(err, target) +} + +// Unwrap is equivalent of errors.Unwrap +func Unwrap(err error) error { + return errors.Unwrap(err) +} + +// Join is an equivalent of errors.Join, however it doesn't add a newline when printing errors +func Join(errs ...error) error { + n := 0 + for _, err := range errs { + if err != nil { + n++ + } + } + if n == 0 { + return nil + } + e := &joinError{ + errs: make([]error, 0, n), + } + for _, err := range errs { + if err != nil { + e.errs = append(e.errs, err) + } + } + return e +} + +type joinError struct { + errs []error +} + +func (e *joinError) Error() string { + // Since Join returns nil if every value in errs is nil, + // e.errs cannot be empty. + if len(e.errs) == 1 { + return e.errs[0].Error() + } + + b := []byte(e.errs[0].Error()) + for _, err := range e.errs[1:] { + b = append(b, ':', ' ') + b = append(b, err.Error()...) + } + // At this point, b is non-empty. + return unsafe.String(&b[0], len(b)) +} + +func (e *joinError) Unwrap() []error { + return e.errs +} diff --git a/internal/pkg/file_watcher/file_watcher.go b/internal/pkg/file_watcher/file_watcher.go new file mode 100644 index 0000000..b0844ab --- /dev/null +++ b/internal/pkg/file_watcher/file_watcher.go @@ -0,0 +1,107 @@ +package file_watcher + +import ( + "os" + "time" + + "blazar/internal/pkg/errors" +) + +type FileChangeEvent int + +const ( + FileRemoved FileChangeEvent = iota + 1 + FileCreated + FileModified +) + +type NewFileChangeEvent struct { + Event FileChangeEvent + Error error +} + +type FileWatcher struct { + // last observed modification time of the watched file + lastModTime time.Time + exists bool + ChangeEvents <-chan NewFileChangeEvent + cancel chan<- struct{} +} + +// Returns whether the file exists, the file watcher, and an error +func NewFileWatcher(filepath string, interval time.Duration) (bool, *FileWatcher, error) { + // In case the file doesn't exist, modTime will be "zero" + // so we can still use it to check for "file change" + // as the modTime of a created file will be greater than this + initExists, initModTime, err := getFileStatus(filepath) + if err != nil { + return false, nil, errors.Wrapf(err, "failed to check %s status", filepath) + } + + events := make(chan NewFileChangeEvent) + cancel := make(chan struct{}) + + fw := &FileWatcher{ + lastModTime: initModTime, + exists: initExists, + ChangeEvents: events, + cancel: cancel, + } + + go func() { + ticker := time.NewTicker(interval) + defer ticker.Stop() + for { + select { + case <-ticker.C: + var newEvent NewFileChangeEvent + exists, modTime, err := getFileStatus(filepath) + if err != nil { + newEvent.Error = err + } else { + if exists != fw.exists { + if exists { + fw.lastModTime = modTime + newEvent.Event = FileCreated + } else { + newEvent.Event = FileRemoved + } + fw.exists = exists + } else if modTime.After(fw.lastModTime) { + fw.lastModTime = modTime + newEvent.Event = FileModified + } + } + select { + case events <- newEvent: + case <-cancel: // to prevent a deadlock with the events channel + return + } + // the case above already exits on cancel, but checking here too + // allows an early exit in case cancel is called before the ticker fires + case <-cancel: + return + } + } + }() + return initExists, fw, nil +} + +func (fw *FileWatcher) Cancel() { + fw.cancel <- struct{}{} +}
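+
+// Example sketch (path and interval are hypothetical): watching a file such
+// as upgrade-info.json and reacting to change events:
+//
+//	_, fw, err := NewFileWatcher("/data/upgrade-info.json", time.Second)
+//	if err != nil {
+//		return err
+//	}
+//	defer fw.Cancel()
+//	for ev := range fw.ChangeEvents {
+//		if ev.Error == nil && ev.Event == FileCreated {
+//			// react to the file showing up
+//		}
+//	}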
+ +// Checks if the file exists and returns the timestamp of the last modification; +// returns exists, modTime, error +func getFileStatus(file string) (bool, time.Time, error) { + stat, err := os.Stat(file) + + switch { + case os.IsNotExist(err): + return false, time.Time{}, nil + case err != nil: + return false, time.Time{}, err + } + + return true, stat.ModTime(), nil +} diff --git a/internal/pkg/log/logger/logger.go b/internal/pkg/log/logger/logger.go new file mode 100644 index 0000000..d2f66bf --- /dev/null +++ b/internal/pkg/log/logger/logger.go @@ -0,0 +1,35 @@ +package logger + +import ( + "context" + "os" + "time" + + "github.com/rs/zerolog" +) + +type loggerKey struct{} + +func NewLogger() *zerolog.Logger { + output := zerolog.ConsoleWriter{Out: os.Stdout, TimeFormat: time.Kitchen} + logger := zerolog.New(output).With().Str("module", "blazar").Timestamp().Logger() + return &logger +} + +func SetGlobalLogLevel(level int8) { + zerolog.SetGlobalLevel(zerolog.Level(level)) +} + +// WithContext returns a new context with the logger +func WithContext(ctx context.Context, logger *zerolog.Logger) context.Context { + return context.WithValue(ctx, loggerKey{}, logger) +} + +// FromContext returns the logger in the context if it exists, otherwise a new logger is returned +func FromContext(ctx context.Context) *zerolog.Logger { + logger := ctx.Value(loggerKey{}) + if l, ok := logger.(*zerolog.Logger); ok { + return l + } + return NewLogger() +} diff --git a/internal/pkg/log/multi.go b/internal/pkg/log/multi.go new file mode 100644 index 0000000..e648cb0 --- /dev/null +++ b/internal/pkg/log/multi.go @@ -0,0 +1,147 @@ +package log + +import ( + "context" + "fmt" + "strings" + + "github.com/rs/zerolog" + + "blazar/internal/pkg/log/logger" + "blazar/internal/pkg/log/notification" +) + +type MultiLogger struct { + logger *zerolog.Logger + notifier *notification.FallbackNotifier + + level zerolog.Level + msg string + err error +} + +func FromContext(ctx context.Context) *MultiLogger { + return &MultiLogger{ + logger: logger.FromContext(ctx), + notifier: notification.FromContextFallback(ctx), + } +} + +func (c *MultiLogger) WithContext(ctx context.Context) context.Context { + ctx = logger.WithContext(ctx, c.logger) + ctx = notification.WithContextFallback(ctx, c.notifier) + + return ctx +} + +func (c *MultiLogger) With(key, value string) *MultiLogger { + newLogger := c.logger.With().Str(key, value).Logger() + c.logger = &newLogger + return c +} + +func (c *MultiLogger) Debug(msg string) *MultiLogger { + l := newLogger(c, msg, zerolog.DebugLevel) + if c.err != nil { + l.logger.Debug().Err(c.err).Msg(msg) + return l.Err(c.err) + } + + l.logger.Debug().Msg(msg) + + return l +} + +func (c *MultiLogger) Debugf(format string, v ...interface{}) *MultiLogger { + return c.Debug(fmt.Sprintf(format, v...)) +} + +func (c *MultiLogger) Info(msg string) *MultiLogger { + l := newLogger(c, msg, zerolog.InfoLevel) + if c.err != nil { + l.logger.Info().Err(c.err).Msg(msg) + return l.Err(c.err) + } + + l.logger.Info().Msg(msg) + + return l +} + +func (c *MultiLogger) Infof(format string, v ...interface{}) *MultiLogger { + return c.Info(fmt.Sprintf(format, v...)) +} + +func (c *MultiLogger) Warn(msg string) *MultiLogger { + l := newLogger(c, msg, zerolog.WarnLevel) + if c.err != nil { + l.logger.Warn().Err(c.err).Msg(msg) + return l.Err(c.err) + } + + l.logger.Warn().Msg(msg) + + return l +} + +func (c *MultiLogger) Warnf(format string, v ...interface{}) *MultiLogger { + return c.Warn(fmt.Sprintf(format, v...)) +}
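+
+// Example sketch (as seen from a caller): a single chained call that both
+// logs and notifies; the context is expected to carry the logger and the
+// fallback notifier:
+//
+//	log.FromContext(ctx).Err(err).Error("upgrade failed").Notify(ctx)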
+func (c *MultiLogger) Error(msg string) *MultiLogger { + l := newLogger(c, msg, zerolog.ErrorLevel) + l.logger.Error().Err(c.err).Msg(msg) + + return l.Err(c.err) +} + +func (c *MultiLogger) Errorf(err error, format string, v ...interface{}) *MultiLogger { + return c.Err(err).Error(fmt.Sprintf(format, v...)) +} + +func (c *MultiLogger) Notify(ctx context.Context, msgs ...string) *MultiLogger { + msg := c.msg + if len(msgs) > 0 { + msg = strings.Join(msgs, " ") + } + + switch c.level { + case zerolog.InfoLevel: + c.notifier.NotifyInfo(ctx, msg) + case zerolog.WarnLevel: + if c.err == nil { + c.notifier.NotifyWarn(ctx, msg) + } else { + c.notifier.NotifyWarnWithErr(ctx, msg, c.err) + } + case zerolog.ErrorLevel: + c.notifier.NotifyErr(ctx, msg, c.err) + default: + panic("unsupported log level for notification") + } + + return c +} + +func (c *MultiLogger) Notifyf(ctx context.Context, format string, v ...interface{}) *MultiLogger { + return c.Notify(ctx, fmt.Sprintf(format, v...)) +} + +func (c *MultiLogger) Err(err error) *MultiLogger { + return &MultiLogger{ + logger: c.logger, + notifier: c.notifier, + level: c.level, + msg: c.msg, + err: err, + } +} + +func newLogger(c *MultiLogger, msg string, level zerolog.Level) *MultiLogger { + return &MultiLogger{ + logger: c.logger, + notifier: c.notifier, + level: level, + msg: msg, + } +} diff --git a/internal/pkg/log/notification/fallback.go b/internal/pkg/log/notification/fallback.go new file mode 100644 index 0000000..86d7d33 --- /dev/null +++ b/internal/pkg/log/notification/fallback.go @@ -0,0 +1,142 @@ +package notification + +import ( + "context" + "sync" + + "blazar/internal/pkg/config" + "blazar/internal/pkg/metrics" + + "github.com/rs/zerolog" +) + +type FallbackNotifier struct { + metrics *metrics.Metrics + logger *zerolog.Logger + notifier Notifier + + // map the upgrade height to the first message of the thread + // and group the messages into a threaded conversation + // if the underlying notifier supports it + // TODO: the thread mapping is not persisted, so it will be lost on restart + lock sync.RWMutex + upgradeThreads map[int64]string +} + +// NewFallbackNotifier creates a new notifier with fallback to logger +func NewFallbackNotifier(cfg *config.Config, metrics *metrics.Metrics, logger *zerolog.Logger, hostname string) *FallbackNotifier { + return &FallbackNotifier{ + metrics: metrics, + logger: logger, + notifier: NewNotifier(cfg, hostname), + + lock: sync.RWMutex{}, + upgradeThreads: make(map[int64]string), + } +} + +func (cn *FallbackNotifier) NotifyInfo(ctx context.Context, message string) { + if cn.notifier != nil { + parentMessageID, upgradeHeight := cn.getParentMessage(ctx) + messageID, err := cn.notifier.NotifyInfo(message, MsgOptionParent(parentMessageID)) + if err != nil { + if cn.metrics != nil { + cn.metrics.NotifErrs.Inc() + } + cn.logger.Error().Err(err).Msg("Failed to notify") + } else { + cn.registerUpgradeThread(upgradeHeight, parentMessageID, messageID) + } + } +} + +func (cn *FallbackNotifier) NotifyWarnWithErr(ctx context.Context, message string, err error) { + if cn.notifier != nil { + parentMessageID, upgradeHeight := cn.getParentMessage(ctx) + messageID, err := cn.notifier.NotifyWarn(message, MsgOptionParent(parentMessageID), MsgOptionError(err)) + if err != nil { + if cn.metrics != nil { + cn.metrics.NotifErrs.Inc() + } + cn.logger.Error().Err(err).Msg("Failed to notify") + } else { + cn.registerUpgradeThread(upgradeHeight, parentMessageID, messageID) + } + } +} + +func (cn *FallbackNotifier) NotifyWarn(ctx context.Context, message string) { + if cn.notifier != nil { + parentMessageID, 
upgradeHeight := cn.getParentMessage(ctx) + messageID, err := cn.notifier.NotifyWarn(message, MsgOptionParent(parentMessageID)) + if err != nil { + if cn.metrics != nil { + cn.metrics.NotifErrs.Inc() + } + cn.logger.Error().Err(err).Msg("Failed to notify") + } else { + cn.registerUpgradeThread(upgradeHeight, parentMessageID, messageID) + } + } +} + +func (cn *FallbackNotifier) NotifyErr(ctx context.Context, message string, err error) { + if cn.notifier != nil { + parentMessageID, upgradeHeight := cn.getParentMessage(ctx) + messageID, err := cn.notifier.NotifyErr(message, MsgOptionParent(parentMessageID), MsgOptionError(err)) + if err != nil { + if cn.metrics != nil { + cn.metrics.NotifErrs.Inc() + } + cn.logger.Error().Err(err).Msg("Failed to notify") + } else { + cn.registerUpgradeThread(upgradeHeight, parentMessageID, messageID) + } + } +} + +func (cn *FallbackNotifier) registerUpgradeThread(upgradeHeight int64, parentMessageID, messageID string) { + if upgradeHeight != 0 && parentMessageID == "" { + cn.lock.Lock() + defer cn.lock.Unlock() + + cn.upgradeThreads[upgradeHeight] = messageID + } +} + +func (cn *FallbackNotifier) getParentMessage(ctx context.Context) (parentMessageID string, upgradeHeight int64) { + cn.lock.RLock() + defer cn.lock.RUnlock() + + // check if upgrade height is set + if upgradeHeight := ctx.Value(upgradeHeightKey{}); upgradeHeight != nil { + if parentMessageID, ok := cn.upgradeThreads[upgradeHeight.(int64)]; ok { + return parentMessageID, upgradeHeight.(int64) + } + return "", upgradeHeight.(int64) + } + + return "", 0 +} + +type fallbackNotifierKey struct{} +type upgradeHeightKey struct{} + +// WithUpgradeHeight returns a new context with the upgrade height +func WithUpgradeHeight(ctx context.Context, height int64) context.Context { + return context.WithValue(ctx, upgradeHeightKey{}, height) +} + +// WithContext returns a new context with the fallback notifier +func WithContextFallback(ctx context.Context, notifier *FallbackNotifier) context.Context { + return context.WithValue(ctx, fallbackNotifierKey{}, notifier) +} + +// FromContext returns fallback notifier from context or nil +func FromContextFallback(ctx context.Context) *FallbackNotifier { + notifier := ctx.Value(fallbackNotifierKey{}) + if l, ok := notifier.(*FallbackNotifier); ok { + return l + } + return nil +} diff --git a/internal/pkg/log/notification/notification.go b/internal/pkg/log/notification/notification.go new file mode 100644 index 0000000..6032e2a --- /dev/null +++ b/internal/pkg/log/notification/notification.go @@ -0,0 +1,45 @@ +package notification + +import ( + "blazar/internal/pkg/config" +) + +type Notifier interface { + NotifyInfo(message string, opts ...MsgOption) (string, error) + NotifyWarn(message string, opts ...MsgOption) (string, error) + NotifyErr(message string, opts ...MsgOption) (string, error) +} + +type notifierConfig struct { + parent string + err error +} + +type MsgOption func(*notifierConfig) + +func MsgOptionParent(parentID string) MsgOption { + return func(config *notifierConfig) { + config.parent = parentID + } +} + +func MsgOptionError(err error) MsgOption { + return func(config *notifierConfig) { + config.err = err + } +} + +func NewNotifier(cfg *config.Config, hostname string) Notifier { + if cfg.Slack != nil { + return NewSlackNotifierFromConfig(cfg, hostname) + } + return nil +} + +func optsToConfig(opts []MsgOption) notifierConfig { + var cfg notifierConfig + for _, opt := range opts { + opt(&cfg) + } + return cfg +} diff --git 
a/internal/pkg/log/notification/slack.go b/internal/pkg/log/notification/slack.go new file mode 100644 index 0000000..05161b8 --- /dev/null +++ b/internal/pkg/log/notification/slack.go @@ -0,0 +1,125 @@ +package notification + +import ( + "fmt" + + "blazar/internal/pkg/config" + + "github.com/slack-go/slack" +) + +type SlackNotifier struct { + composeFile string + hostname string + + // webhook client + webhookURL string + + // bot client + client *slack.Client + channel string + groupMessages bool +} + +func NewSlackNotifierFromConfig(cfg *config.Config, hostname string) *SlackNotifier { + if cfg.Slack.BotNotifier != nil { + return NewSlackBotNotifier( + cfg.Slack.BotNotifier.AuthToken, + cfg.Slack.BotNotifier.Channel, + cfg.ComposeFile, + hostname, + cfg.Slack.BotNotifier.GroupMessages, + ) + } + return NewSlackWebhookNotifier(cfg.Slack.WebhookNotifier.WebhookURL, cfg.ComposeFile, hostname) +} + +func NewSlackWebhookNotifier(webhookURL, composeFile, hostname string) *SlackNotifier { + return &SlackNotifier{ + client: nil, + webhookURL: webhookURL, + hostname: hostname, + composeFile: composeFile, + // using thread messages is only available with the slack bot client + // because the webhook doesn't return the thread_ts of the message + groupMessages: false, + } +} + +func NewSlackBotNotifier(token, channel, composeFile, hostname string, groupMessages bool) *SlackNotifier { + return &SlackNotifier{ + client: slack.New(token), + webhookURL: "", + hostname: hostname, + composeFile: composeFile, + channel: channel, + groupMessages: groupMessages, + } +} + +func (s *SlackNotifier) NotifyInfo(message string, opts ...MsgOption) (string, error) { + msg := "ℹ️ " + message + return s.send(msg, opts) +} + +func (s *SlackNotifier) NotifyWarn(message string, opts ...MsgOption) (string, error) { + msg := fmt.Sprintf("⚠️ %s", message) + return s.send(msg, opts) +} + +func (s *SlackNotifier) NotifyErr(message string, opts ...MsgOption) (string, error) { + msg := fmt.Sprintf("🚨 %s", message) + return s.send(msg, opts) +} + +func (s *SlackNotifier) send(message string, opts []MsgOption) (string, error) { + cfg := optsToConfig(opts) + + contextBlock := slack.NewContextBlock( + "context", + slack.NewTextBlockObject(slack.PlainTextType, fmt.Sprintf("hostname: %s", s.hostname), true, false), + slack.NewTextBlockObject(slack.PlainTextType, fmt.Sprintf("compose file: %s", s.composeFile), true, false), + ) + + fallbackMsg := fmt.Sprintf("%s\nhostname: %s\tcompose file: %s", message, s.hostname, s.composeFile) + if cfg.err != nil { + fallbackMsg = fmt.Sprintf("%s\nError: %s", message, cfg.err.Error()) + contextBlock.ContextElements.Elements = append([]slack.MixedElement{ + slack.NewTextBlockObject(slack.PlainTextType, fmt.Sprintf("error: %s", cfg.err.Error()), true, false), + }, contextBlock.ContextElements.Elements...) + } + + payload := slack.Attachment{ + Text: fallbackMsg, + Blocks: slack.Blocks{ + BlockSet: []slack.Block{ + slack.SectionBlock{ + Type: "section", + Text: &slack.TextBlockObject{ + Type: "plain_text", + Text: message, + Emoji: true, + }, + }, + contextBlock, + }, + }, + } + + if s.client != nil { + var options []slack.MsgOption + if s.groupMessages && cfg.parent != "" { + options = append(options, slack.MsgOptionTS(cfg.parent)) + } + + options = append(options, slack.MsgOptionBlocks(payload.Blocks.BlockSet...)) + + _, timestamp, err := s.client.PostMessage(s.channel, options...)
+ return timestamp, err + } + + msg := slack.WebhookMessage{ + Attachments: []slack.Attachment{payload}, + } + return "", slack.PostWebhook(s.webhookURL, &msg) +} diff --git a/internal/pkg/metrics/metrics.go b/internal/pkg/metrics/metrics.go new file mode 100644 index 0000000..2ba2ac0 --- /dev/null +++ b/internal/pkg/metrics/metrics.go @@ -0,0 +1,98 @@ +package metrics + +import ( + "net/http" + + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +const ( + namespace = "blazar" +) + +type Metrics struct { + Up prometheus.Gauge + State *prometheus.GaugeVec + BlocksToUpgrade *prometheus.GaugeVec + UpwErrs prometheus.Counter + UiwErrs prometheus.Counter + HwErrs prometheus.Counter + NotifErrs prometheus.Counter +} + +func NewMetrics(composeFile string, hostname string, version string) (*Metrics, error) { + labels := prometheus.Labels{"hostname": hostname, "compose_file": composeFile, "version": version} + + metrics := &Metrics{ + Up: promauto.NewGauge( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "up", + Help: "Is blazar up?", + ConstLabels: labels, + }, + ), + State: promauto.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "upgrade_state", + Help: "ID of the current stage of the upgrade process", + ConstLabels: labels, + }, + []string{"upgrade_height", "upgrade_name", "proposal_status"}, + ), + BlocksToUpgrade: promauto.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "blocks_to_upgrade_height", + Help: "Number of blocks to the upgrade height", + ConstLabels: labels, + }, + []string{"upgrade_height", "upgrade_name", "proposal_status"}, + ), + UpwErrs: promauto.NewCounter( + prometheus.CounterOpts{ + Namespace: namespace, + Name: "upgrade_proposals_watcher_errors", + Help: "Upgrade proposals watcher error count", + ConstLabels: labels, + }, + ), + UiwErrs: promauto.NewCounter( + prometheus.CounterOpts{ + Namespace: namespace, + Name: "upgrade_info_watcher_errors", + Help: "upgrade-info.json watcher error count", + ConstLabels: labels, + }, + ), + HwErrs: promauto.NewCounter( + prometheus.CounterOpts{ + Namespace: namespace, + Name: "height_watcher_errors", + Help: "Chain height watcher error count", + ConstLabels: labels, + }, + ), + NotifErrs: promauto.NewCounter( + prometheus.CounterOpts{ + Namespace: namespace, + Name: "notifier_errors", + Help: "Notifier error count", + ConstLabels: labels, + }, + ), + } + + return metrics, nil +} + +func RegisterHandler(mux *runtime.ServeMux) error { + handler := promhttp.Handler() + return mux.HandlePath("GET", "/metrics", func(w http.ResponseWriter, r *http.Request, _ map[string]string) { + handler.ServeHTTP(w, r) + }) +} diff --git a/internal/pkg/proto/blazar/blazar.pb.go b/internal/pkg/proto/blazar/blazar.pb.go new file mode 100644 index 0000000..167fcf9 --- /dev/null +++ b/internal/pkg/proto/blazar/blazar.pb.go @@ -0,0 +1,182 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.35.1 +// protoc v5.28.2 +// source: blazar.proto + +package blazar + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type GetLatestHeightRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetLatestHeightRequest) Reset() { + *x = GetLatestHeightRequest{} + mi := &file_blazar_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetLatestHeightRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetLatestHeightRequest) ProtoMessage() {} + +func (x *GetLatestHeightRequest) ProtoReflect() protoreflect.Message { + mi := &file_blazar_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetLatestHeightRequest.ProtoReflect.Descriptor instead. +func (*GetLatestHeightRequest) Descriptor() ([]byte, []int) { + return file_blazar_proto_rawDescGZIP(), []int{0} +} + +type GetLatestHeightResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Network string `protobuf:"bytes,2,opt,name=network,proto3" json:"network,omitempty"` +} + +func (x *GetLatestHeightResponse) Reset() { + *x = GetLatestHeightResponse{} + mi := &file_blazar_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetLatestHeightResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetLatestHeightResponse) ProtoMessage() {} + +func (x *GetLatestHeightResponse) ProtoReflect() protoreflect.Message { + mi := &file_blazar_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetLatestHeightResponse.ProtoReflect.Descriptor instead. 
+func (*GetLatestHeightResponse) Descriptor() ([]byte, []int) { + return file_blazar_proto_rawDescGZIP(), []int{1} +} + +func (x *GetLatestHeightResponse) GetHeight() int64 { + if x != nil { + return x.Height + } + return 0 +} + +func (x *GetLatestHeightResponse) GetNetwork() string { + if x != nil { + return x.Network + } + return "" +} + +var File_blazar_proto protoreflect.FileDescriptor + +var file_blazar_proto_rawDesc = []byte{ + 0x0a, 0x0c, 0x62, 0x6c, 0x61, 0x7a, 0x61, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x18, + 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x4b, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x4c, + 0x61, 0x74, 0x65, 0x73, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6e, + 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x32, 0x51, 0x0a, 0x06, 0x42, 0x6c, 0x61, 0x7a, 0x61, 0x72, 0x12, + 0x47, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x73, 0x74, 0x65, 0x73, 0x74, 0x48, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x12, 0x17, 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x48, + 0x65, 0x69, 0x67, 0x68, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x47, + 0x65, 0x74, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x1b, 0x5a, 0x19, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x62, + 0x6c, 0x61, 0x7a, 0x61, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_blazar_proto_rawDescOnce sync.Once + file_blazar_proto_rawDescData = file_blazar_proto_rawDesc +) + +func file_blazar_proto_rawDescGZIP() []byte { + file_blazar_proto_rawDescOnce.Do(func() { + file_blazar_proto_rawDescData = protoimpl.X.CompressGZIP(file_blazar_proto_rawDescData) + }) + return file_blazar_proto_rawDescData +} + +var file_blazar_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_blazar_proto_goTypes = []any{ + (*GetLatestHeightRequest)(nil), // 0: GetLatestHeightRequest + (*GetLatestHeightResponse)(nil), // 1: GetLatestHeightResponse +} +var file_blazar_proto_depIdxs = []int32{ + 0, // 0: Blazar.GetLastestHeight:input_type -> GetLatestHeightRequest + 1, // 1: Blazar.GetLastestHeight:output_type -> GetLatestHeightResponse + 1, // [1:2] is the sub-list for method output_type + 0, // [0:1] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_blazar_proto_init() } +func file_blazar_proto_init() { + if File_blazar_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_blazar_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_blazar_proto_goTypes, + DependencyIndexes: file_blazar_proto_depIdxs, + MessageInfos: file_blazar_proto_msgTypes, + }.Build() + File_blazar_proto = out.File + file_blazar_proto_rawDesc = nil + file_blazar_proto_goTypes = nil + 
file_blazar_proto_depIdxs = nil +} diff --git a/internal/pkg/proto/blazar/blazar.pb.gw.go b/internal/pkg/proto/blazar/blazar.pb.gw.go new file mode 100644 index 0000000..e11855e --- /dev/null +++ b/internal/pkg/proto/blazar/blazar.pb.gw.go @@ -0,0 +1,163 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: blazar.proto + +/* +Package blazar is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package blazar + +import ( + "context" + "io" + "net/http" + + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = metadata.Join + +func request_Blazar_GetLastestHeight_0(ctx context.Context, marshaler runtime.Marshaler, client BlazarClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetLatestHeightRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetLastestHeight(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Blazar_GetLastestHeight_0(ctx context.Context, marshaler runtime.Marshaler, server BlazarServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetLatestHeightRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetLastestHeight(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterBlazarHandlerServer registers the http handlers for service Blazar to "mux". +// UnaryRPC :call BlazarServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterBlazarHandlerFromEndpoint instead. 
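A minimal sketch of the endpoint-based registration recommended just above, assuming the gRPC server listens on localhost:9090, the HTTP gateway on :8080, and a module-relative import path of blazar/internal/pkg/proto/blazar (all three are assumptions, not taken from this patch):

```go
// Illustrative wiring; addresses and the import path are assumptions.
package main

import (
	"context"
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"blazar/internal/pkg/proto/blazar" // assumed import path, per the go_package option
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	mux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
	// Dial the gRPC endpoint and expose the Blazar service as JSON over HTTP.
	if err := blazar.RegisterBlazarHandlerFromEndpoint(ctx, mux, "localhost:9090", opts); err != nil {
		log.Fatal(err)
	}
	log.Fatal(http.ListenAndServe(":8080", mux))
}
```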
+func RegisterBlazarHandlerServer(ctx context.Context, mux *runtime.ServeMux, server BlazarServer) error { + + mux.Handle("POST", pattern_Blazar_GetLastestHeight_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/.Blazar/GetLastestHeight", runtime.WithHTTPPathPattern("/Blazar/GetLastestHeight")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Blazar_GetLastestHeight_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_Blazar_GetLastestHeight_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterBlazarHandlerFromEndpoint is same as RegisterBlazarHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterBlazarHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.DialContext(ctx, endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterBlazarHandler(ctx, mux, conn) +} + +// RegisterBlazarHandler registers the http handlers for service Blazar to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterBlazarHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterBlazarHandlerClient(ctx, mux, NewBlazarClient(conn)) +} + +// RegisterBlazarHandlerClient registers the http handlers for service Blazar +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "BlazarClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "BlazarClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "BlazarClient" to call the correct interceptors. 
+func RegisterBlazarHandlerClient(ctx context.Context, mux *runtime.ServeMux, client BlazarClient) error { + + mux.Handle("POST", pattern_Blazar_GetLastestHeight_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/.Blazar/GetLastestHeight", runtime.WithHTTPPathPattern("/Blazar/GetLastestHeight")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Blazar_GetLastestHeight_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_Blazar_GetLastestHeight_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_Blazar_GetLastestHeight_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"Blazar", "GetLastestHeight"}, "")) +) + +var ( + forward_Blazar_GetLastestHeight_0 = runtime.ForwardResponseMessage +) diff --git a/internal/pkg/proto/blazar/blazar_grpc.pb.go b/internal/pkg/proto/blazar/blazar_grpc.pb.go new file mode 100644 index 0000000..39a53df --- /dev/null +++ b/internal/pkg/proto/blazar/blazar_grpc.pb.go @@ -0,0 +1,121 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.28.2 +// source: blazar.proto + +package blazar + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + Blazar_GetLastestHeight_FullMethodName = "/Blazar/GetLastestHeight" +) + +// BlazarClient is the client API for Blazar service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type BlazarClient interface { + GetLastestHeight(ctx context.Context, in *GetLatestHeightRequest, opts ...grpc.CallOption) (*GetLatestHeightResponse, error) +} + +type blazarClient struct { + cc grpc.ClientConnInterface +} + +func NewBlazarClient(cc grpc.ClientConnInterface) BlazarClient { + return &blazarClient{cc} +} + +func (c *blazarClient) GetLastestHeight(ctx context.Context, in *GetLatestHeightRequest, opts ...grpc.CallOption) (*GetLatestHeightResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetLatestHeightResponse) + err := c.cc.Invoke(ctx, Blazar_GetLastestHeight_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +// BlazarServer is the server API for Blazar service. +// All implementations must embed UnimplementedBlazarServer +// for forward compatibility. 
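An illustrative implementation of the interface declared just below: embed UnimplementedBlazarServer by value and override the one RPC. The returned height and network are placeholders, not values from this patch.

```go
// Illustrative only; this type is not part of the patch.
type heightServer struct {
	UnimplementedBlazarServer // embedded by value for forward compatibility
}

func (s *heightServer) GetLastestHeight(ctx context.Context, _ *GetLatestHeightRequest) (*GetLatestHeightResponse, error) {
	// A real implementation would query the node for its latest block height.
	return &GetLatestHeightResponse{Height: 21_000_000, Network: "cosmoshub-4"}, nil
}
```

Registration then reduces to s := grpc.NewServer(); RegisterBlazarServer(s, &heightServer{}); s.Serve(lis).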
+type BlazarServer interface {
+	GetLastestHeight(context.Context, *GetLatestHeightRequest) (*GetLatestHeightResponse, error)
+	mustEmbedUnimplementedBlazarServer()
+}
+
+// UnimplementedBlazarServer must be embedded to have
+// forward compatible implementations.
+//
+// NOTE: this should be embedded by value instead of pointer to avoid a nil
+// pointer dereference when methods are called.
+type UnimplementedBlazarServer struct{}
+
+func (UnimplementedBlazarServer) GetLastestHeight(context.Context, *GetLatestHeightRequest) (*GetLatestHeightResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetLastestHeight not implemented")
+}
+func (UnimplementedBlazarServer) mustEmbedUnimplementedBlazarServer() {}
+func (UnimplementedBlazarServer) testEmbeddedByValue()               {}
+
+// UnsafeBlazarServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to BlazarServer will
+// result in compilation errors.
+type UnsafeBlazarServer interface {
+	mustEmbedUnimplementedBlazarServer()
+}
+
+func RegisterBlazarServer(s grpc.ServiceRegistrar, srv BlazarServer) {
+	// If the following call panics, it indicates UnimplementedBlazarServer was
+	// embedded by pointer and is nil. This will cause panics if an
+	// unimplemented method is ever invoked, so we test this at initialization
+	// time to prevent it from happening at runtime later due to I/O.
+	if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
+		t.testEmbeddedByValue()
+	}
+	s.RegisterService(&Blazar_ServiceDesc, srv)
+}
+
+func _Blazar_GetLastestHeight_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(GetLatestHeightRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(BlazarServer).GetLastestHeight(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: Blazar_GetLastestHeight_FullMethodName,
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(BlazarServer).GetLastestHeight(ctx, req.(*GetLatestHeightRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+// Blazar_ServiceDesc is the grpc.ServiceDesc for Blazar service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var Blazar_ServiceDesc = grpc.ServiceDesc{
+	ServiceName: "Blazar",
+	HandlerType: (*BlazarServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "GetLastestHeight",
+			Handler:    _Blazar_GetLastestHeight_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "blazar.proto",
+}
diff --git a/internal/pkg/proto/daemon/checks.pb.go b/internal/pkg/proto/daemon/checks.pb.go
new file mode 100644
index 0000000..7cad74d
--- /dev/null
+++ b/internal/pkg/proto/daemon/checks.pb.go
@@ -0,0 +1,245 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.1
+// protoc v5.28.2
+// source: checks.proto
+
+package daemon
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type PreCheck int32 + +const ( + // Try to fetch the docker image before upgrade + PreCheck_PULL_DOCKER_IMAGE PreCheck = 0 + // Set the node's halt-height before non-governance coordinated upgrades + PreCheck_SET_HALT_HEIGHT PreCheck = 1 +) + +// Enum value maps for PreCheck. +var ( + PreCheck_name = map[int32]string{ + 0: "PULL_DOCKER_IMAGE", + 1: "SET_HALT_HEIGHT", + } + PreCheck_value = map[string]int32{ + "PULL_DOCKER_IMAGE": 0, + "SET_HALT_HEIGHT": 1, + } +) + +func (x PreCheck) Enum() *PreCheck { + p := new(PreCheck) + *p = x + return p +} + +func (x PreCheck) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (PreCheck) Descriptor() protoreflect.EnumDescriptor { + return file_checks_proto_enumTypes[0].Descriptor() +} + +func (PreCheck) Type() protoreflect.EnumType { + return &file_checks_proto_enumTypes[0] +} + +func (x PreCheck) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use PreCheck.Descriptor instead. +func (PreCheck) EnumDescriptor() ([]byte, []int) { + return file_checks_proto_rawDescGZIP(), []int{0} +} + +type PostCheck int32 + +const ( + // Check if the gRPC endpoint is reachable + PostCheck_GRPC_RESPONSIVE PostCheck = 0 + // Check if node reached the next block height + PostCheck_CHAIN_HEIGHT_INCREASED PostCheck = 1 + // Check if we signed the first block post upgrade + PostCheck_FIRST_BLOCK_VOTED PostCheck = 2 +) + +// Enum value maps for PostCheck. +var ( + PostCheck_name = map[int32]string{ + 0: "GRPC_RESPONSIVE", + 1: "CHAIN_HEIGHT_INCREASED", + 2: "FIRST_BLOCK_VOTED", + } + PostCheck_value = map[string]int32{ + "GRPC_RESPONSIVE": 0, + "CHAIN_HEIGHT_INCREASED": 1, + "FIRST_BLOCK_VOTED": 2, + } +) + +func (x PostCheck) Enum() *PostCheck { + p := new(PostCheck) + *p = x + return p +} + +func (x PostCheck) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (PostCheck) Descriptor() protoreflect.EnumDescriptor { + return file_checks_proto_enumTypes[1].Descriptor() +} + +func (PostCheck) Type() protoreflect.EnumType { + return &file_checks_proto_enumTypes[1] +} + +func (x PostCheck) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use PostCheck.Descriptor instead. +func (PostCheck) EnumDescriptor() ([]byte, []int) { + return file_checks_proto_rawDescGZIP(), []int{1} +} + +type CheckStatus int32 + +const ( + // Check is waiting to be executed + CheckStatus_PENDING CheckStatus = 0 + // Check is currently being executed + CheckStatus_RUNNING CheckStatus = 1 + // Check execution has finished + CheckStatus_FINISHED CheckStatus = 2 +) + +// Enum value maps for CheckStatus. 
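Each generated enum satisfies fmt.Stringer through name maps like the ones below, so values print as their proto names. A small hypothetical helper (logCheckStatus is not part of this patch):

```go
// logCheckStatus is a hypothetical helper, not part of the generated file;
// it leans on the generated String() methods backed by the name maps below.
func logCheckStatus(pre PreCheck, status CheckStatus) string {
	// e.g. logCheckStatus(PreCheck_PULL_DOCKER_IMAGE, CheckStatus_RUNNING)
	// returns "PULL_DOCKER_IMAGE: RUNNING".
	return fmt.Sprintf("%s: %s", pre, status) // assumes fmt is imported
}
```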
+var ( + CheckStatus_name = map[int32]string{ + 0: "PENDING", + 1: "RUNNING", + 2: "FINISHED", + } + CheckStatus_value = map[string]int32{ + "PENDING": 0, + "RUNNING": 1, + "FINISHED": 2, + } +) + +func (x CheckStatus) Enum() *CheckStatus { + p := new(CheckStatus) + *p = x + return p +} + +func (x CheckStatus) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CheckStatus) Descriptor() protoreflect.EnumDescriptor { + return file_checks_proto_enumTypes[2].Descriptor() +} + +func (CheckStatus) Type() protoreflect.EnumType { + return &file_checks_proto_enumTypes[2] +} + +func (x CheckStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CheckStatus.Descriptor instead. +func (CheckStatus) EnumDescriptor() ([]byte, []int) { + return file_checks_proto_rawDescGZIP(), []int{2} +} + +var File_checks_proto protoreflect.FileDescriptor + +var file_checks_proto_rawDesc = []byte{ + 0x0a, 0x0c, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2a, 0x36, + 0x0a, 0x08, 0x50, 0x72, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x15, 0x0a, 0x11, 0x50, 0x55, + 0x4c, 0x4c, 0x5f, 0x44, 0x4f, 0x43, 0x4b, 0x45, 0x52, 0x5f, 0x49, 0x4d, 0x41, 0x47, 0x45, 0x10, + 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x45, 0x54, 0x5f, 0x48, 0x41, 0x4c, 0x54, 0x5f, 0x48, 0x45, + 0x49, 0x47, 0x48, 0x54, 0x10, 0x01, 0x2a, 0x53, 0x0a, 0x09, 0x50, 0x6f, 0x73, 0x74, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x12, 0x13, 0x0a, 0x0f, 0x47, 0x52, 0x50, 0x43, 0x5f, 0x52, 0x45, 0x53, 0x50, + 0x4f, 0x4e, 0x53, 0x49, 0x56, 0x45, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x48, 0x41, 0x49, + 0x4e, 0x5f, 0x48, 0x45, 0x49, 0x47, 0x48, 0x54, 0x5f, 0x49, 0x4e, 0x43, 0x52, 0x45, 0x41, 0x53, + 0x45, 0x44, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x46, 0x49, 0x52, 0x53, 0x54, 0x5f, 0x42, 0x4c, + 0x4f, 0x43, 0x4b, 0x5f, 0x56, 0x4f, 0x54, 0x45, 0x44, 0x10, 0x02, 0x2a, 0x35, 0x0a, 0x0b, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x45, + 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x55, 0x4e, 0x4e, 0x49, + 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x46, 0x49, 0x4e, 0x49, 0x53, 0x48, 0x45, 0x44, + 0x10, 0x02, 0x42, 0x1b, 0x5a, 0x19, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, + 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_checks_proto_rawDescOnce sync.Once + file_checks_proto_rawDescData = file_checks_proto_rawDesc +) + +func file_checks_proto_rawDescGZIP() []byte { + file_checks_proto_rawDescOnce.Do(func() { + file_checks_proto_rawDescData = protoimpl.X.CompressGZIP(file_checks_proto_rawDescData) + }) + return file_checks_proto_rawDescData +} + +var file_checks_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_checks_proto_goTypes = []any{ + (PreCheck)(0), // 0: PreCheck + (PostCheck)(0), // 1: PostCheck + (CheckStatus)(0), // 2: CheckStatus +} +var file_checks_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_checks_proto_init() } +func file_checks_proto_init() { + if File_checks_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: 
protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_checks_proto_rawDesc,
+			NumEnums:      3,
+			NumMessages:   0,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_checks_proto_goTypes,
+		DependencyIndexes: file_checks_proto_depIdxs,
+		EnumInfos:         file_checks_proto_enumTypes,
+	}.Build()
+	File_checks_proto = out.File
+	file_checks_proto_rawDesc = nil
+	file_checks_proto_goTypes = nil
+	file_checks_proto_depIdxs = nil
+}
diff --git a/internal/pkg/proto/upgrades_registry/upgrades_registry.pb.go b/internal/pkg/proto/upgrades_registry/upgrades_registry.pb.go
new file mode 100644
index 0000000..9bea2cf
--- /dev/null
+++ b/internal/pkg/proto/upgrades_registry/upgrades_registry.pb.go
@@ -0,0 +1,1069 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.1
+// protoc v5.28.2
+// source: upgrades_registry.proto
+
+package upgrades_registry
+
+import (
+	_ "google.golang.org/genproto/googleapis/api/annotations"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type UpgradeStep int32
+
+const (
+	// NONE is the default step of an upgrade. It means that the upgrade is not being executed
+	UpgradeStep_NONE UpgradeStep = 0
+	// MONITORING means that Blazar sees the upcoming upgrade and is monitoring the chain for the upgrade height
+	UpgradeStep_MONITORING UpgradeStep = 1
+	// COMPOSE_FILE_UPGRADE indicates that Blazar is executing the core part of the upgrade via docker compose
+	UpgradeStep_COMPOSE_FILE_UPGRADE UpgradeStep = 2
+	// PRE_UPGRADE_CHECK indicates that Blazar is executing the pre-upgrade checks
+	UpgradeStep_PRE_UPGRADE_CHECK UpgradeStep = 3
+	// POST_UPGRADE_CHECK indicates that Blazar is executing the post-upgrade checks
+	UpgradeStep_POST_UPGRADE_CHECK UpgradeStep = 4
+)
+
+// Enum value maps for UpgradeStep.
+var (
+	UpgradeStep_name = map[int32]string{
+		0: "NONE",
+		1: "MONITORING",
+		2: "COMPOSE_FILE_UPGRADE",
+		3: "PRE_UPGRADE_CHECK",
+		4: "POST_UPGRADE_CHECK",
+	}
+	UpgradeStep_value = map[string]int32{
+		"NONE":                 0,
+		"MONITORING":           1,
+		"COMPOSE_FILE_UPGRADE": 2,
+		"PRE_UPGRADE_CHECK":    3,
+		"POST_UPGRADE_CHECK":   4,
+	}
+)
+
+func (x UpgradeStep) Enum() *UpgradeStep {
+	p := new(UpgradeStep)
+	*p = x
+	return p
+}
+
+func (x UpgradeStep) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (UpgradeStep) Descriptor() protoreflect.EnumDescriptor {
+	return file_upgrades_registry_proto_enumTypes[0].Descriptor()
+}
+
+func (UpgradeStep) Type() protoreflect.EnumType {
+	return &file_upgrades_registry_proto_enumTypes[0]
+}
+
+func (x UpgradeStep) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use UpgradeStep.Descriptor instead.
+func (UpgradeStep) EnumDescriptor() ([]byte, []int) {
+	return file_upgrades_registry_proto_rawDescGZIP(), []int{0}
+}
+
+type UpgradeStatus int32
+
+const (
+	// UNKNOWN is the default status of an upgrade. It means that the status of the upgrade is not known
+	UpgradeStatus_UNKNOWN UpgradeStatus = 0
+	// SCHEDULED is the initial status of an upgrade. It means that the
+	// upgrade is registered with the registry but it's not active yet.
+	//
+	// An upgrade coming from the chain governance that is still being voted on, is marked as scheduled
+	UpgradeStatus_SCHEDULED UpgradeStatus = 1
+	// ACTIVE means that the upgrade is acknowledged by network governance or a user and is ready to be executed.
+	UpgradeStatus_ACTIVE UpgradeStatus = 2
+	// EXECUTING means that the upgrade is currently being executed. The height is reached.
+	UpgradeStatus_EXECUTING UpgradeStatus = 3
+	// COMPLETED means that the upgrade has been successfully executed.
+	UpgradeStatus_COMPLETED UpgradeStatus = 4
+	// FAILED means that the upgrade has failed to execute.
+	UpgradeStatus_FAILED UpgradeStatus = 5
+	// CANCELLED means that the upgrade has been cancelled by a user or the network
+	UpgradeStatus_CANCELLED UpgradeStatus = 6
+	// EXPIRED means that the upgrade time has passed and Blazar did not do anything about it (e.g. a historical upgrade from the chain governance)
+	UpgradeStatus_EXPIRED UpgradeStatus = 7
+)
+
+// Enum value maps for UpgradeStatus.
+var (
+	UpgradeStatus_name = map[int32]string{
+		0: "UNKNOWN",
+		1: "SCHEDULED",
+		2: "ACTIVE",
+		3: "EXECUTING",
+		4: "COMPLETED",
+		5: "FAILED",
+		6: "CANCELLED",
+		7: "EXPIRED",
+	}
+	UpgradeStatus_value = map[string]int32{
+		"UNKNOWN":   0,
+		"SCHEDULED": 1,
+		"ACTIVE":    2,
+		"EXECUTING": 3,
+		"COMPLETED": 4,
+		"FAILED":    5,
+		"CANCELLED": 6,
+		"EXPIRED":   7,
+	}
+)
+
+func (x UpgradeStatus) Enum() *UpgradeStatus {
+	p := new(UpgradeStatus)
+	*p = x
+	return p
+}
+
+func (x UpgradeStatus) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (UpgradeStatus) Descriptor() protoreflect.EnumDescriptor {
+	return file_upgrades_registry_proto_enumTypes[1].Descriptor()
+}
+
+func (UpgradeStatus) Type() protoreflect.EnumType {
+	return &file_upgrades_registry_proto_enumTypes[1]
+}
+
+func (x UpgradeStatus) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use UpgradeStatus.Descriptor instead.
+func (UpgradeStatus) EnumDescriptor() ([]byte, []int) {
+	return file_upgrades_registry_proto_rawDescGZIP(), []int{1}
+}
+
+type UpgradeType int32
+
+const (
+	// GOVERNANCE is a coordinated upgrade that is initiated by the chain
+	// governance. The upgrade is expected to be coordinated across all
+	// validators at a specific height.
+	//
+	// Requirements:
+	// * there is an onchain governance proposal that has passed
+	UpgradeType_GOVERNANCE UpgradeType = 0
+	// NON_GOVERNANCE_COORDINATED means the upgrade is not coming from the chain,
+	// but rather is initiated by the operators.
+	//
+	// Requirements:
+	// * there should be no onchain governance proposal
+	// * the upgrade is expected to happen at the same height for all validators (usually it's a state breaking change)
+	UpgradeType_NON_GOVERNANCE_COORDINATED UpgradeType = 1
+	// NON_GOVERNANCE_UNCOORDINATED means the upgrade is not coming from the chain,
+	// but rather is initiated by the operators.
+	//
+	// Requirements:
+	// * there should be no onchain governance proposal
+	// * the upgrade is not expected to happen at any specific height. Validators are free to upgrade at their own pace. (usually non-state breaking changes)
+	UpgradeType_NON_GOVERNANCE_UNCOORDINATED UpgradeType = 2
+)
+
+// Enum value maps for UpgradeType.
+var (
+	UpgradeType_name = map[int32]string{
+		0: "GOVERNANCE",
+		1: "NON_GOVERNANCE_COORDINATED",
+		2: "NON_GOVERNANCE_UNCOORDINATED",
+	}
+	UpgradeType_value = map[string]int32{
+		"GOVERNANCE":                   0,
+		"NON_GOVERNANCE_COORDINATED":   1,
+		"NON_GOVERNANCE_UNCOORDINATED": 2,
+	}
+)
+
+func (x UpgradeType) Enum() *UpgradeType {
+	p := new(UpgradeType)
+	*p = x
+	return p
+}
+
+func (x UpgradeType) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (UpgradeType) Descriptor() protoreflect.EnumDescriptor {
+	return file_upgrades_registry_proto_enumTypes[2].Descriptor()
+}
+
+func (UpgradeType) Type() protoreflect.EnumType {
+	return &file_upgrades_registry_proto_enumTypes[2]
+}
+
+func (x UpgradeType) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use UpgradeType.Descriptor instead.
+func (UpgradeType) EnumDescriptor() ([]byte, []int) {
+	return file_upgrades_registry_proto_rawDescGZIP(), []int{2}
+}
+
+type ProviderType int32
+
+const (
+	// CHAIN means that the upgrade is coming from onchain governance
+	ProviderType_CHAIN ProviderType = 0
+	// LOCAL means that the upgrade is coming from Blazar local storage
+	ProviderType_LOCAL ProviderType = 1
+	// DATABASE means that the upgrade is coming from the database (e.g. PostgreSQL)
+	ProviderType_DATABASE ProviderType = 2
+)
+
+// Enum value maps for ProviderType.
+var (
+	ProviderType_name = map[int32]string{
+		0: "CHAIN",
+		1: "LOCAL",
+		2: "DATABASE",
+	}
+	ProviderType_value = map[string]int32{
+		"CHAIN":    0,
+		"LOCAL":    1,
+		"DATABASE": 2,
+	}
+)
+
+func (x ProviderType) Enum() *ProviderType {
+	p := new(ProviderType)
+	*p = x
+	return p
+}
+
+func (x ProviderType) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ProviderType) Descriptor() protoreflect.EnumDescriptor {
+	return file_upgrades_registry_proto_enumTypes[3].Descriptor()
+}
+
+func (ProviderType) Type() protoreflect.EnumType {
+	return &file_upgrades_registry_proto_enumTypes[3]
+}
+
+func (x ProviderType) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ProviderType.Descriptor instead.
+func (ProviderType) EnumDescriptor() ([]byte, []int) {
+	return file_upgrades_registry_proto_rawDescGZIP(), []int{3}
+}
+
+type Upgrade struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// the height at which the upgrade is expected to happen
+	Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty" gorm:"primaryKey;not null"`
+	// docker image tag
+	Tag string `protobuf:"bytes,2,opt,name=tag,proto3" json:"tag,omitempty" gorm:"type:text;not null"`
+	// cosmos network name (e.g. cosmoshub) or chain id (e.g. cosmoshub-4)
+	Network string `protobuf:"bytes,3,opt,name=network,proto3" json:"network,omitempty" gorm:"primaryKey;type:text;not null"`
+	// the short title of the upgrade (e.g. "Coordinated upgrade to v0.42.4 announced on Discord channel #announcements")
+	Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty" gorm:"type:text;not null"`
+	// type of the upgrade (defines what checks and actions should be taken)
+	Type UpgradeType `protobuf:"varint,5,opt,name=type,proto3,enum=UpgradeType" json:"type,omitempty" gorm:"not null"`
+	// status of the upgrade (DON'T set this field manually, it's managed by the registry)
+	Status UpgradeStatus `protobuf:"varint,6,opt,name=status,proto3,enum=UpgradeStatus" json:"status,omitempty" gorm:"default:0;not null"`
+	// current execution step (DON'T set this field manually, it's managed by the registry)
+	Step UpgradeStep `protobuf:"varint,7,opt,name=step,proto3,enum=UpgradeStep" json:"step,omitempty" gorm:"default:0;not null"`
+	// priority of the upgrade (highest priority wins)
+	Priority int32 `protobuf:"varint,8,opt,name=priority,proto3" json:"priority,omitempty" gorm:"primaryKey"`
+	// source of the upgrade
+	Source ProviderType `protobuf:"varint,9,opt,name=source,proto3,enum=ProviderType" json:"source,omitempty" gorm:"not null"`
+	// proposal id associated with the upgrade
+	ProposalId *int64 `protobuf:"varint,10,opt,name=proposal_id,json=proposalId,proto3,oneof" json:"proposal_id,omitempty"`
+}
+
+func (x *Upgrade) Reset() {
+	*x = Upgrade{}
+	mi := &file_upgrades_registry_proto_msgTypes[0]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Upgrade) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Upgrade) ProtoMessage() {}
+
+func (x *Upgrade) ProtoReflect() protoreflect.Message {
+	mi := &file_upgrades_registry_proto_msgTypes[0]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Upgrade.ProtoReflect.Descriptor instead.
+func (*Upgrade) Descriptor() ([]byte, []int) { + return file_upgrades_registry_proto_rawDescGZIP(), []int{0} +} + +func (x *Upgrade) GetHeight() int64 { + if x != nil { + return x.Height + } + return 0 +} + +func (x *Upgrade) GetTag() string { + if x != nil { + return x.Tag + } + return "" +} + +func (x *Upgrade) GetNetwork() string { + if x != nil { + return x.Network + } + return "" +} + +func (x *Upgrade) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Upgrade) GetType() UpgradeType { + if x != nil { + return x.Type + } + return UpgradeType_GOVERNANCE +} + +func (x *Upgrade) GetStatus() UpgradeStatus { + if x != nil { + return x.Status + } + return UpgradeStatus_UNKNOWN +} + +func (x *Upgrade) GetStep() UpgradeStep { + if x != nil { + return x.Step + } + return UpgradeStep_NONE +} + +func (x *Upgrade) GetPriority() int32 { + if x != nil { + return x.Priority + } + return 0 +} + +func (x *Upgrade) GetSource() ProviderType { + if x != nil { + return x.Source + } + return ProviderType_CHAIN +} + +func (x *Upgrade) GetProposalId() int64 { + if x != nil && x.ProposalId != nil { + return *x.ProposalId + } + return 0 +} + +// This is the structure of /blazar/upgrades.json +type Upgrades struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Upgrades []*Upgrade `protobuf:"bytes,1,rep,name=upgrades,proto3" json:"upgrades,omitempty"` +} + +func (x *Upgrades) Reset() { + *x = Upgrades{} + mi := &file_upgrades_registry_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Upgrades) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Upgrades) ProtoMessage() {} + +func (x *Upgrades) ProtoReflect() protoreflect.Message { + mi := &file_upgrades_registry_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Upgrades.ProtoReflect.Descriptor instead. +func (*Upgrades) Descriptor() ([]byte, []int) { + return file_upgrades_registry_proto_rawDescGZIP(), []int{1} +} + +func (x *Upgrades) GetUpgrades() []*Upgrade { + if x != nil { + return x.Upgrades + } + return nil +} + +type AddUpgradeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The new upgrade to be registered + Upgrade *Upgrade `protobuf:"bytes,1,opt,name=upgrade,proto3" json:"upgrade,omitempty"` + // If set to true, the upgrade will be overwritten if it already exists + Overwrite bool `protobuf:"varint,2,opt,name=overwrite,proto3" json:"overwrite,omitempty"` +} + +func (x *AddUpgradeRequest) Reset() { + *x = AddUpgradeRequest{} + mi := &file_upgrades_registry_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *AddUpgradeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AddUpgradeRequest) ProtoMessage() {} + +func (x *AddUpgradeRequest) ProtoReflect() protoreflect.Message { + mi := &file_upgrades_registry_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AddUpgradeRequest.ProtoReflect.Descriptor instead. 
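An illustrative AddUpgradeRequest, with placeholder values; Status and Step are left unset because, per the field comments on Upgrade above, the registry manages them:

```go
// All literal values below are placeholders.
proposalID := int64(42)
req := &AddUpgradeRequest{
	Upgrade: &Upgrade{
		Height:     21_000_000,
		Tag:        "v1.2.3",
		Network:    "cosmoshub-4",
		Name:       "Example coordinated upgrade to v1.2.3",
		Type:       UpgradeType_GOVERNANCE,
		Source:     ProviderType_LOCAL,
		ProposalId: &proposalID, // proto3 optional field, hence the pointer
	},
	Overwrite: true, // replace an existing entry at the same height, if any
}
```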
+func (*AddUpgradeRequest) Descriptor() ([]byte, []int) { + return file_upgrades_registry_proto_rawDescGZIP(), []int{2} +} + +func (x *AddUpgradeRequest) GetUpgrade() *Upgrade { + if x != nil { + return x.Upgrade + } + return nil +} + +func (x *AddUpgradeRequest) GetOverwrite() bool { + if x != nil { + return x.Overwrite + } + return false +} + +type AddUpgradeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *AddUpgradeResponse) Reset() { + *x = AddUpgradeResponse{} + mi := &file_upgrades_registry_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *AddUpgradeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AddUpgradeResponse) ProtoMessage() {} + +func (x *AddUpgradeResponse) ProtoReflect() protoreflect.Message { + mi := &file_upgrades_registry_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AddUpgradeResponse.ProtoReflect.Descriptor instead. +func (*AddUpgradeResponse) Descriptor() ([]byte, []int) { + return file_upgrades_registry_proto_rawDescGZIP(), []int{3} +} + +type ListUpgradesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DisableCache bool `protobuf:"varint,1,opt,name=disable_cache,json=disableCache,proto3" json:"disable_cache,omitempty"` + Height *int64 `protobuf:"varint,2,opt,name=height,proto3,oneof" json:"height,omitempty"` + Type *UpgradeType `protobuf:"varint,3,opt,name=type,proto3,enum=UpgradeType,oneof" json:"type,omitempty"` + Source *ProviderType `protobuf:"varint,4,opt,name=source,proto3,enum=ProviderType,oneof" json:"source,omitempty"` + Status []UpgradeStatus `protobuf:"varint,5,rep,packed,name=status,proto3,enum=UpgradeStatus" json:"status,omitempty"` + Limit *int64 `protobuf:"varint,6,opt,name=limit,proto3,oneof" json:"limit,omitempty"` +} + +func (x *ListUpgradesRequest) Reset() { + *x = ListUpgradesRequest{} + mi := &file_upgrades_registry_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListUpgradesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListUpgradesRequest) ProtoMessage() {} + +func (x *ListUpgradesRequest) ProtoReflect() protoreflect.Message { + mi := &file_upgrades_registry_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListUpgradesRequest.ProtoReflect.Descriptor instead. 
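The optional filter fields on ListUpgradesRequest are generated as pointers, so callers take addresses of locals. An illustrative request with placeholder values:

```go
// Placeholder filter values.
limit := int64(10)
source := ProviderType_CHAIN
req := &ListUpgradesRequest{
	DisableCache: true, // read through to the providers instead of the cache
	Source:       &source,
	Status:       []UpgradeStatus{UpgradeStatus_ACTIVE, UpgradeStatus_EXECUTING},
	Limit:        &limit,
}
```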
+func (*ListUpgradesRequest) Descriptor() ([]byte, []int) { + return file_upgrades_registry_proto_rawDescGZIP(), []int{4} +} + +func (x *ListUpgradesRequest) GetDisableCache() bool { + if x != nil { + return x.DisableCache + } + return false +} + +func (x *ListUpgradesRequest) GetHeight() int64 { + if x != nil && x.Height != nil { + return *x.Height + } + return 0 +} + +func (x *ListUpgradesRequest) GetType() UpgradeType { + if x != nil && x.Type != nil { + return *x.Type + } + return UpgradeType_GOVERNANCE +} + +func (x *ListUpgradesRequest) GetSource() ProviderType { + if x != nil && x.Source != nil { + return *x.Source + } + return ProviderType_CHAIN +} + +func (x *ListUpgradesRequest) GetStatus() []UpgradeStatus { + if x != nil { + return x.Status + } + return nil +} + +func (x *ListUpgradesRequest) GetLimit() int64 { + if x != nil && x.Limit != nil { + return *x.Limit + } + return 0 +} + +type ListUpgradesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Upgrades []*Upgrade `protobuf:"bytes,1,rep,name=upgrades,proto3" json:"upgrades,omitempty"` +} + +func (x *ListUpgradesResponse) Reset() { + *x = ListUpgradesResponse{} + mi := &file_upgrades_registry_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListUpgradesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListUpgradesResponse) ProtoMessage() {} + +func (x *ListUpgradesResponse) ProtoReflect() protoreflect.Message { + mi := &file_upgrades_registry_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListUpgradesResponse.ProtoReflect.Descriptor instead. +func (*ListUpgradesResponse) Descriptor() ([]byte, []int) { + return file_upgrades_registry_proto_rawDescGZIP(), []int{5} +} + +func (x *ListUpgradesResponse) GetUpgrades() []*Upgrade { + if x != nil { + return x.Upgrades + } + return nil +} + +type CancelUpgradeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Source ProviderType `protobuf:"varint,2,opt,name=source,proto3,enum=ProviderType" json:"source,omitempty"` + // if set to true, the upgrade is cancelled through the state machine, in this case 'source' is ignored + Force bool `protobuf:"varint,3,opt,name=force,proto3" json:"force,omitempty"` +} + +func (x *CancelUpgradeRequest) Reset() { + *x = CancelUpgradeRequest{} + mi := &file_upgrades_registry_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CancelUpgradeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CancelUpgradeRequest) ProtoMessage() {} + +func (x *CancelUpgradeRequest) ProtoReflect() protoreflect.Message { + mi := &file_upgrades_registry_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CancelUpgradeRequest.ProtoReflect.Descriptor instead. 
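Similarly, an illustrative CancelUpgradeRequest; per the field comment above, setting Force cancels through the state machine and ignores Source:

```go
req := &CancelUpgradeRequest{
	Height: 21_000_000, // placeholder
	Source: ProviderType_LOCAL,
	Force:  false, // true would cancel via the state machine and ignore Source
}
```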
+func (*CancelUpgradeRequest) Descriptor() ([]byte, []int) { + return file_upgrades_registry_proto_rawDescGZIP(), []int{6} +} + +func (x *CancelUpgradeRequest) GetHeight() int64 { + if x != nil { + return x.Height + } + return 0 +} + +func (x *CancelUpgradeRequest) GetSource() ProviderType { + if x != nil { + return x.Source + } + return ProviderType_CHAIN +} + +func (x *CancelUpgradeRequest) GetForce() bool { + if x != nil { + return x.Force + } + return false +} + +type CancelUpgradeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *CancelUpgradeResponse) Reset() { + *x = CancelUpgradeResponse{} + mi := &file_upgrades_registry_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CancelUpgradeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CancelUpgradeResponse) ProtoMessage() {} + +func (x *CancelUpgradeResponse) ProtoReflect() protoreflect.Message { + mi := &file_upgrades_registry_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CancelUpgradeResponse.ProtoReflect.Descriptor instead. +func (*CancelUpgradeResponse) Descriptor() ([]byte, []int) { + return file_upgrades_registry_proto_rawDescGZIP(), []int{7} +} + +// ForceSyncRequest is used to force the registry to sync the upgrades from all registered providers +type ForceSyncRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ForceSyncRequest) Reset() { + *x = ForceSyncRequest{} + mi := &file_upgrades_registry_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ForceSyncRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ForceSyncRequest) ProtoMessage() {} + +func (x *ForceSyncRequest) ProtoReflect() protoreflect.Message { + mi := &file_upgrades_registry_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ForceSyncRequest.ProtoReflect.Descriptor instead. 
+func (*ForceSyncRequest) Descriptor() ([]byte, []int) { + return file_upgrades_registry_proto_rawDescGZIP(), []int{8} +} + +type ForceSyncResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // the height at which the registry is currently synced + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` +} + +func (x *ForceSyncResponse) Reset() { + *x = ForceSyncResponse{} + mi := &file_upgrades_registry_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ForceSyncResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ForceSyncResponse) ProtoMessage() {} + +func (x *ForceSyncResponse) ProtoReflect() protoreflect.Message { + mi := &file_upgrades_registry_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ForceSyncResponse.ProtoReflect.Descriptor instead. +func (*ForceSyncResponse) Descriptor() ([]byte, []int) { + return file_upgrades_registry_proto_rawDescGZIP(), []int{9} +} + +func (x *ForceSyncResponse) GetHeight() int64 { + if x != nil { + return x.Height + } + return 0 +} + +var File_upgrades_registry_proto protoreflect.FileDescriptor + +var file_upgrades_registry_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x67, 0x69, 0x73, + 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc6, 0x02, 0x0a, 0x07, 0x55, 0x70, 0x67, 0x72, + 0x61, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x74, + 0x61, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x61, 0x67, 0x12, 0x18, 0x0a, + 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0c, 0x2e, 0x55, 0x70, 0x67, 0x72, + 0x61, 0x64, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x26, 0x0a, + 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0e, 0x2e, + 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x20, 0x0a, 0x04, 0x73, 0x74, 0x65, 0x70, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x0c, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x53, 0x74, 0x65, + 0x70, 0x52, 0x04, 0x73, 0x74, 0x65, 0x70, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, + 0x69, 0x74, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, + 0x69, 0x74, 0x79, 0x12, 0x25, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x24, 0x0a, 0x0b, 0x70, 0x72, + 0x6f, 0x70, 0x6f, 0x73, 
0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x48, + 0x00, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x49, 0x64, 0x88, 0x01, 0x01, + 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x5f, 0x69, 0x64, + 0x22, 0x30, 0x0a, 0x08, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x73, 0x12, 0x24, 0x0a, 0x08, + 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x08, + 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x08, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, + 0x65, 0x73, 0x22, 0x55, 0x0a, 0x11, 0x41, 0x64, 0x64, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x22, 0x0a, 0x07, 0x75, 0x70, 0x67, 0x72, 0x61, + 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, + 0x64, 0x65, 0x52, 0x07, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6f, + 0x76, 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, + 0x6f, 0x76, 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, 0x65, 0x22, 0x14, 0x0a, 0x12, 0x41, 0x64, 0x64, + 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x96, 0x02, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x69, 0x73, 0x61, 0x62, + 0x6c, 0x65, 0x5f, 0x63, 0x61, 0x63, 0x68, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, + 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x1b, 0x0a, 0x06, + 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x06, + 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x88, 0x01, 0x01, 0x12, 0x25, 0x0a, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0c, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, + 0x65, 0x54, 0x79, 0x70, 0x65, 0x48, 0x01, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x88, 0x01, 0x01, + 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x0d, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x48, + 0x02, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x88, 0x01, 0x01, 0x12, 0x26, 0x0a, 0x06, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x55, + 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x19, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x88, 0x01, 0x01, 0x42, + 0x09, 0x0a, 0x07, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x08, + 0x0a, 0x06, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x3c, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, + 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x24, 0x0a, 0x08, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x08, 0x75, 0x70, + 0x67, 0x72, 0x61, 0x64, 0x65, 0x73, 0x22, 0x6b, 0x0a, 0x14, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, + 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, + 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 
0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, + 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x25, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, + 0x72, 0x54, 0x79, 0x70, 0x65, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, + 0x72, 0x63, 0x65, 0x22, 0x17, 0x0a, 0x15, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x55, 0x70, 0x67, + 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x12, 0x0a, 0x10, + 0x46, 0x6f, 0x72, 0x63, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x22, 0x2b, 0x0a, 0x11, 0x46, 0x6f, 0x72, 0x63, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x2a, 0x70, 0x0a, + 0x0b, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x53, 0x74, 0x65, 0x70, 0x12, 0x08, 0x0a, 0x04, + 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x4d, 0x4f, 0x4e, 0x49, 0x54, 0x4f, + 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x18, 0x0a, 0x14, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x53, + 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x5f, 0x55, 0x50, 0x47, 0x52, 0x41, 0x44, 0x45, 0x10, 0x02, + 0x12, 0x15, 0x0a, 0x11, 0x50, 0x52, 0x45, 0x5f, 0x55, 0x50, 0x47, 0x52, 0x41, 0x44, 0x45, 0x5f, + 0x43, 0x48, 0x45, 0x43, 0x4b, 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, 0x50, 0x4f, 0x53, 0x54, 0x5f, + 0x55, 0x50, 0x47, 0x52, 0x41, 0x44, 0x45, 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x10, 0x04, 0x2a, + 0x7d, 0x0a, 0x0d, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0d, 0x0a, + 0x09, 0x53, 0x43, 0x48, 0x45, 0x44, 0x55, 0x4c, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, + 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x45, 0x58, 0x45, 0x43, + 0x55, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x4d, 0x50, 0x4c, + 0x45, 0x54, 0x45, 0x44, 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, + 0x10, 0x05, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x4c, 0x45, 0x44, 0x10, + 0x06, 0x12, 0x0b, 0x0a, 0x07, 0x45, 0x58, 0x50, 0x49, 0x52, 0x45, 0x44, 0x10, 0x07, 0x2a, 0x5f, + 0x0a, 0x0b, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0e, 0x0a, + 0x0a, 0x47, 0x4f, 0x56, 0x45, 0x52, 0x4e, 0x41, 0x4e, 0x43, 0x45, 0x10, 0x00, 0x12, 0x1e, 0x0a, + 0x1a, 0x4e, 0x4f, 0x4e, 0x5f, 0x47, 0x4f, 0x56, 0x45, 0x52, 0x4e, 0x41, 0x4e, 0x43, 0x45, 0x5f, + 0x43, 0x4f, 0x4f, 0x52, 0x44, 0x49, 0x4e, 0x41, 0x54, 0x45, 0x44, 0x10, 0x01, 0x12, 0x20, 0x0a, + 0x1c, 0x4e, 0x4f, 0x4e, 0x5f, 0x47, 0x4f, 0x56, 0x45, 0x52, 0x4e, 0x41, 0x4e, 0x43, 0x45, 0x5f, + 0x55, 0x4e, 0x43, 0x4f, 0x4f, 0x52, 0x44, 0x49, 0x4e, 0x41, 0x54, 0x45, 0x44, 0x10, 0x02, 0x2a, + 0x32, 0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x09, 0x0a, 0x05, 0x43, 0x48, 0x41, 0x49, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x4c, 0x4f, + 0x43, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x41, 0x54, 0x41, 0x42, 0x41, 0x53, + 0x45, 0x10, 0x02, 0x32, 0xf5, 0x02, 0x0a, 0x0f, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, + 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x12, 0x52, 0x0a, 0x0a, 0x41, 
0x64, 0x64, 0x55, 0x70, + 0x67, 0x72, 0x61, 0x64, 0x65, 0x12, 0x12, 0x2e, 0x41, 0x64, 0x64, 0x55, 0x70, 0x67, 0x72, 0x61, + 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x41, 0x64, 0x64, 0x55, + 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x3a, 0x01, 0x2a, 0x22, 0x10, 0x2f, 0x76, 0x31, 0x2f, 0x75, + 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x73, 0x2f, 0x61, 0x64, 0x64, 0x12, 0x56, 0x0a, 0x0c, 0x4c, + 0x69, 0x73, 0x74, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x73, 0x12, 0x14, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x15, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x19, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x13, + 0x12, 0x11, 0x2f, 0x76, 0x31, 0x2f, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x73, 0x2f, 0x6c, + 0x69, 0x73, 0x74, 0x12, 0x5e, 0x0a, 0x0d, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x55, 0x70, 0x67, + 0x72, 0x61, 0x64, 0x65, 0x12, 0x15, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x55, 0x70, 0x67, + 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x43, 0x61, + 0x6e, 0x63, 0x65, 0x6c, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x1e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x3a, 0x01, 0x2a, 0x22, 0x13, + 0x2f, 0x76, 0x31, 0x2f, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x73, 0x2f, 0x63, 0x61, 0x6e, + 0x63, 0x65, 0x6c, 0x12, 0x56, 0x0a, 0x09, 0x46, 0x6f, 0x72, 0x63, 0x65, 0x53, 0x79, 0x6e, 0x63, + 0x12, 0x11, 0x2e, 0x46, 0x6f, 0x72, 0x63, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x46, 0x6f, 0x72, 0x63, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x22, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1c, 0x3a, + 0x01, 0x2a, 0x22, 0x17, 0x2f, 0x76, 0x31, 0x2f, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x73, + 0x2f, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x42, 0x26, 0x5a, 0x24, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2f, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x67, 0x69, 0x73, + 0x74, 0x72, 0x79, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_upgrades_registry_proto_rawDescOnce sync.Once + file_upgrades_registry_proto_rawDescData = file_upgrades_registry_proto_rawDesc +) + +func file_upgrades_registry_proto_rawDescGZIP() []byte { + file_upgrades_registry_proto_rawDescOnce.Do(func() { + file_upgrades_registry_proto_rawDescData = protoimpl.X.CompressGZIP(file_upgrades_registry_proto_rawDescData) + }) + return file_upgrades_registry_proto_rawDescData +} + +var file_upgrades_registry_proto_enumTypes = make([]protoimpl.EnumInfo, 4) +var file_upgrades_registry_proto_msgTypes = make([]protoimpl.MessageInfo, 10) +var file_upgrades_registry_proto_goTypes = []any{ + (UpgradeStep)(0), // 0: UpgradeStep + (UpgradeStatus)(0), // 1: UpgradeStatus + (UpgradeType)(0), // 2: UpgradeType + (ProviderType)(0), // 3: ProviderType + (*Upgrade)(nil), // 4: Upgrade + (*Upgrades)(nil), // 5: Upgrades + (*AddUpgradeRequest)(nil), // 6: AddUpgradeRequest + (*AddUpgradeResponse)(nil), // 7: AddUpgradeResponse + (*ListUpgradesRequest)(nil), // 8: ListUpgradesRequest + (*ListUpgradesResponse)(nil), // 9: ListUpgradesResponse + 
(*CancelUpgradeRequest)(nil), // 10: CancelUpgradeRequest + (*CancelUpgradeResponse)(nil), // 11: CancelUpgradeResponse + (*ForceSyncRequest)(nil), // 12: ForceSyncRequest + (*ForceSyncResponse)(nil), // 13: ForceSyncResponse +} +var file_upgrades_registry_proto_depIdxs = []int32{ + 2, // 0: Upgrade.type:type_name -> UpgradeType + 1, // 1: Upgrade.status:type_name -> UpgradeStatus + 0, // 2: Upgrade.step:type_name -> UpgradeStep + 3, // 3: Upgrade.source:type_name -> ProviderType + 4, // 4: Upgrades.upgrades:type_name -> Upgrade + 4, // 5: AddUpgradeRequest.upgrade:type_name -> Upgrade + 2, // 6: ListUpgradesRequest.type:type_name -> UpgradeType + 3, // 7: ListUpgradesRequest.source:type_name -> ProviderType + 1, // 8: ListUpgradesRequest.status:type_name -> UpgradeStatus + 4, // 9: ListUpgradesResponse.upgrades:type_name -> Upgrade + 3, // 10: CancelUpgradeRequest.source:type_name -> ProviderType + 6, // 11: UpgradeRegistry.AddUpgrade:input_type -> AddUpgradeRequest + 8, // 12: UpgradeRegistry.ListUpgrades:input_type -> ListUpgradesRequest + 10, // 13: UpgradeRegistry.CancelUpgrade:input_type -> CancelUpgradeRequest + 12, // 14: UpgradeRegistry.ForceSync:input_type -> ForceSyncRequest + 7, // 15: UpgradeRegistry.AddUpgrade:output_type -> AddUpgradeResponse + 9, // 16: UpgradeRegistry.ListUpgrades:output_type -> ListUpgradesResponse + 11, // 17: UpgradeRegistry.CancelUpgrade:output_type -> CancelUpgradeResponse + 13, // 18: UpgradeRegistry.ForceSync:output_type -> ForceSyncResponse + 15, // [15:19] is the sub-list for method output_type + 11, // [11:15] is the sub-list for method input_type + 11, // [11:11] is the sub-list for extension type_name + 11, // [11:11] is the sub-list for extension extendee + 0, // [0:11] is the sub-list for field type_name +} + +func init() { file_upgrades_registry_proto_init() } +func file_upgrades_registry_proto_init() { + if File_upgrades_registry_proto != nil { + return + } + file_upgrades_registry_proto_msgTypes[0].OneofWrappers = []any{} + file_upgrades_registry_proto_msgTypes[4].OneofWrappers = []any{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_upgrades_registry_proto_rawDesc, + NumEnums: 4, + NumMessages: 10, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_upgrades_registry_proto_goTypes, + DependencyIndexes: file_upgrades_registry_proto_depIdxs, + EnumInfos: file_upgrades_registry_proto_enumTypes, + MessageInfos: file_upgrades_registry_proto_msgTypes, + }.Build() + File_upgrades_registry_proto = out.File + file_upgrades_registry_proto_rawDesc = nil + file_upgrades_registry_proto_goTypes = nil + file_upgrades_registry_proto_depIdxs = nil +} diff --git a/internal/pkg/proto/upgrades_registry/upgrades_registry.pb.gw.go b/internal/pkg/proto/upgrades_registry/upgrades_registry.pb.gw.go new file mode 100644 index 0000000..07d8912 --- /dev/null +++ b/internal/pkg/proto/upgrades_registry/upgrades_registry.pb.gw.go @@ -0,0 +1,404 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: upgrades_registry.proto + +/* +Package upgrades_registry is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package upgrades_registry + +import ( + "context" + "io" + "net/http" + + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = metadata.Join + +func request_UpgradeRegistry_AddUpgrade_0(ctx context.Context, marshaler runtime.Marshaler, client UpgradeRegistryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq AddUpgradeRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.AddUpgrade(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_UpgradeRegistry_AddUpgrade_0(ctx context.Context, marshaler runtime.Marshaler, server UpgradeRegistryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq AddUpgradeRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.AddUpgrade(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_UpgradeRegistry_ListUpgrades_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_UpgradeRegistry_ListUpgrades_0(ctx context.Context, marshaler runtime.Marshaler, client UpgradeRegistryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ListUpgradesRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_UpgradeRegistry_ListUpgrades_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.ListUpgrades(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_UpgradeRegistry_ListUpgrades_0(ctx context.Context, marshaler runtime.Marshaler, server UpgradeRegistryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ListUpgradesRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_UpgradeRegistry_ListUpgrades_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.ListUpgrades(ctx, &protoReq) + return msg, metadata, err + +} + +func request_UpgradeRegistry_CancelUpgrade_0(ctx context.Context, marshaler runtime.Marshaler, client UpgradeRegistryClient, req *http.Request, pathParams map[string]string) (proto.Message, 
runtime.ServerMetadata, error) { + var protoReq CancelUpgradeRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.CancelUpgrade(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_UpgradeRegistry_CancelUpgrade_0(ctx context.Context, marshaler runtime.Marshaler, server UpgradeRegistryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq CancelUpgradeRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.CancelUpgrade(ctx, &protoReq) + return msg, metadata, err + +} + +func request_UpgradeRegistry_ForceSync_0(ctx context.Context, marshaler runtime.Marshaler, client UpgradeRegistryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ForceSyncRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.ForceSync(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_UpgradeRegistry_ForceSync_0(ctx context.Context, marshaler runtime.Marshaler, server UpgradeRegistryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ForceSyncRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.ForceSync(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterUpgradeRegistryHandlerServer registers the http handlers for service UpgradeRegistry to "mux". +// UnaryRPC :call UpgradeRegistryServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterUpgradeRegistryHandlerFromEndpoint instead. 
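+//
+// A minimal in-process wiring sketch (illustrative only; "srv", the context,
+// and the listen address are placeholders, not part of the generated code):
+//
+//	mux := runtime.NewServeMux()
+//	if err := RegisterUpgradeRegistryHandlerServer(context.Background(), mux, srv); err != nil {
+//		log.Fatal(err)
+//	}
+//	log.Fatal(http.ListenAndServe("localhost:8080", mux))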
+func RegisterUpgradeRegistryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server UpgradeRegistryServer) error { + + mux.Handle("POST", pattern_UpgradeRegistry_AddUpgrade_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/.UpgradeRegistry/AddUpgrade", runtime.WithHTTPPathPattern("/v1/upgrades/add")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_UpgradeRegistry_AddUpgrade_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_UpgradeRegistry_AddUpgrade_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_UpgradeRegistry_ListUpgrades_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/.UpgradeRegistry/ListUpgrades", runtime.WithHTTPPathPattern("/v1/upgrades/list")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_UpgradeRegistry_ListUpgrades_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_UpgradeRegistry_ListUpgrades_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("POST", pattern_UpgradeRegistry_CancelUpgrade_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/.UpgradeRegistry/CancelUpgrade", runtime.WithHTTPPathPattern("/v1/upgrades/cancel")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_UpgradeRegistry_CancelUpgrade_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_UpgradeRegistry_CancelUpgrade_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_UpgradeRegistry_ForceSync_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/.UpgradeRegistry/ForceSync", runtime.WithHTTPPathPattern("/v1/upgrades/force_sync")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_UpgradeRegistry_ForceSync_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_UpgradeRegistry_ForceSync_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterUpgradeRegistryHandlerFromEndpoint is same as RegisterUpgradeRegistryHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterUpgradeRegistryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.DialContext(ctx, endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterUpgradeRegistryHandler(ctx, mux, conn) +} + +// RegisterUpgradeRegistryHandler registers the http handlers for service UpgradeRegistry to "mux". +// The handlers forward requests to the grpc endpoint over "conn". 
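+//
+// An illustrative sketch of serving the gateway over an existing connection,
+// assuming a grpc-go version that provides grpc.NewClient and the
+// google.golang.org/grpc/credentials/insecure package; the dial target and
+// listen address are placeholders:
+//
+//	conn, err := grpc.NewClient("localhost:5678", grpc.WithTransportCredentials(insecure.NewCredentials()))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	mux := runtime.NewServeMux()
+//	if err := RegisterUpgradeRegistryHandler(context.Background(), mux, conn); err != nil {
+//		log.Fatal(err)
+//	}
+//	log.Fatal(http.ListenAndServe("localhost:8080", mux))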
+func RegisterUpgradeRegistryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterUpgradeRegistryHandlerClient(ctx, mux, NewUpgradeRegistryClient(conn)) +} + +// RegisterUpgradeRegistryHandlerClient registers the http handlers for service UpgradeRegistry +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "UpgradeRegistryClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "UpgradeRegistryClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "UpgradeRegistryClient" to call the correct interceptors. +func RegisterUpgradeRegistryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client UpgradeRegistryClient) error { + + mux.Handle("POST", pattern_UpgradeRegistry_AddUpgrade_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/.UpgradeRegistry/AddUpgrade", runtime.WithHTTPPathPattern("/v1/upgrades/add")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_UpgradeRegistry_AddUpgrade_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_UpgradeRegistry_AddUpgrade_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_UpgradeRegistry_ListUpgrades_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/.UpgradeRegistry/ListUpgrades", runtime.WithHTTPPathPattern("/v1/upgrades/list")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_UpgradeRegistry_ListUpgrades_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_UpgradeRegistry_ListUpgrades_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("POST", pattern_UpgradeRegistry_CancelUpgrade_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/.UpgradeRegistry/CancelUpgrade", runtime.WithHTTPPathPattern("/v1/upgrades/cancel")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_UpgradeRegistry_CancelUpgrade_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_UpgradeRegistry_CancelUpgrade_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_UpgradeRegistry_ForceSync_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/.UpgradeRegistry/ForceSync", runtime.WithHTTPPathPattern("/v1/upgrades/force_sync")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_UpgradeRegistry_ForceSync_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_UpgradeRegistry_ForceSync_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_UpgradeRegistry_AddUpgrade_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "upgrades", "add"}, "")) + + pattern_UpgradeRegistry_ListUpgrades_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "upgrades", "list"}, "")) + + pattern_UpgradeRegistry_CancelUpgrade_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "upgrades", "cancel"}, "")) + + pattern_UpgradeRegistry_ForceSync_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "upgrades", "force_sync"}, "")) +) + +var ( + forward_UpgradeRegistry_AddUpgrade_0 = runtime.ForwardResponseMessage + + forward_UpgradeRegistry_ListUpgrades_0 = runtime.ForwardResponseMessage + + forward_UpgradeRegistry_CancelUpgrade_0 = runtime.ForwardResponseMessage + + forward_UpgradeRegistry_ForceSync_0 = runtime.ForwardResponseMessage +) diff --git a/internal/pkg/proto/upgrades_registry/upgrades_registry_grpc.pb.go b/internal/pkg/proto/upgrades_registry/upgrades_registry_grpc.pb.go new file mode 100644 index 0000000..b67bed1 --- /dev/null +++ b/internal/pkg/proto/upgrades_registry/upgrades_registry_grpc.pb.go @@ -0,0 +1,243 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
+// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.28.2 +// source: upgrades_registry.proto + +package upgrades_registry + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + UpgradeRegistry_AddUpgrade_FullMethodName = "/UpgradeRegistry/AddUpgrade" + UpgradeRegistry_ListUpgrades_FullMethodName = "/UpgradeRegistry/ListUpgrades" + UpgradeRegistry_CancelUpgrade_FullMethodName = "/UpgradeRegistry/CancelUpgrade" + UpgradeRegistry_ForceSync_FullMethodName = "/UpgradeRegistry/ForceSync" +) + +// UpgradeRegistryClient is the client API for UpgradeRegistry service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type UpgradeRegistryClient interface { + // register a new upgrade with blazar + AddUpgrade(ctx context.Context, in *AddUpgradeRequest, opts ...grpc.CallOption) (*AddUpgradeResponse, error) + // list upgrades registered with blazar + ListUpgrades(ctx context.Context, in *ListUpgradesRequest, opts ...grpc.CallOption) (*ListUpgradesResponse, error) + // cancel upgrade + CancelUpgrade(ctx context.Context, in *CancelUpgradeRequest, opts ...grpc.CallOption) (*CancelUpgradeResponse, error) + // force the registry to sync the upgrades from all registered providers + ForceSync(ctx context.Context, in *ForceSyncRequest, opts ...grpc.CallOption) (*ForceSyncResponse, error) +} + +type upgradeRegistryClient struct { + cc grpc.ClientConnInterface +} + +func NewUpgradeRegistryClient(cc grpc.ClientConnInterface) UpgradeRegistryClient { + return &upgradeRegistryClient{cc} +} + +func (c *upgradeRegistryClient) AddUpgrade(ctx context.Context, in *AddUpgradeRequest, opts ...grpc.CallOption) (*AddUpgradeResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(AddUpgradeResponse) + err := c.cc.Invoke(ctx, UpgradeRegistry_AddUpgrade_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *upgradeRegistryClient) ListUpgrades(ctx context.Context, in *ListUpgradesRequest, opts ...grpc.CallOption) (*ListUpgradesResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ListUpgradesResponse) + err := c.cc.Invoke(ctx, UpgradeRegistry_ListUpgrades_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *upgradeRegistryClient) CancelUpgrade(ctx context.Context, in *CancelUpgradeRequest, opts ...grpc.CallOption) (*CancelUpgradeResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(CancelUpgradeResponse) + err := c.cc.Invoke(ctx, UpgradeRegistry_CancelUpgrade_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *upgradeRegistryClient) ForceSync(ctx context.Context, in *ForceSyncRequest, opts ...grpc.CallOption) (*ForceSyncResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ForceSyncResponse) + err := c.cc.Invoke(ctx, UpgradeRegistry_ForceSync_FullMethodName, in, out, cOpts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// UpgradeRegistryServer is the server API for UpgradeRegistry service. +// All implementations must embed UnimplementedUpgradeRegistryServer +// for forward compatibility. +type UpgradeRegistryServer interface { + // register a new upgrade with blazar + AddUpgrade(context.Context, *AddUpgradeRequest) (*AddUpgradeResponse, error) + // list upgrades registered with blazar + ListUpgrades(context.Context, *ListUpgradesRequest) (*ListUpgradesResponse, error) + // cancel upgrade + CancelUpgrade(context.Context, *CancelUpgradeRequest) (*CancelUpgradeResponse, error) + // force the registry to sync the upgrades from all registered providers + ForceSync(context.Context, *ForceSyncRequest) (*ForceSyncResponse, error) + mustEmbedUnimplementedUpgradeRegistryServer() +} + +// UnimplementedUpgradeRegistryServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedUpgradeRegistryServer struct{} + +func (UnimplementedUpgradeRegistryServer) AddUpgrade(context.Context, *AddUpgradeRequest) (*AddUpgradeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AddUpgrade not implemented") +} +func (UnimplementedUpgradeRegistryServer) ListUpgrades(context.Context, *ListUpgradesRequest) (*ListUpgradesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListUpgrades not implemented") +} +func (UnimplementedUpgradeRegistryServer) CancelUpgrade(context.Context, *CancelUpgradeRequest) (*CancelUpgradeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CancelUpgrade not implemented") +} +func (UnimplementedUpgradeRegistryServer) ForceSync(context.Context, *ForceSyncRequest) (*ForceSyncResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ForceSync not implemented") +} +func (UnimplementedUpgradeRegistryServer) mustEmbedUnimplementedUpgradeRegistryServer() {} +func (UnimplementedUpgradeRegistryServer) testEmbeddedByValue() {} + +// UnsafeUpgradeRegistryServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to UpgradeRegistryServer will +// result in compilation errors. +type UnsafeUpgradeRegistryServer interface { + mustEmbedUnimplementedUpgradeRegistryServer() +} + +func RegisterUpgradeRegistryServer(s grpc.ServiceRegistrar, srv UpgradeRegistryServer) { + // If the following call panics, it indicates UnimplementedUpgradeRegistryServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O.
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&UpgradeRegistry_ServiceDesc, srv) +} + +func _UpgradeRegistry_AddUpgrade_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AddUpgradeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UpgradeRegistryServer).AddUpgrade(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: UpgradeRegistry_AddUpgrade_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UpgradeRegistryServer).AddUpgrade(ctx, req.(*AddUpgradeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UpgradeRegistry_ListUpgrades_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListUpgradesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UpgradeRegistryServer).ListUpgrades(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: UpgradeRegistry_ListUpgrades_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UpgradeRegistryServer).ListUpgrades(ctx, req.(*ListUpgradesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UpgradeRegistry_CancelUpgrade_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CancelUpgradeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UpgradeRegistryServer).CancelUpgrade(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: UpgradeRegistry_CancelUpgrade_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UpgradeRegistryServer).CancelUpgrade(ctx, req.(*CancelUpgradeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UpgradeRegistry_ForceSync_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ForceSyncRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UpgradeRegistryServer).ForceSync(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: UpgradeRegistry_ForceSync_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UpgradeRegistryServer).ForceSync(ctx, req.(*ForceSyncRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// UpgradeRegistry_ServiceDesc is the grpc.ServiceDesc for UpgradeRegistry service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var UpgradeRegistry_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "UpgradeRegistry", + HandlerType: (*UpgradeRegistryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "AddUpgrade", + Handler: _UpgradeRegistry_AddUpgrade_Handler, + }, + { + MethodName: "ListUpgrades", + Handler: _UpgradeRegistry_ListUpgrades_Handler, + }, + { + MethodName: "CancelUpgrade", + Handler: _UpgradeRegistry_CancelUpgrade_Handler, + }, + { + MethodName: "ForceSync", + Handler: _UpgradeRegistry_ForceSync_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "upgrades_registry.proto", +} diff --git a/internal/pkg/proto/version_resolver/version_resolver.pb.go b/internal/pkg/proto/version_resolver/version_resolver.pb.go new file mode 100644 index 0000000..4ef510a --- /dev/null +++ b/internal/pkg/proto/version_resolver/version_resolver.pb.go @@ -0,0 +1,542 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.35.1 +// protoc v5.28.2 +// source: version_resolver.proto + +package version_resolver + +import ( + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + upgrades_registry "blazar/internal/pkg/proto/upgrades_registry" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Version struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // upgrade height the version tag is valid for + + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty" gorm:"primaryKey;not null"` + // chain network name + + Network string `protobuf:"bytes,2,opt,name=network,proto3" json:"network,omitempty" gorm:"primaryKey;type:text;not null"` + // version tag + Tag string `protobuf:"bytes,3,opt,name=tag,proto3" json:"tag,omitempty"` + // source of the upgrade + + Source upgrades_registry.ProviderType `protobuf:"varint,4,opt,name=source,proto3,enum=ProviderType" json:"source,omitempty" gorm:"not null"` + // the version priority + + Priority int32 `protobuf:"varint,5,opt,name=priority,proto3" json:"priority,omitempty" gorm:"primaryKey"` +} + +func (x *Version) Reset() { + *x = Version{} + mi := &file_version_resolver_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Version) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Version) ProtoMessage() {} + +func (x *Version) ProtoReflect() protoreflect.Message { + mi := &file_version_resolver_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Version.ProtoReflect.Descriptor instead. 
+func (*Version) Descriptor() ([]byte, []int) { + return file_version_resolver_proto_rawDescGZIP(), []int{0} +} + +func (x *Version) GetHeight() int64 { + if x != nil { + return x.Height + } + return 0 +} + +func (x *Version) GetNetwork() string { + if x != nil { + return x.Network + } + return "" +} + +func (x *Version) GetTag() string { + if x != nil { + return x.Tag + } + return "" +} + +func (x *Version) GetSource() upgrades_registry.ProviderType { + if x != nil { + return x.Source + } + return upgrades_registry.ProviderType(0) +} + +func (x *Version) GetPriority() int32 { + if x != nil { + return x.Priority + } + return 0 +} + +type RegisterVersionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Version *Version `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + Overwrite bool `protobuf:"varint,2,opt,name=overwrite,proto3" json:"overwrite,omitempty"` +} + +func (x *RegisterVersionRequest) Reset() { + *x = RegisterVersionRequest{} + mi := &file_version_resolver_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RegisterVersionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RegisterVersionRequest) ProtoMessage() {} + +func (x *RegisterVersionRequest) ProtoReflect() protoreflect.Message { + mi := &file_version_resolver_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RegisterVersionRequest.ProtoReflect.Descriptor instead. +func (*RegisterVersionRequest) Descriptor() ([]byte, []int) { + return file_version_resolver_proto_rawDescGZIP(), []int{1} +} + +func (x *RegisterVersionRequest) GetVersion() *Version { + if x != nil { + return x.Version + } + return nil +} + +func (x *RegisterVersionRequest) GetOverwrite() bool { + if x != nil { + return x.Overwrite + } + return false +} + +type RegisterVersionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *RegisterVersionResponse) Reset() { + *x = RegisterVersionResponse{} + mi := &file_version_resolver_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RegisterVersionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RegisterVersionResponse) ProtoMessage() {} + +func (x *RegisterVersionResponse) ProtoReflect() protoreflect.Message { + mi := &file_version_resolver_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RegisterVersionResponse.ProtoReflect.Descriptor instead. 
+func (*RegisterVersionResponse) Descriptor() ([]byte, []int) { + return file_version_resolver_proto_rawDescGZIP(), []int{2} +} + +type GetVersionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DisableCache bool `protobuf:"varint,1,opt,name=disable_cache,json=disableCache,proto3" json:"disable_cache,omitempty"` + Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` +} + +func (x *GetVersionRequest) Reset() { + *x = GetVersionRequest{} + mi := &file_version_resolver_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetVersionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetVersionRequest) ProtoMessage() {} + +func (x *GetVersionRequest) ProtoReflect() protoreflect.Message { + mi := &file_version_resolver_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetVersionRequest.ProtoReflect.Descriptor instead. +func (*GetVersionRequest) Descriptor() ([]byte, []int) { + return file_version_resolver_proto_rawDescGZIP(), []int{3} +} + +func (x *GetVersionRequest) GetDisableCache() bool { + if x != nil { + return x.DisableCache + } + return false +} + +func (x *GetVersionRequest) GetHeight() int64 { + if x != nil { + return x.Height + } + return 0 +} + +type GetVersionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Version *Version `protobuf:"bytes,1,opt,name=version,proto3,oneof" json:"version,omitempty"` +} + +func (x *GetVersionResponse) Reset() { + *x = GetVersionResponse{} + mi := &file_version_resolver_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetVersionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetVersionResponse) ProtoMessage() {} + +func (x *GetVersionResponse) ProtoReflect() protoreflect.Message { + mi := &file_version_resolver_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetVersionResponse.ProtoReflect.Descriptor instead. 
+func (*GetVersionResponse) Descriptor() ([]byte, []int) { + return file_version_resolver_proto_rawDescGZIP(), []int{4} +} + +func (x *GetVersionResponse) GetVersion() *Version { + if x != nil { + return x.Version + } + return nil +} + +type ListVersionsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DisableCache bool `protobuf:"varint,1,opt,name=disable_cache,json=disableCache,proto3" json:"disable_cache,omitempty"` + Height *int64 `protobuf:"varint,2,opt,name=height,proto3,oneof" json:"height,omitempty"` + Source *upgrades_registry.ProviderType `protobuf:"varint,3,opt,name=source,proto3,enum=ProviderType,oneof" json:"source,omitempty"` +} + +func (x *ListVersionsRequest) Reset() { + *x = ListVersionsRequest{} + mi := &file_version_resolver_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListVersionsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListVersionsRequest) ProtoMessage() {} + +func (x *ListVersionsRequest) ProtoReflect() protoreflect.Message { + mi := &file_version_resolver_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListVersionsRequest.ProtoReflect.Descriptor instead. +func (*ListVersionsRequest) Descriptor() ([]byte, []int) { + return file_version_resolver_proto_rawDescGZIP(), []int{5} +} + +func (x *ListVersionsRequest) GetDisableCache() bool { + if x != nil { + return x.DisableCache + } + return false +} + +func (x *ListVersionsRequest) GetHeight() int64 { + if x != nil && x.Height != nil { + return *x.Height + } + return 0 +} + +func (x *ListVersionsRequest) GetSource() upgrades_registry.ProviderType { + if x != nil && x.Source != nil { + return *x.Source + } + return upgrades_registry.ProviderType(0) +} + +type ListVersionsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Versions []*Version `protobuf:"bytes,1,rep,name=versions,proto3" json:"versions,omitempty"` +} + +func (x *ListVersionsResponse) Reset() { + *x = ListVersionsResponse{} + mi := &file_version_resolver_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListVersionsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListVersionsResponse) ProtoMessage() {} + +func (x *ListVersionsResponse) ProtoReflect() protoreflect.Message { + mi := &file_version_resolver_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListVersionsResponse.ProtoReflect.Descriptor instead. 
+func (*ListVersionsResponse) Descriptor() ([]byte, []int) { + return file_version_resolver_proto_rawDescGZIP(), []int{6} +} + +func (x *ListVersionsResponse) GetVersions() []*Version { + if x != nil { + return x.Versions + } + return nil +} + +var File_version_resolver_proto protoreflect.FileDescriptor + +var file_version_resolver_proto_rawDesc = []byte{ + 0x0a, 0x16, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, + 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, + 0x65, 0x73, 0x5f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0x90, 0x01, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x68, + 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x68, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x10, 0x0a, + 0x03, 0x74, 0x61, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x61, 0x67, 0x12, + 0x25, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x0d, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x52, 0x06, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, + 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, + 0x74, 0x79, 0x22, 0x5a, 0x0a, 0x16, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x22, 0x0a, 0x07, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x1c, 0x0a, 0x09, 0x6f, 0x76, 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x09, 0x6f, 0x76, 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, 0x65, 0x22, 0x19, + 0x0a, 0x17, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x50, 0x0a, 0x11, 0x47, 0x65, 0x74, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, + 0x0a, 0x0d, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x63, 0x61, 0x63, 0x68, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x61, + 0x63, 0x68, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x49, 0x0a, 0x12, 0x47, + 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x27, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x07, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x99, 0x01, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, + 0x0a, 0x0d, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x63, 0x61, 0x63, 0x68, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x61, + 0x63, 0x68, 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x88, 0x01, 0x01, + 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x0d, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x48, + 0x01, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, + 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x22, 0x3c, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x08, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x32, 0x98, 0x02, 0x0a, 0x0f, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x6f, + 0x6c, 0x76, 0x65, 0x72, 0x12, 0x5c, 0x0a, 0x0a, 0x41, 0x64, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x17, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x52, 0x65, + 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x3a, 0x01, 0x2a, + 0x22, 0x10, 0x2f, 0x76, 0x31, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x61, + 0x64, 0x64, 0x12, 0x4f, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x12, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x18, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x12, 0x12, 0x10, 0x2f, 0x76, 0x31, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x67, 0x65, 0x74, 0x12, 0x56, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x14, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x19, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x13, 0x12, 0x11, 0x2f, 0x76, 0x31, 0x2f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x42, 0x25, 0x5a, 0x23, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, + 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_version_resolver_proto_rawDescOnce sync.Once + file_version_resolver_proto_rawDescData = file_version_resolver_proto_rawDesc +) + +func file_version_resolver_proto_rawDescGZIP() []byte { + file_version_resolver_proto_rawDescOnce.Do(func() { + file_version_resolver_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_version_resolver_proto_rawDescData) + }) + return file_version_resolver_proto_rawDescData +} + +var file_version_resolver_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_version_resolver_proto_goTypes = []any{ + (*Version)(nil), // 0: Version + (*RegisterVersionRequest)(nil), // 1: RegisterVersionRequest + (*RegisterVersionResponse)(nil), // 2: RegisterVersionResponse + (*GetVersionRequest)(nil), // 3: GetVersionRequest + (*GetVersionResponse)(nil), // 4: GetVersionResponse + (*ListVersionsRequest)(nil), // 5: ListVersionsRequest + (*ListVersionsResponse)(nil), // 6: ListVersionsResponse + (upgrades_registry.ProviderType)(0), // 7: ProviderType +} +var file_version_resolver_proto_depIdxs = []int32{ + 7, // 0: Version.source:type_name -> ProviderType + 0, // 1: RegisterVersionRequest.version:type_name -> Version + 0, // 2: GetVersionResponse.version:type_name -> Version + 7, // 3: ListVersionsRequest.source:type_name -> ProviderType + 0, // 4: ListVersionsResponse.versions:type_name -> Version + 1, // 5: VersionResolver.AddVersion:input_type -> RegisterVersionRequest + 3, // 6: VersionResolver.GetVersion:input_type -> GetVersionRequest + 5, // 7: VersionResolver.ListVersions:input_type -> ListVersionsRequest + 2, // 8: VersionResolver.AddVersion:output_type -> RegisterVersionResponse + 4, // 9: VersionResolver.GetVersion:output_type -> GetVersionResponse + 6, // 10: VersionResolver.ListVersions:output_type -> ListVersionsResponse + 8, // [8:11] is the sub-list for method output_type + 5, // [5:8] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_version_resolver_proto_init() } +func file_version_resolver_proto_init() { + if File_version_resolver_proto != nil { + return + } + file_version_resolver_proto_msgTypes[4].OneofWrappers = []any{} + file_version_resolver_proto_msgTypes[5].OneofWrappers = []any{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_version_resolver_proto_rawDesc, + NumEnums: 0, + NumMessages: 7, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_version_resolver_proto_goTypes, + DependencyIndexes: file_version_resolver_proto_depIdxs, + MessageInfos: file_version_resolver_proto_msgTypes, + }.Build() + File_version_resolver_proto = out.File + file_version_resolver_proto_rawDesc = nil + file_version_resolver_proto_goTypes = nil + file_version_resolver_proto_depIdxs = nil +} diff --git a/internal/pkg/proto/version_resolver/version_resolver.pb.gw.go b/internal/pkg/proto/version_resolver/version_resolver.pb.gw.go new file mode 100644 index 0000000..a145e9f --- /dev/null +++ b/internal/pkg/proto/version_resolver/version_resolver.pb.gw.go @@ -0,0 +1,337 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: version_resolver.proto + +/* +Package version_resolver is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package version_resolver + +import ( + "context" + "io" + "net/http" + + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = metadata.Join + +func request_VersionResolver_AddVersion_0(ctx context.Context, marshaler runtime.Marshaler, client VersionResolverClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RegisterVersionRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.AddVersion(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_VersionResolver_AddVersion_0(ctx context.Context, marshaler runtime.Marshaler, server VersionResolverServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq RegisterVersionRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.AddVersion(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_VersionResolver_GetVersion_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_VersionResolver_GetVersion_0(ctx context.Context, marshaler runtime.Marshaler, client VersionResolverClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetVersionRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_VersionResolver_GetVersion_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetVersion(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_VersionResolver_GetVersion_0(ctx context.Context, marshaler runtime.Marshaler, server VersionResolverServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetVersionRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_VersionResolver_GetVersion_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetVersion(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_VersionResolver_ListVersions_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_VersionResolver_ListVersions_0(ctx context.Context, 
marshaler runtime.Marshaler, client VersionResolverClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ListVersionsRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_VersionResolver_ListVersions_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.ListVersions(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_VersionResolver_ListVersions_0(ctx context.Context, marshaler runtime.Marshaler, server VersionResolverServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ListVersionsRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_VersionResolver_ListVersions_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.ListVersions(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterVersionResolverHandlerServer registers the http handlers for service VersionResolver to "mux". +// UnaryRPC :call VersionResolverServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterVersionResolverHandlerFromEndpoint instead. +func RegisterVersionResolverHandlerServer(ctx context.Context, mux *runtime.ServeMux, server VersionResolverServer) error { + + mux.Handle("POST", pattern_VersionResolver_AddVersion_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/.VersionResolver/AddVersion", runtime.WithHTTPPathPattern("/v1/versions/add")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_VersionResolver_AddVersion_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_VersionResolver_AddVersion_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_VersionResolver_GetVersion_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/.VersionResolver/GetVersion", runtime.WithHTTPPathPattern("/v1/versions/get")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_VersionResolver_GetVersion_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_VersionResolver_GetVersion_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_VersionResolver_ListVersions_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/.VersionResolver/ListVersions", runtime.WithHTTPPathPattern("/v1/versions/list")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_VersionResolver_ListVersions_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_VersionResolver_ListVersions_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterVersionResolverHandlerFromEndpoint is same as RegisterVersionResolverHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterVersionResolverHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.DialContext(ctx, endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterVersionResolverHandler(ctx, mux, conn) +} + +// RegisterVersionResolverHandler registers the http handlers for service VersionResolver to "mux". +// The handlers forward requests to the grpc endpoint over "conn". 
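+//
+// When no connection is at hand, RegisterVersionResolverHandlerFromEndpoint
+// dials for you; an illustrative sketch (the endpoint address and dial
+// options are placeholders):
+//
+//	mux := runtime.NewServeMux()
+//	opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
+//	if err := RegisterVersionResolverHandlerFromEndpoint(ctx, mux, "localhost:5678", opts); err != nil {
+//		log.Fatal(err)
+//	}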
+func RegisterVersionResolverHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterVersionResolverHandlerClient(ctx, mux, NewVersionResolverClient(conn)) +} + +// RegisterVersionResolverHandlerClient registers the http handlers for service VersionResolver +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "VersionResolverClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "VersionResolverClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "VersionResolverClient" to call the correct interceptors. +func RegisterVersionResolverHandlerClient(ctx context.Context, mux *runtime.ServeMux, client VersionResolverClient) error { + + mux.Handle("POST", pattern_VersionResolver_AddVersion_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/.VersionResolver/AddVersion", runtime.WithHTTPPathPattern("/v1/versions/add")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_VersionResolver_AddVersion_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_VersionResolver_AddVersion_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_VersionResolver_GetVersion_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/.VersionResolver/GetVersion", runtime.WithHTTPPathPattern("/v1/versions/get")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_VersionResolver_GetVersion_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_VersionResolver_GetVersion_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_VersionResolver_ListVersions_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/.VersionResolver/ListVersions", runtime.WithHTTPPathPattern("/v1/versions/list")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_VersionResolver_ListVersions_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_VersionResolver_ListVersions_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_VersionResolver_AddVersion_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "versions", "add"}, "")) + + pattern_VersionResolver_GetVersion_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "versions", "get"}, "")) + + pattern_VersionResolver_ListVersions_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "versions", "list"}, "")) +) + +var ( + forward_VersionResolver_AddVersion_0 = runtime.ForwardResponseMessage + + forward_VersionResolver_GetVersion_0 = runtime.ForwardResponseMessage + + forward_VersionResolver_ListVersions_0 = runtime.ForwardResponseMessage +) diff --git a/internal/pkg/proto/version_resolver/version_resolver_grpc.pb.go b/internal/pkg/proto/version_resolver/version_resolver_grpc.pb.go new file mode 100644 index 0000000..8a42974 --- /dev/null +++ b/internal/pkg/proto/version_resolver/version_resolver_grpc.pb.go @@ -0,0 +1,203 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.28.2 +// source: version_resolver.proto + +package version_resolver + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + VersionResolver_AddVersion_FullMethodName = "/VersionResolver/AddVersion" + VersionResolver_GetVersion_FullMethodName = "/VersionResolver/GetVersion" + VersionResolver_ListVersions_FullMethodName = "/VersionResolver/ListVersions" +) + +// VersionResolverClient is the client API for VersionResolver service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type VersionResolverClient interface { + // register a new version tag for a given height and network + AddVersion(ctx context.Context, in *RegisterVersionRequest, opts ...grpc.CallOption) (*RegisterVersionResponse, error) + // retrieve the version tag for a given height and network + GetVersion(ctx context.Context, in *GetVersionRequest, opts ...grpc.CallOption) (*GetVersionResponse, error) + // list all registered versions + ListVersions(ctx context.Context, in *ListVersionsRequest, opts ...grpc.CallOption) (*ListVersionsResponse, error) +} + +type versionResolverClient struct { + cc grpc.ClientConnInterface +} + +func NewVersionResolverClient(cc grpc.ClientConnInterface) VersionResolverClient { + return &versionResolverClient{cc} +} + +func (c *versionResolverClient) AddVersion(ctx context.Context, in *RegisterVersionRequest, opts ...grpc.CallOption) (*RegisterVersionResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(RegisterVersionResponse) + err := c.cc.Invoke(ctx, VersionResolver_AddVersion_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *versionResolverClient) GetVersion(ctx context.Context, in *GetVersionRequest, opts ...grpc.CallOption) (*GetVersionResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetVersionResponse) + err := c.cc.Invoke(ctx, VersionResolver_GetVersion_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *versionResolverClient) ListVersions(ctx context.Context, in *ListVersionsRequest, opts ...grpc.CallOption) (*ListVersionsResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ListVersionsResponse) + err := c.cc.Invoke(ctx, VersionResolver_ListVersions_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +// VersionResolverServer is the server API for VersionResolver service. +// All implementations must embed UnimplementedVersionResolverServer +// for forward compatibility. +type VersionResolverServer interface { + // register a new version tag for a given height and network + AddVersion(context.Context, *RegisterVersionRequest) (*RegisterVersionResponse, error) + // retrieve the version tag for a given height and network + GetVersion(context.Context, *GetVersionRequest) (*GetVersionResponse, error) + // list all registered versions + ListVersions(context.Context, *ListVersionsRequest) (*ListVersionsResponse, error) + mustEmbedUnimplementedVersionResolverServer() +} + +// UnimplementedVersionResolverServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. 
+type UnimplementedVersionResolverServer struct{}
+
+func (UnimplementedVersionResolverServer) AddVersion(context.Context, *RegisterVersionRequest) (*RegisterVersionResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method AddVersion not implemented")
+}
+func (UnimplementedVersionResolverServer) GetVersion(context.Context, *GetVersionRequest) (*GetVersionResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetVersion not implemented")
+}
+func (UnimplementedVersionResolverServer) ListVersions(context.Context, *ListVersionsRequest) (*ListVersionsResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ListVersions not implemented")
+}
+func (UnimplementedVersionResolverServer) mustEmbedUnimplementedVersionResolverServer() {}
+func (UnimplementedVersionResolverServer) testEmbeddedByValue()                         {}
+
+// UnsafeVersionResolverServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to VersionResolverServer will
+// result in compilation errors.
+type UnsafeVersionResolverServer interface {
+	mustEmbedUnimplementedVersionResolverServer()
+}
+
+func RegisterVersionResolverServer(s grpc.ServiceRegistrar, srv VersionResolverServer) {
+	// If the following call panics, it indicates UnimplementedVersionResolverServer was
+	// embedded by pointer and is nil. This will cause panics if an
+	// unimplemented method is ever invoked, so we test this at initialization
+	// time to prevent it from happening at runtime later due to I/O.
+	if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
+		t.testEmbeddedByValue()
+	}
+	s.RegisterService(&VersionResolver_ServiceDesc, srv)
+}
+
+func _VersionResolver_AddVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(RegisterVersionRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VersionResolverServer).AddVersion(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: VersionResolver_AddVersion_FullMethodName,
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VersionResolverServer).AddVersion(ctx, req.(*RegisterVersionRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VersionResolver_GetVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(GetVersionRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VersionResolverServer).GetVersion(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: VersionResolver_GetVersion_FullMethodName,
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VersionResolverServer).GetVersion(ctx, req.(*GetVersionRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VersionResolver_ListVersions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ListVersionsRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VersionResolverServer).ListVersions(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: VersionResolver_ListVersions_FullMethodName,
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VersionResolverServer).ListVersions(ctx, req.(*ListVersionsRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+// VersionResolver_ServiceDesc is the grpc.ServiceDesc for VersionResolver service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var VersionResolver_ServiceDesc = grpc.ServiceDesc{
+	ServiceName: "VersionResolver",
+	HandlerType: (*VersionResolverServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "AddVersion",
+			Handler:    _VersionResolver_AddVersion_Handler,
+		},
+		{
+			MethodName: "GetVersion",
+			Handler:    _VersionResolver_GetVersion_Handler,
+		},
+		{
+			MethodName: "ListVersions",
+			Handler:    _VersionResolver_ListVersions_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "version_resolver.proto",
+}
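The chain provider that follows deduplicates on-chain proposals by name: cosmos-sdk allows a later proposal to overwrite an earlier upgrade plan under the same name, so only the newest passed instance per name should survive. A minimal standalone sketch of that rule, with a hypothetical `upg` type and made-up sample proposals (the real code works on `chainUpgrade` values and proposal IDs):

```go
package main

import "fmt"

// upg is a stand-in for chainUpgrade: id plays the role of the proposal ID.
type upg struct {
	id     int
	name   string
	passed bool
}

func main() {
	// proposals sorted by ID; two PASSED proposals reuse the name "v2"
	ups := []upg{
		{id: 3, name: "v2", passed: true},
		{id: 5, name: "v3", passed: false},
		{id: 7, name: "v2", passed: true},
	}

	// walk newest-first (GetUpgrades does this via slices.Reverse) and skip
	// any older proposal whose name is already claimed by a PASSED upgrade
	seen := map[string]bool{}
	kept := []upg{}
	for i := len(ups) - 1; i >= 0; i-- {
		u := ups[i]
		if seen[u.name] {
			continue
		}
		if u.passed {
			seen[u.name] = true
		}
		kept = append(kept, u)
	}

	fmt.Println(kept) // [{7 v2 true} {5 v3 false}]: proposal 3 is shadowed
}
```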
diff --git a/internal/pkg/provider/chain/chain.go b/internal/pkg/provider/chain/chain.go
new file mode 100644
index 0000000..69e0e2a
--- /dev/null
+++ b/internal/pkg/provider/chain/chain.go
@@ -0,0 +1,227 @@
+package chain
+
+import (
+	"context"
+	"slices"
+
+	"blazar/internal/pkg/cosmos"
+	"blazar/internal/pkg/errors"
+	urproto "blazar/internal/pkg/proto/upgrades_registry"
+	"blazar/internal/pkg/provider"
+
+	v1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1"
+)
+
+type Provider struct {
+	cosmosClient *cosmos.Client
+	chain        string
+	priority     int32
+}
+
+func NewProvider(cosmosClient *cosmos.Client, chain string, priority int32) *Provider {
+	return &Provider{
+		cosmosClient: cosmosClient,
+		chain:        chain,
+		priority:     priority,
+	}
+}
+
+func (p *Provider) GetUpgrades(ctx context.Context) ([]*urproto.Upgrade, error) {
+	upgrades, err := p.fetchAllUpgrades(ctx)
+	if err != nil {
+		return []*urproto.Upgrade{}, err
+	}
+
+	// cosmos-sdk allows changing parameters of a previously passed upgrade
+	// by creating a new upgrade proposal with the same name in the upgrade plan
+	// https://github.com/cosmos/cosmos-sdk/blob/41f92723399ef0affa90c6b3d8e7b47b82361280/x/upgrade/keeper/keeper.go#L185
+	// since upgrades are sorted by proposal ID, we iterate in reverse and keep
+	// only the newest instance for a name once a passed upgrade exists under that name
+	passedNames := make(map[string]struct{}, len(upgrades))
+	filtered := make([]chainUpgrade, 0, len(upgrades))
+	slices.Reverse(upgrades)
+	for _, upgrade := range upgrades {
+		if _, ok := passedNames[upgrade.Name]; !ok {
+			if upgrade.Status == PASSED {
+				passedNames[upgrade.Name] = struct{}{}
+			}
+			filtered = append(filtered, upgrade)
+		}
+	}
+
+	return toProto(filtered, p.priority), nil
+}
+
+func (p *Provider) GetUpgradesByType(ctx context.Context, upgradeType urproto.UpgradeType) ([]*urproto.Upgrade, error) {
+	upgrades, err := p.GetUpgrades(ctx)
+	if err != nil {
+		return []*urproto.Upgrade{}, err
+	}
+
+	filtered := make([]*urproto.Upgrade, 0, len(upgrades))
+	for _, upgrade := range upgrades {
+		if upgrade.Type == upgradeType {
+			filtered = append(filtered, upgrade)
+		}
+	}
+
+	return filtered, nil
+}
+
+func (p *Provider) GetUpgradesByHeight(ctx context.Context, height int64) ([]*urproto.Upgrade, error) {
+	upgrades, err := p.GetUpgrades(ctx)
+	if err != nil {
+		return []*urproto.Upgrade{}, err
+	}
+
+	filtered := make([]*urproto.Upgrade, 0, len(upgrades))
+	for _, upgrade := range upgrades {
+		if upgrade.Height == height {
+			filtered = append(filtered, upgrade)
+		}
+	}
+
+	return filtered, nil
+}
+
+func (p *Provider) AddUpgrade(_ context.Context, _ *urproto.Upgrade, _ bool) error {
+	return errors.New("add upgrade is not supported for chain provider")
+}
+
+func (p *Provider) RegisterVersion(_ uint64, _ string) error {
+	return errors.New("register version is not supported for chain provider")
+}
+
+func (p *Provider) GetVersion(_ uint64) (string, error) {
+	return "", errors.New("get version is not supported for chain provider")
+}
+
+func (p *Provider) CancelUpgrade(_ context.Context, _ int64, _ string) error {
+	return errors.New("cancel upgrade is not supported for chain provider")
+}
+
+func (p *Provider) Type() urproto.ProviderType {
+	return urproto.ProviderType_CHAIN
+}
+
+func (p *Provider) fetchAllUpgrades(ctx context.Context) ([]chainUpgrade, error) {
+	upgrades, errV1 := p.getUpgradeProposalsV1(ctx)
+	if errV1 != nil {
+		var errV1beta1 error
+
+		upgrades, errV1beta1 = p.getUpgradeProposalsV1beta1(ctx)
+		if errV1beta1 != nil {
+			return []chainUpgrade{}, errors.Wrapf(errors.Join(errV1, errV1beta1), "failed to scrape upgrade proposals from both v1 and v1beta1 endpoints")
+		}
+	}
+
+	return upgrades, nil
+}
+
+func (p *Provider) getUpgradeProposalsV1beta1(ctx context.Context) ([]chainUpgrade, error) {
+	proposals, err := p.cosmosClient.GetProposalsV1beta1(ctx)
+	upgrades := make([]chainUpgrade, 0, 10)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to get proposals from v1beta1 endpoint")
+	}
+
+	for _, proposal := range proposals {
+		status := fromV1beta1(proposal.Status)
+		if status != REJECTED && status != FAILED {
+			upgrade, err := parseProposal(
+				proposal.Content.TypeUrl,
+				proposal.Content.Value,
+				status,
+				proposal.ProposalId,
+				p.chain,
+			)
+			if err != nil {
+				return nil, err
+			}
+			if upgrade != nil {
+				upgrades = append(upgrades, *upgrade)
+			}
+			// NOTE: Blazar doesn't support MsgCancelUpgrade because we have never seen it used
+		}
+	}
+	return upgrades, nil
+}
+
+func (p *Provider) getUpgradeProposalsV1(ctx context.Context) ([]chainUpgrade, error) {
+	proposals, err := p.cosmosClient.GetProposalsV1(ctx)
+	upgrades := make([]chainUpgrade, 0, 10)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to get proposals from v1 endpoint")
+	}
+
+	for _, proposal := range proposals {
+		status := fromV1(proposal.Status)
+		if status != REJECTED && status != FAILED {
+			for _, msg := range proposal.Messages {
+				var (
+					typeURL = msg.GetTypeUrl()
+					content = msg.GetValue()
+				)
+				if msg.GetTypeUrl() == "/cosmos.gov.v1.MsgExecLegacyContent" {
+					legacyContent := &v1.MsgExecLegacyContent{}
+					if err := legacyContent.Unmarshal(msg.GetValue()); err != nil {
+						return nil, errors.Wrapf(err, "failed to unmarshal MsgExecLegacyContent in proposal id %d", proposal.GetId())
+					}
+
+					typeURL = legacyContent.Content.TypeUrl
+					content = legacyContent.Content.GetValue()
+				}
+
+				upgrade, err := parseProposal(
+					typeURL,
+					content,
+					status,
+					proposal.GetId(),
+					p.chain,
+				)
+				if err != nil {
+					return nil, err
+				}
+				if upgrade != nil {
+					upgrades = append(upgrades, *upgrade)
+				}
+				// NOTE: Blazar doesn't support MsgCancelUpgrade because we have never seen it used
+			}
+		}
+	}
+	return upgrades, nil
+}
+
+func parseProposal(typeURL string, content []byte, status ProposalStatus, proposalID uint64, chain string) (*chainUpgrade, error) {
+	upgrade, err := trySoftwareUpgradeProposal(typeURL, content, status, chain)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to process proposal id %d", proposalID)
+	}
+	if upgrade != nil {
+		upgrade.ProposalID = proposalID
+		return upgrade, nil
+	}
+
+	upgrade, err =
tryMsgSoftwareUpgrade(typeURL, content, status, chain) + if err != nil { + return nil, errors.Wrapf(err, "failed to process proposal id %d", proposalID) + } + if upgrade != nil { + upgrade.ProposalID = proposalID + return upgrade, nil + } + + return nil, nil +} + +func toProto(upgrades []chainUpgrade, priority int32) []*urproto.Upgrade { + newUpgrades := make([]*urproto.Upgrade, 0, len(upgrades)) + + for _, upgrade := range upgrades { + upg := upgrade.ToProto() + provider.PostProcessUpgrade(&upg, urproto.ProviderType_CHAIN, priority) + newUpgrades = append(newUpgrades, &upg) + } + + return newUpgrades +} diff --git a/internal/pkg/provider/chain/types.go b/internal/pkg/provider/chain/types.go new file mode 100644 index 0000000..3989ed6 --- /dev/null +++ b/internal/pkg/provider/chain/types.go @@ -0,0 +1,158 @@ +package chain + +import ( + "fmt" + + "blazar/internal/pkg/errors" + urproto "blazar/internal/pkg/proto/upgrades_registry" + + v1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1" + "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1" + upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" +) + +type ProposalStatus int + +const ( + UNKNOWN ProposalStatus = -1 + UNSPECIFIED ProposalStatus = 0 + DEPOSIT_PERIOD ProposalStatus = 1 + VOTING_PERIOD ProposalStatus = 2 + PASSED ProposalStatus = 3 + REJECTED ProposalStatus = 4 + FAILED ProposalStatus = 5 +) + +func (ps ProposalStatus) String() string { + switch ps { + case UNKNOWN: + return "UNKNOWN" + case UNSPECIFIED: + return "UNSPECIFIED" + case DEPOSIT_PERIOD: + return "DEPOSIT_PERIOD" + case VOTING_PERIOD: + return "VOTING_PERIOD" + case PASSED: + return "PASSED" + case REJECTED: + return "REJECTED" + case FAILED: + return "FAILED" + default: + return fmt.Sprintf("%d", int(ps)) + } +} + +type chainUpgrade struct { + Height int64 + Name string + Status ProposalStatus + Network string + ProposalID uint64 +} + +func (cu chainUpgrade) ToProto() urproto.Upgrade { + source := urproto.ProviderType_CHAIN + + upgradeStatus := urproto.UpgradeStatus_UNKNOWN + switch cu.Status { + case UNKNOWN: + case UNSPECIFIED: + upgradeStatus = urproto.UpgradeStatus_UNKNOWN + case DEPOSIT_PERIOD: + upgradeStatus = urproto.UpgradeStatus_SCHEDULED + case VOTING_PERIOD: + upgradeStatus = urproto.UpgradeStatus_SCHEDULED + case PASSED: + upgradeStatus = urproto.UpgradeStatus_ACTIVE + case REJECTED: + upgradeStatus = urproto.UpgradeStatus_CANCELLED + case FAILED: + upgradeStatus = urproto.UpgradeStatus_CANCELLED + } + + // #nosec G115 + proposalID := int64(cu.ProposalID) + return urproto.Upgrade{ + Height: cu.Height, + Tag: "", + Network: cu.Network, + Name: cu.Name, + Type: urproto.UpgradeType_GOVERNANCE, + Status: upgradeStatus, + Source: source, + ProposalId: &proposalID, + } +} + +func fromV1(status v1.ProposalStatus) ProposalStatus { + switch status { + case v1.ProposalStatus_PROPOSAL_STATUS_UNSPECIFIED: + return UNSPECIFIED + case v1.ProposalStatus_PROPOSAL_STATUS_DEPOSIT_PERIOD: + return DEPOSIT_PERIOD + case v1.ProposalStatus_PROPOSAL_STATUS_VOTING_PERIOD: + return VOTING_PERIOD + case v1.ProposalStatus_PROPOSAL_STATUS_PASSED: + return PASSED + case v1.ProposalStatus_PROPOSAL_STATUS_REJECTED: + return REJECTED + case v1.ProposalStatus_PROPOSAL_STATUS_FAILED: + return FAILED + default: + return UNKNOWN + } +} + +func fromV1beta1(status v1beta1.ProposalStatus) ProposalStatus { + switch status { + case v1beta1.StatusNil: + return UNSPECIFIED + case v1beta1.StatusDepositPeriod: + return DEPOSIT_PERIOD + case v1beta1.StatusVotingPeriod: + return VOTING_PERIOD + 
case v1beta1.StatusPassed: + return PASSED + case v1beta1.StatusRejected: + return REJECTED + case v1beta1.StatusFailed: + return FAILED + default: + return UNKNOWN + } +} + +func trySoftwareUpgradeProposal(typeURL string, value []byte, status ProposalStatus, chain string) (*chainUpgrade, error) { + if typeURL == "/cosmos.upgrade.v1beta1.SoftwareUpgradeProposal" { + // this is deprecated but still widely used on chains + upgrade := &upgradetypes.SoftwareUpgradeProposal{} + if err := upgrade.Unmarshal(value); err != nil { + return nil, errors.Wrapf(err, "failed to unmarshal SoftwareUpgradeProposal") + } + return &chainUpgrade{ + Height: upgrade.Plan.Height, + Name: upgrade.Plan.Name, + Status: status, + Network: chain, + }, nil + } + return nil, nil +} + +func tryMsgSoftwareUpgrade(typeURL string, value []byte, status ProposalStatus, chain string) (*chainUpgrade, error) { + if typeURL == "/cosmos.upgrade.v1beta1.MsgSoftwareUpgrade" { + upgrade := &upgradetypes.MsgSoftwareUpgrade{} + if err := upgrade.Unmarshal(value); err != nil { + return nil, errors.Wrapf(err, "failed to unmarshal MsgSoftwareUpgrade") + } + return &chainUpgrade{ + Height: upgrade.Plan.Height, + Name: upgrade.Plan.Name, + Status: status, + Network: chain, + }, nil + } + return nil, nil +} diff --git a/internal/pkg/provider/database/database.go b/internal/pkg/provider/database/database.go new file mode 100644 index 0000000..cbad5d2 --- /dev/null +++ b/internal/pkg/provider/database/database.go @@ -0,0 +1,223 @@ +package database + +import ( + "context" + "fmt" + + "blazar/internal/pkg/config" + "blazar/internal/pkg/errors" + urproto "blazar/internal/pkg/proto/upgrades_registry" + vrproto "blazar/internal/pkg/proto/version_resolver" + "blazar/internal/pkg/provider" + + "gorm.io/driver/postgres" + "gorm.io/gorm" + "gorm.io/gorm/clause" +) + +type Provider struct { + db *gorm.DB + priority int32 + network string +} + +func NewDatabaseProviderWithDB(db *gorm.DB, network string, priority int32) *Provider { + return &Provider{ + db: db, + network: network, + priority: priority, + } +} + +func NewDatabaseProvider(cfg *config.DatabaseProvider, network string) (*Provider, error) { + db, err := InitDB(cfg, &gorm.Config{}) + if err != nil { + return nil, err + } + + if cfg.AutoMigrate { + if err := AutoMigrate(db); err != nil { + return nil, err + } + } + + provider := &Provider{ + db: db, + network: network, + priority: cfg.DefaultPriority, + } + + return provider, nil +} + +func (dp Provider) GetUpgrades(ctx context.Context) ([]*urproto.Upgrade, error) { + var dbUpgrades []*urproto.Upgrade + result := dp.db.WithContext(ctx).Where("network = ?", dp.network).Find(&dbUpgrades) + if result.Error != nil { + return []*urproto.Upgrade{}, errors.Wrapf(result.Error, "failed to get upgrades from database") + } + + return provider.PostProcessUpgrades(dbUpgrades, urproto.ProviderType_DATABASE, dp.priority), nil +} + +func (dp Provider) GetUpgradesByType(ctx context.Context, upgradeType urproto.UpgradeType) ([]*urproto.Upgrade, error) { + var dbUpgrades []*urproto.Upgrade + result := dp.db.WithContext(ctx).Where("network = ? 
AND type = ?", dp.network, upgradeType.String()).Find(&dbUpgrades)
+	if result.Error != nil {
+		return []*urproto.Upgrade{}, errors.Wrapf(result.Error, "failed to get upgrades by type from database")
+	}
+
+	return provider.PostProcessUpgrades(dbUpgrades, urproto.ProviderType_DATABASE, dp.priority), nil
+}
+
+func (dp Provider) GetUpgradesByHeight(ctx context.Context, height int64) ([]*urproto.Upgrade, error) {
+	var upgrades []*urproto.Upgrade
+	result := dp.db.WithContext(ctx).Where("height = ? AND network = ?", height, dp.network).Find(&upgrades)
+	if result.Error != nil {
+		return []*urproto.Upgrade{}, errors.Wrapf(result.Error, "failed to get upgrade by height from database")
+	}
+
+	return provider.PostProcessUpgrades(upgrades, urproto.ProviderType_DATABASE, dp.priority), nil
+}
+
+func (dp Provider) AddUpgrade(ctx context.Context, upgrade *urproto.Upgrade, overwrite bool) error {
+	provider.PostProcessUpgrade(upgrade, urproto.ProviderType_DATABASE, dp.priority)
+
+	// update the entry if it exists or create a new one (depends on the overwrite flag)
+	if overwrite {
+		result := dp.db.WithContext(ctx).Clauses(clause.OnConflict{
+			// NOTE: this is the compound primary key
+			Columns: []clause.Column{{Name: "height"}, {Name: "network"}, {Name: "priority"}},
+			// this should include the rest of the columns
+			// NOTE: status and step are managed by the blazar state machine and should not be updated
+			DoUpdates: clause.AssignmentColumns([]string{"tag", "name", "type" /* "status", */ /* step, */, "source", "proposal_id"}),
+		}).Create(upgrade)
+		return result.Error
+	}
+
+	result := dp.db.Create(upgrade)
+	return result.Error
+}
+
+func (dp Provider) RegisterVersion(ctx context.Context, version *vrproto.Version, overwrite bool) error {
+	provider.PostProcessVersion(version, urproto.ProviderType_DATABASE, dp.priority)
+
+	if overwrite {
+		result := dp.db.WithContext(ctx).Clauses(clause.OnConflict{
+			// NOTE: this is the compound primary key
+			Columns: []clause.Column{{Name: "height"}, {Name: "network"}, {Name: "priority"}},
+			// this should include the rest of the columns
+			DoUpdates: clause.AssignmentColumns([]string{"tag", "source"}),
+		}).Create(version)
+		return result.Error
+	}
+
+	result := dp.db.WithContext(ctx).Create(version)
+	return result.Error
+}
+
+func (dp Provider) GetVersions(ctx context.Context) ([]*vrproto.Version, error) {
+	var versions []*vrproto.Version
+
+	result := dp.db.WithContext(ctx).Where("network = ?", dp.network).Find(&versions)
+	if result.Error != nil {
+		return []*vrproto.Version{}, errors.Wrapf(result.Error, "failed to get versions from database")
+	}
+
+	return provider.PostProcessVersions(versions, urproto.ProviderType_DATABASE, dp.priority), nil
+}
+
+func (dp Provider) GetVersionsByHeight(ctx context.Context, height uint64) ([]*vrproto.Version, error) {
+	var versions []*vrproto.Version
+
+	result := dp.db.WithContext(ctx).Where("height = ? AND network = ?", height, dp.network).Find(&versions)
+	if result.Error != nil {
+		return nil, errors.Wrapf(result.Error, "failed to get version by height from database")
+	}
+
+	if versions == nil {
+		return nil, fmt.Errorf("version not found for height: %d", height)
+	}
+
+	return provider.PostProcessVersions(versions, urproto.ProviderType_DATABASE, dp.priority), nil
+}
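With `overwrite` set, `AddUpgrade` and `RegisterVersion` above are upserts keyed on the compound primary key `(height, network, priority)`; without it, a second insert for the same key fails on the constraint. A usage sketch, assuming a reachable Postgres; the connection settings, network name and tags here are made up:

```go
package main

import (
	"context"

	"blazar/internal/pkg/config"
	urproto "blazar/internal/pkg/proto/upgrades_registry"
	"blazar/internal/pkg/provider/database"
)

func main() {
	// hypothetical connection settings; AutoMigrate creates the tables
	cfg := &config.DatabaseProvider{AutoMigrate: true /* host, port, credentials... */}

	dp, err := database.NewDatabaseProvider(cfg, "testnet-1")
	if err != nil {
		panic(err)
	}

	up := &urproto.Upgrade{
		Height:  100,
		Network: "testnet-1",
		Tag:     "v1.0.0",
		Type:    urproto.UpgradeType_GOVERNANCE,
	}

	// first call inserts the row
	if err := dp.AddUpgrade(context.Background(), up, false); err != nil {
		panic(err)
	}

	// same (height, network, priority): overwrite=true turns the insert into
	// ON CONFLICT ... DO UPDATE on tag/name/type/source/proposal_id
	up.Tag = "v1.0.1"
	if err := dp.AddUpgrade(context.Background(), up, true); err != nil {
		panic(err)
	}
}
```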
+func (dp Provider) CancelUpgrade(ctx context.Context, height int64, network string) error {
+	total := int64(0)
+	result := dp.db.WithContext(ctx).Model(&urproto.Upgrade{}).Where("network = ? AND height = ?", dp.network, height).Count(&total)
+
+	if result.Error != nil {
+		return errors.Wrapf(result.Error, "failed to count upgrades from database")
+	}
+
+	if total == 0 {
+		// if there are no upgrades registered (in the database provider) blazar will create one with status CANCELLED
+		result := dp.db.WithContext(ctx).Model(&urproto.Upgrade{}).Create(&urproto.Upgrade{
+			Height:     height,
+			Network:    network,
+			Priority:   dp.priority,
+			Name:       "",
+			Type:       urproto.UpgradeType_NON_GOVERNANCE_UNCOORDINATED,
+			Status:     urproto.UpgradeStatus_CANCELLED,
+			Step:       urproto.UpgradeStep_NONE,
+			Source:     urproto.ProviderType_DATABASE,
+			ProposalId: nil,
+		})
+
+		if result.Error != nil {
+			return errors.Wrapf(result.Error, "failed to create cancellation upgrade")
+		}
+	} else {
+		// if the upgrade is already registered in the database
+		// update the record with the highest priority for the given height
+		//
+		// Equivalent SQL query
+		// ```
+		// UPDATE "upgrades" SET priority = (
+		//   SELECT MAX(priority) FROM "upgrades" WHERE height = XXX AND network = 'XXX'
+		// ), status=6 WHERE height = XXX AND network = 'XXX'
+		// ```
+		result := dp.db.WithContext(ctx).Model(&urproto.Upgrade{}).Where(
+			"height = ? AND network = ?", height, network,
+		).Updates(
+			map[string]interface{}{
+				"status":   urproto.UpgradeStatus_CANCELLED,
+				"priority": dp.db.Model(&urproto.Upgrade{}).Select("MAX(priority)").Where("height = ? AND network = ?", height, network),
+			},
+		)
+
+		if result.Error != nil {
+			return errors.Wrapf(result.Error, "failed to cancel upgrade from database")
+		}
+	}
+
+	return nil
+}
+
+func (dp Provider) Type() urproto.ProviderType {
+	return urproto.ProviderType_DATABASE
+}
+
+func InitDB(cfg *config.DatabaseProvider, gcfg *gorm.Config) (*gorm.DB, error) {
+	mode := string(cfg.SslMode)
+
+	dsn := fmt.Sprintf("host=%s user=%s password=%s dbname=%s port=%d sslmode=%s", cfg.Host, cfg.User, cfg.Password, cfg.DB, cfg.Port, mode)
+	db, err := gorm.Open(postgres.Open(dsn), gcfg)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to connect database")
+	}
+
+	return db, nil
+}
+
+func AutoMigrate(db *gorm.DB) error {
+	if err := db.AutoMigrate(&urproto.Upgrade{}); err != nil {
+		return errors.Wrapf(err, "database migration failed for upgrades table")
+	}
+
+	if err := db.AutoMigrate(&vrproto.Version{}); err != nil {
+		return errors.Wrapf(err, "database migration failed for versions table")
+	}
+
+	return nil
+}
diff --git a/internal/pkg/provider/local/local.go b/internal/pkg/provider/local/local.go
new file mode 100644
index 0000000..c00ddae
--- /dev/null
+++ b/internal/pkg/provider/local/local.go
@@ -0,0 +1,354 @@
+package local
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"os"
+	"slices"
+	"sync"
+
+	"blazar/internal/pkg/errors"
+	urproto "blazar/internal/pkg/proto/upgrades_registry"
+	vrproto "blazar/internal/pkg/proto/version_resolver"
+	"blazar/internal/pkg/provider"
+	sm "blazar/internal/pkg/state_machine"
+
+	"google.golang.org/protobuf/encoding/protojson"
+)
+
+type localProviderData struct {
+	Upgrades []*urproto.Upgrade `json:"upgrades"`
+	Versions []*vrproto.Version `json:"versions"`
+	State    *sm.State          `json:"state"`
+}
+
+var JSONMarshaller = protojson.MarshalOptions{
+	Multiline: true,
+	Indent:    "  ",
+}
+
+type Provider struct {
+	configPath string
+	network    string
+	priority   int32
+	lock       *sync.RWMutex
+}
+
+func NewProvider(configPath, network string, priority int32) (*Provider, error) {
+	if _, err := os.Stat(configPath); os.IsNotExist(err) {
+		d := localProviderData{}
+		jsonData, err :=
json.Marshal(&d) + if err != nil { + return nil, errors.Wrapf(err, "could not marshal local provider data into json") + } + if err := os.WriteFile(configPath, jsonData, 0600); err != nil { + return nil, errors.Wrapf(err, "could not create local provider data file") + } + } + + ur := &Provider{ + configPath: configPath, + network: network, + priority: priority, + lock: &sync.RWMutex{}, + } + + return ur, nil +} + +func (lp *Provider) GetUpgrades(_ context.Context) ([]*urproto.Upgrade, error) { + data, err := lp.readData(true) + if err != nil { + return nil, err + } + + return provider.PostProcessUpgrades(data.Upgrades, urproto.ProviderType_LOCAL, lp.priority), nil +} + +func (lp *Provider) GetUpgradesByHeight(ctx context.Context, height int64) ([]*urproto.Upgrade, error) { + upgrades, err := lp.GetUpgrades(ctx) + if err != nil { + return []*urproto.Upgrade{}, err + } + + filtered := make([]*urproto.Upgrade, 0, len(upgrades)) + for _, upgrade := range upgrades { + if upgrade.Height == height { + filtered = append(filtered, upgrade) + } + } + + return provider.PostProcessUpgrades(filtered, urproto.ProviderType_LOCAL, lp.priority), nil +} + +func (lp *Provider) GetUpgradesByType(ctx context.Context, upgradeType urproto.UpgradeType) ([]*urproto.Upgrade, error) { + upgrades, err := lp.GetUpgrades(ctx) + if err != nil { + return []*urproto.Upgrade{}, err + } + + filtered := make([]*urproto.Upgrade, 0, len(upgrades)) + for _, upgrade := range upgrades { + if upgrade.Type == upgradeType { + filtered = append(filtered, upgrade) + } + } + + return provider.PostProcessUpgrades(filtered, urproto.ProviderType_LOCAL, lp.priority), nil +} + +func (lp *Provider) AddUpgrade(_ context.Context, upgrade *urproto.Upgrade, overwrite bool) error { + provider.PostProcessUpgrade(upgrade, urproto.ProviderType_LOCAL, lp.priority) + + lp.lock.Lock() + defer lp.lock.Unlock() + + if upgrade.Network != lp.network { + return fmt.Errorf("upgrade network %s does not match %s", upgrade.Network, lp.network) + } + + data, err := lp.readData(false) + if err != nil { + return err + } + upgrades := data.Upgrades + + for n, existingUpgrade := range upgrades { + if existingUpgrade.Height == upgrade.Height && existingUpgrade.Priority == upgrade.Priority { + if !overwrite { + return fmt.Errorf("upgrade for height %d and priority %d already registered", upgrade.Height, upgrade.Priority) + } + upgrades = slices.Delete(upgrades, n, n+1) + break + } + } + + upgrades = append(upgrades, upgrade) + data.Upgrades = upgrades + + jsonData, err := json.Marshal(&data) + if err != nil { + return err + } + + return os.WriteFile(lp.configPath, jsonData, 0600) +} + +func (lp *Provider) RegisterVersion(_ context.Context, version *vrproto.Version, overwrite bool) error { + provider.PostProcessVersion(version, urproto.ProviderType_LOCAL, lp.priority) + + lp.lock.Lock() + defer lp.lock.Unlock() + + if version.Network != lp.network { + return fmt.Errorf("version network %s does not match %s", version.Network, lp.network) + } + + data, err := lp.readData(false) + if err != nil { + return err + } + versions := data.Versions + + for n, existingVersion := range versions { + if existingVersion.Height == version.Height && existingVersion.Priority == version.Priority { + if !overwrite { + return fmt.Errorf("version for height=%d, priority=%d already registered", version.Height, version.Priority) + } + versions = slices.Delete(versions, n, n+1) + break + } + } + + versions = append(versions, version) + data.Versions = versions + + jsonData, err := 
json.Marshal(&data)
+	if err != nil {
+		return err
+	}
+
+	return os.WriteFile(lp.configPath, jsonData, 0600)
+}
+
+func (lp *Provider) GetVersions(_ context.Context) ([]*vrproto.Version, error) {
+	data, err := lp.readData(true)
+	if err != nil {
+		return nil, err
+	}
+
+	return provider.PostProcessVersions(data.Versions, urproto.ProviderType_LOCAL, lp.priority), nil
+}
+
+func (lp *Provider) GetVersionsByHeight(ctx context.Context, height uint64) ([]*vrproto.Version, error) {
+	versions, err := lp.GetVersions(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	filtered := []*vrproto.Version{}
+	for _, version := range versions {
+		// #nosec G115
+		if version.Height == int64(height) {
+			filtered = append(filtered, version)
+		}
+	}
+
+	return provider.PostProcessVersions(filtered, urproto.ProviderType_LOCAL, lp.priority), nil
+}
+
+func (lp *Provider) StoreState(_ context.Context, state *sm.State) error {
+	lp.lock.Lock()
+	defer lp.lock.Unlock()
+
+	data, err := lp.readData(false)
+	if err != nil {
+		return err
+	}
+	data.State = state
+
+	jsonData, err := json.Marshal(&data)
+	if err != nil {
+		return err
+	}
+
+	return os.WriteFile(lp.configPath, jsonData, 0600)
+}
+
+func (lp *Provider) checkUniqueKey(data *localProviderData) error {
+	type uniqueKey struct {
+		height   int64
+		priority int32
+	}
+
+	heightPrioritySet := make(map[uniqueKey]struct{})
+
+	for _, upgrade := range data.Upgrades {
+		if _, ok := heightPrioritySet[uniqueKey{height: upgrade.Height, priority: upgrade.Priority}]; ok {
+			return fmt.Errorf("found multiple upgrades for height=%d, priority=%d", upgrade.Height, upgrade.Priority)
+		}
+		heightPrioritySet[uniqueKey{height: upgrade.Height, priority: upgrade.Priority}] = struct{}{}
+	}
+
+	heightPrioritySet = make(map[uniqueKey]struct{})
+
+	for _, version := range data.Versions {
+		if _, ok := heightPrioritySet[uniqueKey{height: version.Height, priority: version.Priority}]; ok {
+			return fmt.Errorf("found multiple versions for height=%d, priority=%d", version.Height, version.Priority)
+		}
+		heightPrioritySet[uniqueKey{height: version.Height, priority: version.Priority}] = struct{}{}
+	}
+
+	for _, version := range data.Versions {
+		if version.Network != lp.network {
+			return fmt.Errorf("network %s does not match configured network %s", version.Network, lp.network)
+		}
+	}
+
+	for _, upgrade := range data.Upgrades {
+		if upgrade.Network != lp.network {
+			return fmt.Errorf("network %s does not match configured network %s", upgrade.Network, lp.network)
+		}
+	}
+	return nil
+}
+
+func (lp *Provider) RestoreState(_ context.Context) (*sm.State, error) {
+	data, err := lp.readData(true)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := lp.checkUniqueKey(data); err != nil {
+		return nil, err
+	}
+
+	return data.State, nil
+}
+
+func (lp *Provider) CancelUpgrade(_ context.Context, height int64, network string) error {
+	if network != lp.network {
+		return fmt.Errorf("the network %s does not match local provider: %s", network, lp.network)
+	}
+
+	lp.lock.Lock()
+	defer lp.lock.Unlock()
+
+	data, err := lp.readData(false)
+	if err != nil {
+		return err
+	}
+	upgrades := data.Upgrades
+
+	upgradeWithHighestPriority, pos := &urproto.Upgrade{Priority: 0}, 0
+	for n, existingUpgrade := range upgrades {
+		if existingUpgrade.Height == height && existingUpgrade.Priority > upgradeWithHighestPriority.Priority {
+			upgradeWithHighestPriority, pos = existingUpgrade, n
+		}
+	}
+
+	if upgradeWithHighestPriority.Priority == 0 {
+		// if there are no upgrades registered (in the local provider) blazar will create one with status CANCELLED
+		cancellationUpgrade := &urproto.Upgrade{
+			Height:     height,
+			Network:    network,
+			Priority:   lp.priority,
+			Name:       "",
+			Type:       urproto.UpgradeType_NON_GOVERNANCE_UNCOORDINATED,
+			Status:     urproto.UpgradeStatus_CANCELLED,
+			Step:       urproto.UpgradeStep_NONE,
+			Source:     urproto.ProviderType_LOCAL,
+			ProposalId: nil,
+		}
+
+		upgrades = append(upgrades, cancellationUpgrade)
+		data.Upgrades = upgrades
+	} else {
+		// otherwise blazar cancels the highest-priority upgrade registered for that height
+		upgrades[pos].Status = urproto.UpgradeStatus_CANCELLED
+		data.Upgrades = upgrades
+	}
+
+	jsonData, err := json.Marshal(&data)
+	if err != nil {
+		return err
+	}
+
+	return os.WriteFile(lp.configPath, jsonData, 0600)
+}
+
+func (lp *Provider) Type() urproto.ProviderType {
+	return urproto.ProviderType_LOCAL
+}
+
+func (lp *Provider) readData(lock bool) (*localProviderData, error) {
+	if lock {
+		lp.lock.RLock()
+		defer lp.lock.RUnlock()
+	}
+
+	var localData localProviderData
+
+	fileData, err := os.ReadFile(lp.configPath)
+	if os.IsNotExist(err) {
+		jsonData, err := json.Marshal(&localData)
+		if err != nil {
+			return nil, errors.Wrapf(err, "could not marshal new upgrades file to json")
+		}
+
+		if err := os.WriteFile(lp.configPath, jsonData, 0600); err != nil {
+			return nil, errors.Wrapf(err, "could not create new upgrades file")
+		}
+		return &localData, nil
+	}
+	if err != nil {
+		return nil, errors.Wrapf(err, "could not read %s upgrades file", lp.configPath)
+	}
+
+	if err := json.Unmarshal(fileData, &localData); err != nil {
+		return nil, errors.Wrapf(err, "could not unmarshal %s upgrades file", lp.configPath)
+	}
+
+	return &localData, nil
+}
diff --git a/internal/pkg/provider/local/local_test.go b/internal/pkg/provider/local/local_test.go
new file mode 100644
index 0000000..1b5d5a7
--- /dev/null
+++ b/internal/pkg/provider/local/local_test.go
@@ -0,0 +1,141 @@
+package local
+
+import (
+	"context"
+	"path"
+	"sync"
+	"testing"
+
+	urproto "blazar/internal/pkg/proto/upgrades_registry"
+	vrproto "blazar/internal/pkg/proto/version_resolver"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestLoading(t *testing.T) {
+	lp := Provider{
+		configPath: "../../../../testdata/provider/local/test.json",
+		network:    "test",
+		priority:   1,
+		lock:       &sync.RWMutex{},
+	}
+
+	upgrades, err := lp.readData(true)
+	require.NoError(t, err)
+	shouldBeUpgrades := []*urproto.Upgrade{
+		{
+			Height:   10,
+			Tag:      "v1.0.0",
+			Network:  "test",
+			Name:     "invalid_upcoming_upgrade_due_to_passed_height",
+			Type:     urproto.UpgradeType_NON_GOVERNANCE_COORDINATED,
+			Status:   urproto.UpgradeStatus_UNKNOWN,
+			Source:   urproto.ProviderType_LOCAL,
+			Priority: 1,
+		},
+		{
+			Height:   100,
+			Tag:      "v1.0.0",
+			Network:  "test",
+			Name:     "valid_upcoming_upgrade",
+			Type:     urproto.UpgradeType_NON_GOVERNANCE_COORDINATED,
+			Status:   urproto.UpgradeStatus_UNKNOWN,
+			Source:   urproto.ProviderType_LOCAL,
+			Priority: 1,
+		},
+		{
+			Height:   101,
+			Tag:      "",
+			Network:  "test",
+			Name:     "valid_upgrade_without_tag",
+			Type:     urproto.UpgradeType_NON_GOVERNANCE_COORDINATED,
+			Status:   urproto.UpgradeStatus_UNKNOWN,
+			Source:   urproto.ProviderType_LOCAL,
+			Priority: 1,
+		},
+		{
+			Height:   102,
+			Tag:      "v1.0.0",
+			Network:  "test",
+			Name:     "invalid_upcoming_upgrade_due_to_cancelled_status",
+			Type:     urproto.UpgradeType_NON_GOVERNANCE_COORDINATED,
+			Status:   urproto.UpgradeStatus_CANCELLED,
+			Source:   urproto.ProviderType_LOCAL,
+			Priority: 1,
+		},
+	}
+	shouldBeVersions := []*vrproto.Version{
+		{
+			Height:   10,
+			Tag:      "a-tag",
+			Network:  "test",
+			Priority: 1,
+			Source:   0,
+		},
+	}
+	assert.Equal(t, len(shouldBeUpgrades), len(upgrades.Upgrades))
+	for i := range shouldBeUpgrades {
+		assert.EqualExportedValues(t, shouldBeUpgrades[i], upgrades.Upgrades[i])
+	}
+	assert.Equal(t, len(shouldBeVersions), len(upgrades.Versions))
+	for i := range shouldBeVersions {
+		assert.EqualExportedValues(t, shouldBeVersions[i], upgrades.Versions[i])
+	}
+}
+
+func TestNonExistentFile(t *testing.T) {
+	dir := t.TempDir()
+
+	lp, err := NewProvider(path.Join(dir, "non-existing.json"), "test", 1)
+	require.NoError(t, err)
+	data, err := lp.readData(true)
+	require.NoError(t, err)
+	assert.Empty(t, data.Upgrades)
+	assert.Empty(t, data.Versions)
+	assert.Nil(t, data.State)
+}
+
+func TestLoadFailing(t *testing.T) {
+	tests := []struct {
+		name string
+		file string
+		err  string
+	}{
+		{
+			name: "TestDuplicateUpgrades",
+			file: "../../../../testdata/provider/local/duplicate-upgrade.json",
+			err:  "found multiple upgrades for height=10, priority=1",
+		},
+		{
+			name: "TestDuplicateVersion",
+			file: "../../../../testdata/provider/local/duplicate-version.json",
+			err:  "found multiple versions for height=10, priority=1",
+		},
+		{
+			name: "TestDifferentNetworkUpgrade",
+			file: "../../../../testdata/provider/local/different-upgrade-network.json",
+			err:  "network not-test does not match configured network test",
+		},
+		{
+			name: "TestDifferentNetworkVersion",
+			file: "../../../../testdata/provider/local/different-version-network.json",
+			err:  "network not-test does not match configured network test",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			lp := Provider{
+				configPath: tt.file,
+				network:    "test",
+				priority:   1,
+				lock:       &sync.RWMutex{},
+			}
+
+			_, err := lp.RestoreState(context.Background())
+			require.Error(t, err)
+			assert.Contains(t, err.Error(), tt.err)
+		})
+	}
+}
diff --git a/internal/pkg/provider/provider.go b/internal/pkg/provider/provider.go
new file mode 100644
index 0000000..6540ee7
--- /dev/null
+++ b/internal/pkg/provider/provider.go
@@ -0,0 +1,61 @@
+package provider
+
+import (
+	"context"
+
+	urproto "blazar/internal/pkg/proto/upgrades_registry"
+	vrproto "blazar/internal/pkg/proto/version_resolver"
+)
+
+// VersionResolver is an interface for fetching versions from an external source
+type VersionResolver interface {
+	RegisterVersion(ctx context.Context, version *vrproto.Version, overwrite bool) error
+	GetVersions(ctx context.Context) ([]*vrproto.Version, error)
+	GetVersionsByHeight(ctx context.Context, height uint64) ([]*vrproto.Version, error)
+}
+
+// UpgradeProvider is an interface for fetching upgrades from an external source
+type UpgradeProvider interface {
+	GetUpgrades(ctx context.Context) ([]*urproto.Upgrade, error)
+	GetUpgradesByType(ctx context.Context, upgradeType urproto.UpgradeType) ([]*urproto.Upgrade, error)
+	GetUpgradesByHeight(ctx context.Context, height int64) ([]*urproto.Upgrade, error)
+	AddUpgrade(ctx context.Context, upgrade *urproto.Upgrade, overwrite bool) error
+	CancelUpgrade(ctx context.Context, height int64, network string) error
+	Type() urproto.ProviderType
+}
+
+func PostProcessUpgrades(upgrades []*urproto.Upgrade, source urproto.ProviderType, priority int32) []*urproto.Upgrade {
+	for n := range upgrades {
+		PostProcessUpgrade(upgrades[n], source, priority)
+	}
+
+	return upgrades
+}
+
+func PostProcessUpgrade(upgrade *urproto.Upgrade, source urproto.ProviderType, priority int32) {
+	if upgrade.Source != source {
+		upgrade.Source = source
+	}
+
+	if upgrade.Priority == 0 {
+		upgrade.Priority = priority
+	}
+}
+
+func PostProcessVersions(versions []*vrproto.Version, source urproto.ProviderType, priority int32) []*vrproto.Version {
+	for n := range versions {
+		PostProcessVersion(versions[n], source, priority)
+	}
+
+	return versions
+}
+
+func PostProcessVersion(version *vrproto.Version, source urproto.ProviderType, priority int32) {
+	if version.Source != source {
+		version.Source = source
+	}
+
+	if version.Priority == 0 {
+		version.Priority = priority
+	}
+}
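The two interfaces above are the whole provider contract. As an illustration of how small a conforming implementation can be, here is a hypothetical read-only, in-memory `UpgradeProvider` (the `staticProvider` type is made up; only the interface and the `PostProcess*` helpers come from the code above):

```go
package main

import (
	"context"
	"errors"
	"fmt"

	urproto "blazar/internal/pkg/proto/upgrades_registry"
	"blazar/internal/pkg/provider"
)

// staticProvider serves a fixed list of upgrades, e.g. for tests.
type staticProvider struct{ ups []*urproto.Upgrade }

func (s *staticProvider) GetUpgrades(context.Context) ([]*urproto.Upgrade, error) {
	// stamp source and default priority, like the real providers do
	return provider.PostProcessUpgrades(s.ups, urproto.ProviderType_LOCAL, 1), nil
}

func (s *staticProvider) GetUpgradesByType(ctx context.Context, t urproto.UpgradeType) ([]*urproto.Upgrade, error) {
	all, _ := s.GetUpgrades(ctx) // never errors here
	out := []*urproto.Upgrade{}
	for _, u := range all {
		if u.Type == t {
			out = append(out, u)
		}
	}
	return out, nil
}

func (s *staticProvider) GetUpgradesByHeight(ctx context.Context, h int64) ([]*urproto.Upgrade, error) {
	all, _ := s.GetUpgrades(ctx)
	out := []*urproto.Upgrade{}
	for _, u := range all {
		if u.Height == h {
			out = append(out, u)
		}
	}
	return out, nil
}

func (s *staticProvider) AddUpgrade(context.Context, *urproto.Upgrade, bool) error {
	return errors.New("static provider is read-only")
}

func (s *staticProvider) CancelUpgrade(context.Context, int64, string) error {
	return errors.New("static provider is read-only")
}

func (s *staticProvider) Type() urproto.ProviderType { return urproto.ProviderType_LOCAL }

// compile-time check that the interface is satisfied
var _ provider.UpgradeProvider = (*staticProvider)(nil)

func main() {
	p := &staticProvider{ups: []*urproto.Upgrade{{Height: 100, Network: "testnet-1"}}}
	ups, _ := p.GetUpgradesByHeight(context.Background(), 100)
	fmt.Println(len(ups), p.Type()) // 1 LOCAL
}
```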
diff --git a/internal/pkg/proxy/config.go b/internal/pkg/proxy/config.go
new file mode 100644
index 0000000..cc88127
--- /dev/null
+++ b/internal/pkg/proxy/config.go
@@ -0,0 +1,53 @@
+package proxy
+
+import (
+	"blazar/internal/pkg/errors"
+
+	"github.com/BurntSushi/toml"
+)
+
+type Instance struct {
+	Name     string `toml:"name"`
+	Host     string `toml:"host"`
+	HTTPPort int    `toml:"http-port"`
+	GRPCPort int    `toml:"grpc-port"`
+	Network  string `toml:"network"`
+}
+
+type Config struct {
+	Host      string     `toml:"host"`
+	HTTPPort  uint16     `toml:"http-port"`
+	Instances []Instance `toml:"instance"`
+}
+
+func ReadConfig(cfgFile string) (*Config, error) {
+	var config Config
+	_, err := toml.DecodeFile(cfgFile, &config)
+	if err != nil {
+		return nil, errors.Wrapf(err, "could not decode config file")
+	}
+	return &config, nil
+}
+
+func (cfg *Config) ValidateAll() error {
+	if len(cfg.Instances) == 0 {
+		return errors.New("no instances specified")
+	}
+
+	for _, instance := range cfg.Instances {
+		if instance.Name == "" {
+			return errors.New("instance name not specified")
+		}
+		if instance.Host == "" {
+			return errors.New("instance host not specified")
+		}
+		if instance.HTTPPort == 0 {
+			return errors.New("instance http port not specified")
+		}
+		if instance.GRPCPort == 0 {
+			return errors.New("instance grpc port not specified")
+		}
+	}
+
+	return nil
+}
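Since `ReadConfig` just decodes TOML into the structs above, a concrete file makes the expected shape obvious. A minimal sketch that writes and validates a hypothetical proxy.toml; the hosts, ports and the network name are made up:

```go
package main

import (
	"fmt"
	"os"

	"blazar/internal/pkg/proxy"
)

// a hypothetical proxy.toml matching the struct tags above; each
// [[instance]] entry maps to one proxy.Instance
const sample = `
host = "0.0.0.0"
http-port = 8080

[[instance]]
name = "cosmoshub-validator"
host = "10.0.0.5"
http-port = 1234
grpc-port = 5678
network = "cosmoshub-4"
`

func main() {
	if err := os.WriteFile("proxy.toml", []byte(sample), 0o600); err != nil {
		panic(err)
	}

	cfg, err := proxy.ReadConfig("proxy.toml")
	if err != nil {
		panic(err)
	}
	if err := cfg.ValidateAll(); err != nil {
		panic(err)
	}

	fmt.Printf("%d instance(s) configured\n", len(cfg.Instances))
}
```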
diff --git a/internal/pkg/proxy/index.go b/internal/pkg/proxy/index.go
new file mode 100644
index 0000000..67765cf
--- /dev/null
+++ b/internal/pkg/proxy/index.go
@@ -0,0 +1,189 @@
+package proxy
+
+import (
+	"context"
+	"encoding/base64"
+	"fmt"
+	"net"
+	"net/http"
+	"sort"
+	"strconv"
+	"sync"
+	"text/template"
+	"time"
+
+	urproto "blazar/internal/pkg/proto/upgrades_registry"
+	"blazar/internal/pkg/static"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials/insecure"
+)
+
+type instancePair struct {
+	LastUpgrade *urproto.Upgrade
+	Instance    Instance
+	Error       error
+}
+
+func IndexHandler(cfg *Config) http.HandlerFunc {
+	return func(w http.ResponseWriter, _ *http.Request) {
+		mutex := sync.Mutex{}
+		networkUpgrades := make(map[string][]instancePair)
+
+		start := time.Now()
+
+		var wg sync.WaitGroup
+		for _, instance := range cfg.Instances {
+			wg.Add(1)
+
+			if _, ok := networkUpgrades[instance.Network]; !ok {
+				networkUpgrades[instance.Network] = []instancePair{}
+			}
+
+			go func() {
+				defer wg.Done()
+
+				withError := func(err error) {
+					mutex.Lock()
+					defer mutex.Unlock()
+
+					networkUpgrades[instance.Network] = append(
+						networkUpgrades[instance.Network],
+						instancePair{
+							LastUpgrade: nil,
+							Instance:    instance,
+							Error:       err,
+						},
+					)
+				}
+
+				address := net.JoinHostPort(instance.Host, strconv.Itoa(instance.GRPCPort))
+				conn, err := grpc.NewClient(address, grpc.WithTransportCredentials(insecure.NewCredentials()))
+				if err != nil {
+					withError(err)
+					return
+				}
+
+				c := urproto.NewUpgradeRegistryClient(conn)
+				limit := int64(1)
+				listUpgradesResponse, err := c.ListUpgrades(context.Background(), &urproto.ListUpgradesRequest{
+					DisableCache: false,
+					Limit:        &limit,
+				})
+				if err != nil {
+					withError(err)
+					return
+				}
+
+				var lastUpgrade *urproto.Upgrade
+				if len(listUpgradesResponse.Upgrades) > 0 {
+					lastUpgrade = listUpgradesResponse.Upgrades[0]
+				}
+
+				if lastUpgrade != nil && lastUpgrade.Network != instance.Network {
+					err := fmt.Errorf(
+						"instance %s returned upgrade for network %s, expected %s",
+						instance.Host, lastUpgrade.Network, instance.Network,
+					)
+					withError(err)
+					return
+				}
+
+				mutex.Lock()
+				defer mutex.Unlock()
+
+				networkUpgrades[instance.Network] = append(
+					networkUpgrades[instance.Network],
+					instancePair{
+						LastUpgrade: lastUpgrade,
+						Instance:    instance,
+					},
+				)
+			}()
+		}
+
+		wg.Wait()
+		end := time.Now()
+
+		noInstances, noActive, noExecuting, noExpired, noCompleted, noErrors := uint(0), uint(0), uint(0), uint(0), uint(0), uint(0)
+		for network, pairs := range networkUpgrades {
+			noInstances += uint(len(pairs))
+
+			for _, pair := range pairs {
+				if pair.LastUpgrade != nil && pair.LastUpgrade.Status == urproto.UpgradeStatus_ACTIVE {
+					noActive++
+				}
+
+				if pair.LastUpgrade != nil && pair.LastUpgrade.Status == urproto.UpgradeStatus_EXECUTING {
+					noExecuting++
+				}
+
+				if pair.LastUpgrade != nil && pair.LastUpgrade.Status == urproto.UpgradeStatus_EXPIRED {
+					noExpired++
+				}
+
+				if pair.LastUpgrade != nil && pair.LastUpgrade.Status == urproto.UpgradeStatus_COMPLETED {
+					noCompleted++
+				}
+
+				if pair.Error != nil {
+					noErrors++
+				}
+			}
+
+			sort.Slice(networkUpgrades[network], func(i, j int) bool {
+				return networkUpgrades[network][i].Instance.Name > networkUpgrades[network][j].Instance.Name
+			})
+		}
+
+		t, err := template.ParseFS(static.Templates, "templates/index/index-proxy.html")
+		if err != nil {
+			w.WriteHeader(http.StatusInternalServerError)
+			_, _ = w.Write([]byte(err.Error()))
+			return
+		}
+
+		logoData, err := static.Templates.ReadFile("templates/index/logo.png")
+		if err != nil {
+			w.WriteHeader(http.StatusInternalServerError)
+			_, _ = w.Write([]byte(err.Error()))
+			return
+		}
+
+		warning := ""
+		if noErrors > 0 {
+			warning = fmt.Sprintf("Encountered issues with %d instances. Some may be unreachable or failed completely, please investigate the error messages", noErrors)
+		}
+
+		err = t.Execute(w, struct {
+			NoNetworks  uint
+			NoInstances uint
+			NoActive    uint
+			NoExecuting uint
+			NoExpired   uint
+			NoCompleted uint
+			NoErrors    uint
+			Upgrades    map[string][]instancePair
+			FetchTime   time.Duration
+			LogoBase64  string
+			Warning     string
+		}{
+			NoNetworks:  uint(len(networkUpgrades)),
+			NoInstances: noInstances,
+			NoActive:    noActive,
+			NoExecuting: noExecuting,
+			NoExpired:   noExpired,
+			NoCompleted: noCompleted,
+			NoErrors:    noErrors,
+			Upgrades:    networkUpgrades,
+			FetchTime:   end.Sub(start),
+			LogoBase64:  base64.StdEncoding.EncodeToString(logoData),
+			Warning:     warning,
+		})
+		if err != nil {
+			w.WriteHeader(http.StatusInternalServerError)
+			_, _ = w.Write([]byte(err.Error()))
+			return
+		}
+	}
+}
diff --git a/internal/pkg/proxy/proxy.go b/internal/pkg/proxy/proxy.go
new file mode 100644
index 0000000..023e4cc
--- /dev/null
+++ b/internal/pkg/proxy/proxy.go
@@ -0,0 +1,34 @@
+package proxy
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"net/http"
+	"strconv"
+
+	"blazar/internal/pkg/log"
+)
+
+type Proxy struct {
+}
+
+func NewProxy() *Proxy {
+	return &Proxy{}
+}
+
+func (p *Proxy) ListenAndServe(ctx context.Context, cfg *Config) error {
+	logger := log.FromContext(ctx)
+	httpAddr := net.JoinHostPort(cfg.Host, strconv.Itoa(int(cfg.HTTPPort)))
+
+	// register handlers
+	http.HandleFunc("/", IndexHandler(cfg))
+
+	logger.Infof("serving http server on %s", httpAddr)
+	if err := http.ListenAndServe(httpAddr, nil); err != nil {
+		fmt.Println("error serving http server", err)
+		panic(err)
+	}
+
+	return nil
+}
diff --git a/internal/pkg/state_machine/state_machine.go b/internal/pkg/state_machine/state_machine.go
new file mode 100644
index 0000000..006c23f
--- /dev/null
+++ b/internal/pkg/state_machine/state_machine.go
@@ -0,0 +1,350 @@
+package state_machine
+
+import (
+	"context"
+	"fmt"
+	"slices"
+	"sync"
+
+	checksproto "blazar/internal/pkg/proto/daemon"
+	urproto "blazar/internal/pkg/proto/upgrades_registry"
+)
+
+// The rules are as follows:
+// 1. upgrades coming from the providers have one of the following statuses (`upgrade.Status`)
+//    - UNKNOWN
+//    - SCHEDULED
+//    - ACTIVE
+//    - CANCELLED
+//
+// The provider can update the upgrade status from eg. SCHEDULED to ACTIVE when an onchain
+// governance proposal passes, but statuses such as EXECUTING and COMPLETED are only ever set by
+// the blazar state machine.
+var (
+	allowedInputStatuses = []urproto.UpgradeStatus{
+		urproto.UpgradeStatus_UNKNOWN,
+		urproto.UpgradeStatus_SCHEDULED,
+		urproto.UpgradeStatus_ACTIVE,
+		urproto.UpgradeStatus_CANCELLED,
+	}
+
+	statusManagedByStateMachine = []urproto.UpgradeStatus{
+		urproto.UpgradeStatus_EXECUTING,
+		urproto.UpgradeStatus_COMPLETED,
+		urproto.UpgradeStatus_FAILED,
+		urproto.UpgradeStatus_EXPIRED,
+	}
+)
+
+func init() {
+	// sanity check
+	if len(urproto.UpgradeStatus_value) != len(allowedInputStatuses)+len(statusManagedByStateMachine) {
+		panic(fmt.Sprintf("allowedInputStatuses and statusManagedByStateMachine do not cover all upgrade statuses. allowedInputStatuses: %d, statusManagedByStateMachine: %d, total: %d", len(allowedInputStatuses), len(statusManagedByStateMachine), len(urproto.UpgradeStatus_value)))
+	}
+}
+
+type StateMachineStorage interface {
+	StoreState(context.Context, *State) error
+	RestoreState(context.Context) (*State, error)
+}
+
+type State struct {
+	UpgradeStatus map[int64]urproto.UpgradeStatus `json:"status"`
+	UpgradeStep   map[int64]urproto.UpgradeStep   `json:"steps"`
+
+	PreCheckStatus  map[int64]map[checksproto.PreCheck]checksproto.CheckStatus  `json:"pre_check_status"`
+	PostCheckStatus map[int64]map[checksproto.PostCheck]checksproto.CheckStatus `json:"post_check_status"`
+}
+
+// Simple, unsophisticated state machine for managing upgrades
+type StateMachine struct {
+	lock  *sync.RWMutex
+	state *State
+
+	storage StateMachineStorage
+}
+
+func NewStateMachine(storage StateMachineStorage) *StateMachine {
+	return &StateMachine{
+		lock: &sync.RWMutex{},
+		state: &State{
+			UpgradeStatus: make(map[int64]urproto.UpgradeStatus, 0),
+			UpgradeStep:   make(map[int64]urproto.UpgradeStep, 0),
+
+			PreCheckStatus:  make(map[int64]map[checksproto.PreCheck]checksproto.CheckStatus, 0),
+			PostCheckStatus: make(map[int64]map[checksproto.PostCheck]checksproto.CheckStatus, 0),
+		},
+		storage: storage,
+	}
+}
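Before the transition logic in `UpdateStatus` below, a usage sketch may help. It wires the state machine to the local provider, which implements `StoreState`/`RestoreState`; the file name, network and heights are made up, and the write-through persistence (the unexported `persist` method deferred below) is assumed to go through the configured storage:

```go
package main

import (
	urproto "blazar/internal/pkg/proto/upgrades_registry"
	"blazar/internal/pkg/provider/local"
	sm "blazar/internal/pkg/state_machine"
)

func main() {
	// the local provider doubles as StateMachineStorage
	lp, err := local.NewProvider("blazar-local.json", "testnet-1", 1)
	if err != nil {
		panic(err)
	}

	machine := sm.NewStateMachine(lp)

	// providers may only feed UNKNOWN/SCHEDULED/ACTIVE/CANCELLED here; a
	// NON_GOVERNANCE upgrade above the current height is promoted to ACTIVE
	upgrades := map[int64]*urproto.Upgrade{
		100: {
			Height:  100,
			Network: "testnet-1",
			Type:    urproto.UpgradeType_NON_GOVERNANCE_COORDINATED,
			Status:  urproto.UpgradeStatus_UNKNOWN,
		},
	}
	machine.UpdateStatus(50, upgrades)

	_ = machine.GetStatus(100) // UpgradeStatus_ACTIVE
}
```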
chain provider) and the state machine state cancelled is set by a human through rpc etc + if upgrade.Status == urproto.UpgradeStatus_CANCELLED || sm.state.UpgradeStatus[upgrade.Height] == urproto.UpgradeStatus_CANCELLED { + sm.state.UpgradeStatus[upgrade.Height] = urproto.UpgradeStatus_CANCELLED + continue + } + + switch upgrade.Type { + case urproto.UpgradeType_GOVERNANCE: + // if the new status is coming from governance proposal then we update + // otherwise it must have been set by a blazar instance while processing upgrade + if !slices.Contains(statusManagedByStateMachine, sm.state.UpgradeStatus[upgrade.Height]) { + sm.state.UpgradeStatus[upgrade.Height] = upgrade.Status + } + case urproto.UpgradeType_NON_GOVERNANCE_COORDINATED, urproto.UpgradeType_NON_GOVERNANCE_UNCOORDINATED: + // mark the upgrade as 'ready for exection' (active) + if !slices.Contains(statusManagedByStateMachine, sm.state.UpgradeStatus[upgrade.Height]) { + if upgrade.Height > currentHeight { + sm.state.UpgradeStatus[upgrade.Height] = urproto.UpgradeStatus_ACTIVE + } + } + + default: + panic(fmt.Sprintf("unknown upgrade type %s", upgrade.Type.String())) + } + } + + // sanity check + if len(sm.state.UpgradeStatus) != len(upgrades) { + panic(fmt.Sprintf("upgrade status map length %d does not match upgrade list length %d", len(sm.state.UpgradeStatus), len(upgrades))) + } + + // handle other status changes + for _, upgrade := range upgrades { + status := sm.state.UpgradeStatus[upgrade.Height] + + // handle expired upgrades + isPastUpgrade := upgrade.Height < currentHeight + if isPastUpgrade && status != urproto.UpgradeStatus_CANCELLED && !slices.Contains(statusManagedByStateMachine, status) { + sm.state.UpgradeStatus[upgrade.Height] = urproto.UpgradeStatus_EXPIRED + } + } +} + +func (sm *StateMachine) MustSetStatus(height int64, status urproto.UpgradeStatus) { + if err := sm.SetStatus(height, status); err != nil { + panic(err) + } +} + +func (sm *StateMachine) SetStatus(height int64, status urproto.UpgradeStatus) error { + sm.lock.Lock() + defer sm.lock.Unlock() + defer sm.persist() + + return sm.setStatus(height, status, false) +} + +func (sm *StateMachine) SetStep(height int64, step urproto.UpgradeStep) { + sm.lock.Lock() + defer sm.lock.Unlock() + defer sm.persist() + + sm.state.UpgradeStep[height] = step +} + +func (sm *StateMachine) MustSetStatusAndStep(height int64, status urproto.UpgradeStatus, step urproto.UpgradeStep) { + if err := sm.SetStatusAndStep(height, status, step); err != nil { + panic(err) + } +} + +func (sm *StateMachine) SetStatusAndStep(height int64, status urproto.UpgradeStatus, step urproto.UpgradeStep) error { + sm.lock.Lock() + defer sm.lock.Unlock() + defer sm.persist() + + if err := sm.setStatus(height, status, false); err != nil { + return err + } + sm.state.UpgradeStep[height] = step + + return nil +} + +func (sm *StateMachine) GetStatus(height int64) urproto.UpgradeStatus { + sm.lock.RLock() + defer sm.lock.RUnlock() + + if status, ok := sm.state.UpgradeStatus[height]; ok { + return status + } + + return urproto.UpgradeStatus_UNKNOWN +} + +func (sm *StateMachine) GetStep(height int64) urproto.UpgradeStep { + sm.lock.RLock() + defer sm.lock.RUnlock() + + if step, ok := sm.state.UpgradeStep[height]; ok { + return step + } + + return urproto.UpgradeStep_NONE +} + +func (sm *StateMachine) SetPreCheckStatus(height int64, check checksproto.PreCheck, status checksproto.CheckStatus) { + sm.lock.Lock() + defer sm.lock.Unlock() + defer sm.persist() + + if _, ok := sm.state.PreCheckStatus[height]; !ok 
{ + sm.state.PreCheckStatus[height] = make(map[checksproto.PreCheck]checksproto.CheckStatus) + } + sm.state.PreCheckStatus[height][check] = status +} + +func (sm *StateMachine) SetPostCheckStatus(height int64, check checksproto.PostCheck, status checksproto.CheckStatus) { + sm.lock.Lock() + defer sm.lock.Unlock() + defer sm.persist() + + if _, ok := sm.state.PostCheckStatus[height]; !ok { + sm.state.PostCheckStatus[height] = make(map[checksproto.PostCheck]checksproto.CheckStatus) + } + sm.state.PostCheckStatus[height][check] = status +} + +func (sm *StateMachine) GetPreCheckStatus(height int64, check checksproto.PreCheck) checksproto.CheckStatus { + sm.lock.RLock() + defer sm.lock.RUnlock() + + if checkStatus, ok := sm.state.PreCheckStatus[height][check]; ok { + return checkStatus + } + + return checksproto.CheckStatus_PENDING +} + +func (sm *StateMachine) GetPostCheckStatus(height int64, check checksproto.PostCheck) checksproto.CheckStatus { + sm.lock.RLock() + defer sm.lock.RUnlock() + + if checkStatus, ok := sm.state.PostCheckStatus[height][check]; ok { + return checkStatus + } + + return checksproto.CheckStatus_PENDING +} + +func (sm *StateMachine) Restore(ctx context.Context) error { + if sm.storage == nil { + // if it wasn't configured then we don't need to restore the state + return nil + } + + state, err := sm.storage.RestoreState(ctx) + if err != nil { + return err + } + + // the state was likely not initialized yet + if state == nil { + return nil + } + + // initialize the state if it's not initialized + if state.UpgradeStatus == nil { + state.UpgradeStatus = make(map[int64]urproto.UpgradeStatus, 0) + } + + if state.UpgradeStep == nil { + state.UpgradeStep = make(map[int64]urproto.UpgradeStep, 0) + } + + if state.PreCheckStatus == nil { + state.PreCheckStatus = make(map[int64]map[checksproto.PreCheck]checksproto.CheckStatus, 0) + } + + if state.PostCheckStatus == nil { + state.PostCheckStatus = make(map[int64]map[checksproto.PostCheck]checksproto.CheckStatus, 0) + } + + sm.lock.Lock() + defer sm.lock.Unlock() + sm.state = state + + return nil +} + +func (sm *StateMachine) setStatus(height int64, status urproto.UpgradeStatus, lock bool) error { + if lock { + sm.lock.Lock() + defer sm.lock.Unlock() + defer sm.persist() + } + + // we can't cancel the upgrade if it's already being executed, expired, failed etc + if currentStatus, ok := sm.state.UpgradeStatus[height]; ok && status == urproto.UpgradeStatus_CANCELLED { + if currentStep, ok := sm.state.UpgradeStep[height]; ok { + isExecuting := currentStatus == urproto.UpgradeStatus_EXECUTING && !slices.Contains([]urproto.UpgradeStep{ + urproto.UpgradeStep_NONE, + urproto.UpgradeStep_MONITORING, + urproto.UpgradeStep_PRE_UPGRADE_CHECK, + }, currentStep) + + if isExecuting || slices.Contains([]urproto.UpgradeStatus{ + urproto.UpgradeStatus_EXPIRED, + urproto.UpgradeStatus_COMPLETED, + urproto.UpgradeStatus_FAILED, + }, currentStatus) { + return fmt.Errorf("cannot cancel upgrade %d with status %s and step %s", height, currentStatus.String(), currentStep.String()) + } + } + } + + // handle invalid state transitions + if currentStatus, ok := sm.state.UpgradeStatus[height]; ok { + executingTransition := currentStatus == urproto.UpgradeStatus_EXECUTING && (status == urproto.UpgradeStatus_SCHEDULED || status == urproto.UpgradeStatus_ACTIVE) + completedTransition := currentStatus == urproto.UpgradeStatus_COMPLETED && status != urproto.UpgradeStatus_COMPLETED + failedTransition := currentStatus == urproto.UpgradeStatus_FAILED && status != 
urproto.UpgradeStatus_FAILED
+		cancelledTransition := currentStatus == urproto.UpgradeStatus_CANCELLED && status != urproto.UpgradeStatus_CANCELLED
+
+		if executingTransition || completedTransition || failedTransition || cancelledTransition {
+			return fmt.Errorf("status transition from %s to %s is not allowed", currentStatus.String(), status.String())
+		}
+	}
+
+	sm.state.UpgradeStatus[height] = status
+	return nil
+}
+
+func (sm *StateMachine) persist() {
+	// TODO: for now we ignore errors when writing to storage because this is not a critical operation
+	// NOTE: The caller must hold the lock
+	if sm.storage != nil {
+		_ = sm.storage.StoreState(context.TODO(), sm.state)
+	}
+}
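To make the rules above concrete, a short usage sketch (the height, type and status values are made up; the calls are the ones defined in this file). A provider seeds an upgrade in one of the allowed input statuses, and from then on the state machine owns every transition into the statuses it manages:

```go
package main

import (
	urproto "blazar/internal/pkg/proto/upgrades_registry"
	state_machine "blazar/internal/pkg/state_machine"
)

func example() {
	sm := state_machine.NewStateMachine(nil) // nil storage: state lives in memory only

	upgrades := map[int64]*urproto.Upgrade{
		1000: {
			Height: 1000,
			Status: urproto.UpgradeStatus_SCHEDULED, // an allowed input status
			Type:   urproto.UpgradeType_GOVERNANCE,
		},
	}

	// At height 900 the upgrade is still upcoming, so the provider status stands.
	sm.UpdateStatus(900, upgrades)
	_ = sm.GetStatus(1000) // SCHEDULED

	// From here on only blazar itself moves the upgrade through the
	// statuses managed by the state machine.
	sm.MustSetStatusAndStep(1000, urproto.UpgradeStatus_EXECUTING, urproto.UpgradeStep_MONITORING)
}
```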
diff --git a/internal/pkg/state_machine/state_machine_test.go b/internal/pkg/state_machine/state_machine_test.go
new file mode 100644
index 0000000..374a469
--- /dev/null
+++ b/internal/pkg/state_machine/state_machine_test.go
@@ -0,0 +1,319 @@
+package state_machine
+
+import (
+	"testing"
+
+	urproto "blazar/internal/pkg/proto/upgrades_registry"
+
+	"github.com/stretchr/testify/assert"
+)
+
+// Asserts that the state machine panics when it receives an upgrade with an initial status that is not managed by the state machine
+func TestStateMachineInitialUpgradeStates(t *testing.T) {
+	for upgradeStatus, shouldFail := range map[urproto.UpgradeStatus]bool{
+		// states managed by the provider or manually
+		urproto.UpgradeStatus_UNKNOWN:   false,
+		urproto.UpgradeStatus_SCHEDULED: false,
+		urproto.UpgradeStatus_ACTIVE:    false,
+		urproto.UpgradeStatus_CANCELLED: false,
+
+		// states managed by the state machine
+		urproto.UpgradeStatus_EXECUTING: true,
+		urproto.UpgradeStatus_COMPLETED: true,
+		urproto.UpgradeStatus_FAILED:    true,
+		urproto.UpgradeStatus_EXPIRED:   true,
+	} {
+		currentHeight := int64(100)
+		upgrades := []*urproto.Upgrade{
+			{
+				Height: 200,
+				Tag:    "v1.0.0",
+				Name:   "test upgrade",
+				Type:   urproto.UpgradeType_NON_GOVERNANCE_COORDINATED,
+				Status: upgradeStatus,
+				Source: urproto.ProviderType_CHAIN,
+			},
+		}
+
+		upgradesMap := make(map[int64]*urproto.Upgrade)
+		for _, upgrade := range upgrades {
+			upgradesMap[upgrade.Height] = upgrade
+		}
+
+		stateMachine := NewStateMachine(nil)
+		if shouldFail {
+			assert.Panics(t, func() {
+				stateMachine.UpdateStatus(currentHeight, upgradesMap)
+			})
+		} else {
+			assert.NotPanics(t, func() {
+				stateMachine.UpdateStatus(currentHeight, upgradesMap)
+			})
+		}
+	}
+}
+
+// Asserts that upgrades removed from the input list are also removed from the state machine
+func TestStateMachineUpgradesAreDeleted(t *testing.T) {
+	currentHeight := int64(100)
+	upgrades := []*urproto.Upgrade{
+		{
+			Height: 200,
+			Tag:    "v1.0.0",
+			Name:   "test upgrade",
+			Type:   urproto.UpgradeType_NON_GOVERNANCE_COORDINATED,
+			Status: urproto.UpgradeStatus_ACTIVE,
+			Source: urproto.ProviderType_CHAIN,
+		},
+		{
+			Height: 400,
+			Tag:    "v1.0.0",
+			Name:   "test upgrade",
+			Type:   urproto.UpgradeType_NON_GOVERNANCE_COORDINATED,
+			Status: urproto.UpgradeStatus_ACTIVE,
+			Source: urproto.ProviderType_CHAIN,
+		},
+	}
+
+	upgradesMap := make(map[int64]*urproto.Upgrade)
+	for _, upgrade := range upgrades {
+		upgradesMap[upgrade.Height] = upgrade
+	}
+
+	stateMachine := NewStateMachine(nil)
+	stateMachine.UpdateStatus(currentHeight, upgradesMap)
+
+	assert.Equal(t, urproto.UpgradeStatus_ACTIVE, stateMachine.GetStatus(200))
+
+	// remove the upgrade with height 200
+	delete(upgradesMap, 200)
+	stateMachine.UpdateStatus(currentHeight, upgradesMap)
+
+	assert.Equal(t, urproto.UpgradeStatus_UNKNOWN, stateMachine.GetStatus(200))
+	assert.Equal(t, urproto.UpgradeStatus_ACTIVE, stateMachine.GetStatus(400))
+}
+
+// Asserts that the state machine sets the expiry status correctly
+func TestStateMachineExpiry(t *testing.T) {
+	type testType struct {
+		initialStatus  urproto.UpgradeStatus
+		expectedStatus urproto.UpgradeStatus
+		step           urproto.UpgradeStep
+	}
+
+	tests := []testType{
+		{urproto.UpgradeStatus_UNKNOWN, urproto.UpgradeStatus_EXPIRED, urproto.UpgradeStep_NONE},
+		{urproto.UpgradeStatus_SCHEDULED, urproto.UpgradeStatus_EXPIRED, urproto.UpgradeStep_NONE},
+		{urproto.UpgradeStatus_ACTIVE, urproto.UpgradeStatus_EXPIRED, urproto.UpgradeStep_NONE},
+		{urproto.UpgradeStatus_CANCELLED, urproto.UpgradeStatus_CANCELLED, urproto.UpgradeStep_NONE},
+
+		{urproto.UpgradeStatus_EXECUTING, urproto.UpgradeStatus_EXECUTING, urproto.UpgradeStep_NONE},
+		{urproto.UpgradeStatus_COMPLETED, urproto.UpgradeStatus_COMPLETED, urproto.UpgradeStep_NONE},
+		{urproto.UpgradeStatus_FAILED, urproto.UpgradeStatus_FAILED, urproto.UpgradeStep_NONE},
+
+		// check that an active step doesn't mess up the states
+		{urproto.UpgradeStatus_EXECUTING, urproto.UpgradeStatus_EXECUTING, urproto.UpgradeStep_COMPOSE_FILE_UPGRADE},
+		{urproto.UpgradeStatus_COMPLETED, urproto.UpgradeStatus_COMPLETED, urproto.UpgradeStep_POST_UPGRADE_CHECK},
+		{urproto.UpgradeStatus_FAILED, urproto.UpgradeStatus_FAILED, urproto.UpgradeStep_COMPOSE_FILE_UPGRADE},
+
+		// if blazar missed the upgrade, i.e. the upgrade block has passed and the upgrade never
+		// transitioned into EXECUTING, the status is set to EXPIRED. Such a situation can happen,
+		// but handling past upgrades that "should have been executed" makes things more complicated
+		{urproto.UpgradeStatus_ACTIVE, urproto.UpgradeStatus_EXPIRED, urproto.UpgradeStep_MONITORING},
+	}
+
+	for upgradeType, tests := range map[urproto.UpgradeType][]testType{
+		urproto.UpgradeType_GOVERNANCE:                   tests,
+		urproto.UpgradeType_NON_GOVERNANCE_COORDINATED:   tests,
+		urproto.UpgradeType_NON_GOVERNANCE_UNCOORDINATED: tests,
+	} {
+		for _, test := range tests {
+			currentHeight := int64(100)
+			upgrades := []*urproto.Upgrade{
+				{
+					Height: 50, // this upgrade time has passed
+					Tag:    "v1.0.0",
+					Name:   "test upgrade",
+					Type:   upgradeType,
+					Status: urproto.UpgradeStatus_UNKNOWN,
+				},
+			}
+
+			upgradesMap := make(map[int64]*urproto.Upgrade)
+			for _, upgrade := range upgrades {
+				upgradesMap[upgrade.Height] = upgrade
+			}
+
+			stateMachine := NewStateMachine(nil)
+			_ = stateMachine.SetStatus(upgrades[0].Height, test.initialStatus)
+			stateMachine.SetStep(upgrades[0].Height, test.step)
+			assert.Equal(t, test.initialStatus, stateMachine.GetStatus(upgrades[0].Height))
+
+			stateMachine.UpdateStatus(currentHeight, upgradesMap)
+			assert.Equal(t, test.expectedStatus, stateMachine.GetStatus(upgrades[0].Height))
+		}
+	}
+}
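The cancellation policy exercised by the next test can be summarised as a small predicate. This is an illustrative restatement of the guards in `setStatus` above, not code from this change:

```go
package main

import urproto "blazar/internal/pkg/proto/upgrades_registry"

// canCancel restates the setStatus cancellation guards, for illustration only.
func canCancel(status urproto.UpgradeStatus, step urproto.UpgradeStep) bool {
	switch status {
	case urproto.UpgradeStatus_EXPIRED,
		urproto.UpgradeStatus_COMPLETED,
		urproto.UpgradeStatus_FAILED:
		// terminal statuses can never be cancelled
		return false
	case urproto.UpgradeStatus_EXECUTING:
		// an executing upgrade is cancellable only before any irreversible
		// step, i.e. while still in NONE, MONITORING or PRE_UPGRADE_CHECK
		return step == urproto.UpgradeStep_NONE ||
			step == urproto.UpgradeStep_MONITORING ||
			step == urproto.UpgradeStep_PRE_UPGRADE_CHECK
	}
	return true
}
```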
+// Asserts the ability to cancel an upgrade
+func TestStateMachineCancellation(t *testing.T) {
+	type testType struct {
+		initialStatus  urproto.UpgradeStatus
+		expectedStatus urproto.UpgradeStatus
+		step           urproto.UpgradeStep
+	}
+
+	// The rule of thumb is that a user can cancel an upgrade only if:
+	// * the upgrade is not being executed yet (status != EXECUTING or anything past that, like COMPLETED or FAILED)
+	// * the upgrade has started but the current step is still NONE, MONITORING or PRE_UPGRADE_CHECK (anything past that is not cancellable)
+	allCancellableTests := []testType{
+		{urproto.UpgradeStatus_UNKNOWN, urproto.UpgradeStatus_CANCELLED, urproto.UpgradeStep_NONE},
+		{urproto.UpgradeStatus_SCHEDULED, urproto.UpgradeStatus_CANCELLED, urproto.UpgradeStep_NONE},
+		{urproto.UpgradeStatus_ACTIVE, urproto.UpgradeStatus_CANCELLED, urproto.UpgradeStep_NONE},
+		{urproto.UpgradeStatus_CANCELLED, urproto.UpgradeStatus_CANCELLED, urproto.UpgradeStep_NONE},
+
+		// we allow cancelling if the upgrade hasn't started yet (as in, no step has been taken yet)
+		{urproto.UpgradeStatus_EXECUTING, urproto.UpgradeStatus_CANCELLED, urproto.UpgradeStep_NONE},
+
+		// if the current step is MONITORING, we can still cancel the upgrade
+		{urproto.UpgradeStatus_EXECUTING, urproto.UpgradeStatus_CANCELLED, urproto.UpgradeStep_MONITORING},
+
+		// if the current step is PRE_UPGRADE_CHECK, we can still cancel the upgrade
+		{urproto.UpgradeStatus_EXECUTING, urproto.UpgradeStatus_CANCELLED, urproto.UpgradeStep_PRE_UPGRADE_CHECK},
+
+		// if the upgrade is already being executed, we can't cancel it
+		{urproto.UpgradeStatus_EXECUTING, urproto.UpgradeStatus_EXECUTING, urproto.UpgradeStep_COMPOSE_FILE_UPGRADE},
+
+		// if the upgrade is already past the upgrade step, we can't cancel it (it has already happened)
+		{urproto.UpgradeStatus_EXECUTING, urproto.UpgradeStatus_EXECUTING, urproto.UpgradeStep_POST_UPGRADE_CHECK},
+
+		// if the upgrade is expired, completed or failed then we can't cancel it
+		{urproto.UpgradeStatus_EXPIRED, urproto.UpgradeStatus_EXPIRED, urproto.UpgradeStep_NONE},
+		{urproto.UpgradeStatus_COMPLETED, urproto.UpgradeStatus_COMPLETED, urproto.UpgradeStep_NONE},
+		{urproto.UpgradeStatus_FAILED, urproto.UpgradeStatus_FAILED, urproto.UpgradeStep_NONE},
+	}
+
+	for upgradeType, tests := range map[urproto.UpgradeType][]testType{
+		urproto.UpgradeType_GOVERNANCE:                   allCancellableTests,
+		urproto.UpgradeType_NON_GOVERNANCE_COORDINATED:   allCancellableTests,
+		urproto.UpgradeType_NON_GOVERNANCE_UNCOORDINATED: allCancellableTests,
+	} {
+		for _, test := range tests {
+			currentHeight := int64(100)
+			upgrades := []*urproto.Upgrade{
+				{
+					Height: 150,
+					Tag:    "v1.0.0",
+					Name:   "test upgrade",
+					Type:   upgradeType,
+					Status: urproto.UpgradeStatus_UNKNOWN,
+				},
+			}
+
+			upgradesMap := make(map[int64]*urproto.Upgrade)
+			for _, upgrade := range upgrades {
+				upgradesMap[upgrade.Height] = upgrade
+			}
+
+			stateMachine := NewStateMachine(nil)
+
+			// set initial status and step
+			_ = stateMachine.SetStatus(upgrades[0].Height, test.initialStatus)
+			stateMachine.SetStep(upgrades[0].Height, test.step)
+			stateMachine.UpdateStatus(currentHeight, upgradesMap)
+
+			// simulate the cancellation
+			_ = stateMachine.SetStatus(upgrades[0].Height, urproto.UpgradeStatus_CANCELLED)
+			stateMachine.SetStep(upgrades[0].Height, test.step)
+			stateMachine.UpdateStatus(currentHeight, upgradesMap)
+
+			assert.Equal(t, test.expectedStatus, stateMachine.GetStatus(upgrades[0].Height))
+		}
+	}
+}
+
+// Asserts clearly invalid upgrade state transitions are not allowed
+func TestStateMachineInvalidStateTransitions(t *testing.T) {
+	type testType struct {
+		initialStatus urproto.UpgradeStatus
+		newStatus     urproto.UpgradeStatus
+		expectError   bool
+	}
+
+	allTests := []testType{
+		// if the upgrade was executing then regressing to prior states is likely a bug
+		{urproto.UpgradeStatus_EXECUTING, urproto.UpgradeStatus_SCHEDULED, true},
+		{urproto.UpgradeStatus_EXECUTING, urproto.UpgradeStatus_ACTIVE, true},
+
+		// TODO: I am not sure if we should allow this transition
+		{urproto.UpgradeStatus_EXECUTING, urproto.UpgradeStatus_EXPIRED, false},
+
+		// upgrade in completed state is final and can't be changed
{urproto.UpgradeStatus_COMPLETED, urproto.UpgradeStatus_UNKNOWN, true}, + {urproto.UpgradeStatus_COMPLETED, urproto.UpgradeStatus_SCHEDULED, true}, + {urproto.UpgradeStatus_COMPLETED, urproto.UpgradeStatus_ACTIVE, true}, + {urproto.UpgradeStatus_COMPLETED, urproto.UpgradeStatus_EXECUTING, true}, + {urproto.UpgradeStatus_COMPLETED, urproto.UpgradeStatus_COMPLETED, false}, + {urproto.UpgradeStatus_COMPLETED, urproto.UpgradeStatus_FAILED, true}, + {urproto.UpgradeStatus_COMPLETED, urproto.UpgradeStatus_CANCELLED, true}, + {urproto.UpgradeStatus_COMPLETED, urproto.UpgradeStatus_EXPIRED, true}, + + // upgrade in failed state is final and can't be changed + {urproto.UpgradeStatus_FAILED, urproto.UpgradeStatus_UNKNOWN, true}, + {urproto.UpgradeStatus_FAILED, urproto.UpgradeStatus_SCHEDULED, true}, + {urproto.UpgradeStatus_FAILED, urproto.UpgradeStatus_ACTIVE, true}, + {urproto.UpgradeStatus_FAILED, urproto.UpgradeStatus_EXECUTING, true}, + {urproto.UpgradeStatus_FAILED, urproto.UpgradeStatus_COMPLETED, true}, + {urproto.UpgradeStatus_FAILED, urproto.UpgradeStatus_FAILED, false}, + {urproto.UpgradeStatus_FAILED, urproto.UpgradeStatus_CANCELLED, true}, + {urproto.UpgradeStatus_FAILED, urproto.UpgradeStatus_EXPIRED, true}, + + // upgrade in cancelled state is final and can't be changed + {urproto.UpgradeStatus_CANCELLED, urproto.UpgradeStatus_UNKNOWN, true}, + {urproto.UpgradeStatus_CANCELLED, urproto.UpgradeStatus_SCHEDULED, true}, + {urproto.UpgradeStatus_CANCELLED, urproto.UpgradeStatus_ACTIVE, true}, + {urproto.UpgradeStatus_CANCELLED, urproto.UpgradeStatus_EXECUTING, true}, + {urproto.UpgradeStatus_CANCELLED, urproto.UpgradeStatus_COMPLETED, true}, + {urproto.UpgradeStatus_CANCELLED, urproto.UpgradeStatus_FAILED, true}, + {urproto.UpgradeStatus_CANCELLED, urproto.UpgradeStatus_CANCELLED, false}, + {urproto.UpgradeStatus_CANCELLED, urproto.UpgradeStatus_EXPIRED, true}, + } + + for upgradeType, tests := range map[urproto.UpgradeType][]testType{ + urproto.UpgradeType_GOVERNANCE: allTests, + urproto.UpgradeType_NON_GOVERNANCE_COORDINATED: allTests, + urproto.UpgradeType_NON_GOVERNANCE_UNCOORDINATED: allTests, + } { + for _, test := range tests { + currentHeight := int64(100) + upgrades := []*urproto.Upgrade{ + { + Height: 150, + Tag: "v1.0.0", + Name: "test upgrade", + Type: upgradeType, + Status: urproto.UpgradeStatus_UNKNOWN, + }, + } + + upgradesMap := make(map[int64]*urproto.Upgrade) + for _, upgrade := range upgrades { + upgradesMap[upgrade.Height] = upgrade + } + + stateMachine := NewStateMachine(nil) + + // set initial status and step + _ = stateMachine.SetStatus(upgrades[0].Height, test.initialStatus) + stateMachine.UpdateStatus(currentHeight, upgradesMap) + + // simulate the state change attempt + err := stateMachine.SetStatus(upgrades[0].Height, test.newStatus) + if test.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + } + } +} diff --git a/internal/pkg/static/static.go b/internal/pkg/static/static.go new file mode 100644 index 0000000..bdb0144 --- /dev/null +++ b/internal/pkg/static/static.go @@ -0,0 +1,6 @@ +package static + +import "embed" + +//go:embed templates +var Templates embed.FS diff --git a/internal/pkg/static/templates/index/index-blazar.html b/internal/pkg/static/templates/index/index-blazar.html new file mode 100644 index 0000000..240123e --- /dev/null +++ b/internal/pkg/static/templates/index/index-blazar.html @@ -0,0 +1,426 @@ + + + + + + + Blazar + + + + + + + + + +
+
+
+ +
+
+
+

Blazar Status ({{ .Hostname }})

+

Automatic node upgrades for Cosmos SDK networks

+
+
+

+

Network{{ .DefaultNetwork }}
+
Current Height{{ .CurrentBlockHeight }}
+
Last Update{{ .LastUpdateTime }} ({{ .LastUpdateDiff }} ago)
+
Avg Block Speed{{ .BlockSpeed }}s
+
Time to next sync{{ .SecondsToNextUpdate }}s
+ +

+
+ {{ if .Warning }} +

{{ .Warning }}

+ {{ else }} +

Blazar is synchronized! Now you should see the list of registered upgrades observed by this blazar instance.

+ {{ end }} +
+
+
+
+ + +
+ +
+

Actions

+ +
+ Register New Upgrade +
+
+ + + + + + + + +
+ + + +
+
+ + +
+ Register New Version +
+
+ + + + +
+ + + +
+
+ + +
+ Cancel Upgrade +
+
+ + +
+ + + +
+
+
+ + +
+

Registered Upgrades

+
+ + + + + + + + + + + + + + + + + + + {{range $index, $element := .Upgrades}} + + + + + + + + + + + + + + + {{end}} + +
HeightTagNetworkNameTypeStatusStepPrioritySourceProposalIDETA (Blocks)ETA
{{ $element.Height }} + {{ if $element.Tag }} + {{ $element.Tag }} + {{ else }} +
+ {{ end }} +
{{ $element.Network }}{{ $element.Name }}{{ $element.Type }}{{ $element.Status }}{{ $element.Step }}{{ $element.Priority }}{{ $element.Source }}{{ $element.ProposalId }} + {{ if gt .Height $.CurrentBlockHeight }} + {{ index $.BlocksToUpgrade .Height }} + {{ end }} + + {{ if gt .Height $.CurrentBlockHeight }} + {{ index $.BlocksToETA .Height }} + {{ end }} +
+
+
+ +
+ + + diff --git a/internal/pkg/static/templates/index/index-proxy.html b/internal/pkg/static/templates/index/index-proxy.html new file mode 100644 index 0000000..bb96c34 --- /dev/null +++ b/internal/pkg/static/templates/index/index-proxy.html @@ -0,0 +1,174 @@ + + + + + + + Blazar Proxy + + + + + + +
+
+
+ +
+
+
+

Blazar Proxy

+

Aggregate information about Blazar instances

+
+
+

+

Networks{{ .NoNetworks }}
+
Instances{{ .NoInstances }}
+
Errors{{ .NoErrors }}
+
Active{{ .NoActive }}
+
Executing{{ .NoExecuting }}
+
Expired{{ .NoExpired }}
+
Completed{{ .NoCompleted }}
+
Fetch Time{{ .FetchTime }}
+

+
+ {{ if .Warning }} +

{{ .Warning }}

+ {{ else }} +

Blazar Proxy is synchronized! You should now see the aggregate list of the latest upgrades from each instance

+ {{ end }} +
+
+
+
+ + +
+ +
+ {{if (lt .NoErrors .NoInstances) }} +

Registered Upgrades

+
+ + + + + + + + + + + + + + + + + {{range $network, $pairs := .Upgrades}} + {{range $index, $pair := $pairs}} + {{if not $pair.Error }} + + + {{if $pair.LastUpgrade}} + + + + + + + + + + {{else}} + + + + + + + + + + {{end}} + + {{end}} + {{end}} + {{end}} + +
InstanceHeightTagNetworkNameTypeStatusStepPrioritySource
+ + {{ $pair.Instance.Name }} + + {{ $pair.LastUpgrade.Height }} + {{ if $pair.LastUpgrade.Tag }} + {{ $pair.LastUpgrade.Tag }} + {{ else }} +
+ {{ end }} +
{{ $pair.LastUpgrade.Network }}{{ $pair.LastUpgrade.Name }}{{ $pair.LastUpgrade.Type }}{{ $pair.LastUpgrade.Status }}{{ $pair.LastUpgrade.Step }}{{ $pair.LastUpgrade.Priority }}{{ $pair.LastUpgrade.Source }}
+
+ {{end}} + {{if .NoErrors }} +

Proxy Errors

+
+ + + + + + + + + + {{range $network, $pairs := .Upgrades}} + {{range $index, $pair := $pairs}} + {{if $pair.Error }} + + + + + + {{end}} + {{end}} + {{end}} + +
InstanceNetworkMessage
+ + {{ $pair.Instance.Name }} + + {{$pair.Instance.Network}}{{$pair.Error}}
+
+
+ {{end}} + +
+ + + diff --git a/internal/pkg/static/templates/index/logo.png b/internal/pkg/static/templates/index/logo.png new file mode 100644 index 0000000..21944ab Binary files /dev/null and b/internal/pkg/static/templates/index/logo.png differ diff --git a/internal/pkg/testutils/util.go b/internal/pkg/testutils/util.go new file mode 100644 index 0000000..b7d8317 --- /dev/null +++ b/internal/pkg/testutils/util.go @@ -0,0 +1,132 @@ +package testutils + +import ( + "context" + "fmt" + "html/template" + "io" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "blazar/internal/pkg/log/logger" + + "github.com/otiai10/copy" + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/testcontainers/testcontainers-go" +) + +const TestdataDirPath = "../../../testdata" + +func WriteTmpl(file string, data interface{}) error { + t, err := template.New(filepath.Base(file)).ParseFiles(file) + if err != nil { + return err + } + + newF := strings.Replace(file, ".tmpl", "", 1) + f, err := os.OpenFile(newF, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + return err + } + + err = t.Execute(f, data) + if err != nil { + return err + } + + return f.Close() +} + +func BuildTestImages(ctx context.Context, dockerProvider *testcontainers.DockerProvider) (string, string) { + simd1RepoTag, err := dockerProvider.BuildImage(ctx, &testcontainers.ContainerRequest{ + FromDockerfile: testcontainers.FromDockerfile{ + Context: "../../../testdata/daemon/images/v0.0.1/", + }, + }) + + if err != nil { + fmt.Println("failed to build simd v0.0.1 container") + os.Exit(1) + } + + simd2RepoTag, err := dockerProvider.BuildImage(ctx, &testcontainers.ContainerRequest{ + FromDockerfile: testcontainers.FromDockerfile{ + Repo: strings.Split(simd1RepoTag, ":")[0], + Context: "../../../testdata/daemon/images/v0.0.2/", + }, + }) + + if err != nil { + fmt.Println("failed to build simd v0.0.2 container") + os.Exit(1) + } + + return simd1RepoTag, simd2RepoTag +} + +func PrepareTestData(t *testing.T, prefix, path, dst string) string { + tempDir := t.TempDir() + pth := filepath.Join(prefix, path) + target := filepath.Join(tempDir, dst) + + MustCopy(t, pth, target) + + return target +} + +func MustCopy(t *testing.T, src, dst string) { + err := copy.Copy(filepath.Join(TestdataDirPath, src), dst) + assert.NoError(t, err) +} + +func NewChainHomeDir(t *testing.T) (string, string) { + chainHome := t.TempDir() + blazarDir, err := filepath.Abs(filepath.Join(chainHome, "blazar")) + require.NoError(t, err) + + err = os.Mkdir(filepath.Join(chainHome, "blazar"), 0755) + require.NoError(t, err) + + chainHomeAbs, err := filepath.Abs(chainHome) + assert.NoError(t, err) + + return chainHomeAbs, blazarDir +} + +func MakeImageWith(t *testing.T, imageName, tag string, dockerProvider *testcontainers.DockerProvider) { + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + _, err := dockerProvider.BuildImage(ctx, &testcontainers.ContainerRequest{ + FromDockerfile: testcontainers.FromDockerfile{ + Context: filepath.Join(TestdataDirPath, "docker", "sleep-dockerfile"), + Repo: imageName, + Tag: tag, + }, + }) + require.NoError(t, err) +} + +func MakeEnvEchoImageWith(t *testing.T, dockerProvider *testcontainers.DockerProvider) { + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + _, err := dockerProvider.BuildImage(ctx, &testcontainers.ContainerRequest{ + FromDockerfile: testcontainers.FromDockerfile{ + Context: 
filepath.Join(TestdataDirPath, "docker", "echo-dockerfile"),
+			Repo:    "testrepo/env-echo",
+			Tag:     "latest",
+		},
+	})
+	require.NoError(t, err)
+}
+
+func NewContext() context.Context {
+	lg := zerolog.New(io.Discard).With().Logger()
+	return logger.WithContext(context.Background(), &lg)
+}
diff --git a/internal/pkg/upgrades_registry/upgrades_registry.go b/internal/pkg/upgrades_registry/upgrades_registry.go
new file mode 100644
index 0000000..b6154e0
--- /dev/null
+++ b/internal/pkg/upgrades_registry/upgrades_registry.go
@@ -0,0 +1,675 @@
+package upgrades_registry
+
+import (
+	"context"
+	"fmt"
+	"slices"
+	"sort"
+	"sync"
+	"time"
+
+	"blazar/internal/pkg/config"
+	"blazar/internal/pkg/cosmos"
+	"blazar/internal/pkg/errors"
+	urproto "blazar/internal/pkg/proto/upgrades_registry"
+	vrproto "blazar/internal/pkg/proto/version_resolver"
+	"blazar/internal/pkg/provider"
+	"blazar/internal/pkg/provider/chain"
+	"blazar/internal/pkg/provider/database"
+	"blazar/internal/pkg/provider/local"
+	"blazar/internal/pkg/state_machine"
+
+	"golang.org/x/sync/errgroup"
+	"google.golang.org/protobuf/proto"
+)
+
+type SyncInfo struct {
+	LastBlockHeight int64
+	LastUpdateTime  time.Time
+}
+
+type UpgradeRegistry struct {
+	// a list of providers to fetch upgrades from (e.g. chain, database, local)
+	providers map[urproto.ProviderType]provider.UpgradeProvider
+
+	// a list of providers to fetch versions from (e.g. chain, database, local)
+	versionProviders []urproto.ProviderType
+
+	// a state machine containing the current status of all upgrades
+	stateMachine *state_machine.StateMachine
+
+	// lock for the registry
+	lock *sync.RWMutex
+
+	// a list of the latest fetched upgrades
+	upgrades map[int64]*urproto.Upgrade
+
+	// a list of versions fetched from providers
+	versions map[int64]*vrproto.Version
+
+	// a list of upgrades that were overridden by another upgrade with the same height and higher priority
+	overriddenUpgrades map[int64][]*urproto.Upgrade
+
+	// a list of versions that were overridden by another version with the same height and higher priority
+	overriddenVersions map[int64][]*vrproto.Version
+
+	// information about the last sync
+	syncInfo SyncInfo
+
+	// network for which the registry is created
+	network string
+}
+
+func NewUpgradeRegistry(providers map[urproto.ProviderType]provider.UpgradeProvider, versionProviders []urproto.ProviderType, stateMachine *state_machine.StateMachine, network string) *UpgradeRegistry {
+	return &UpgradeRegistry{
+		providers:          providers,
+		versionProviders:   versionProviders,
+		lock:               &sync.RWMutex{},
+		upgrades:           make(map[int64]*urproto.Upgrade, 0),
+		versions:           make(map[int64]*vrproto.Version, 0),
+		overriddenUpgrades: make(map[int64][]*urproto.Upgrade),
+		overriddenVersions: make(map[int64][]*vrproto.Version),
+		stateMachine:       stateMachine,
+		syncInfo:           SyncInfo{},
+		network:            network,
+	}
+}
+
+func NewUpgradesRegistryFromConfig(cfg *config.Config) (*UpgradeRegistry, error) {
+	providers := make(map[urproto.ProviderType]provider.UpgradeProvider, 0)
+
+	if cfg.UpgradeRegistry.Provider.Chain != nil && slices.Contains(
+		cfg.UpgradeRegistry.SelectedProviders, urproto.ProviderType_name[int32(urproto.ProviderType_CHAIN)],
+	) {
+		cosmosClient, err := cosmos.NewClient(cfg.Clients.Host, cfg.Clients.GrpcPort, cfg.Clients.CometbftPort, cfg.Clients.Timeout)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to create cosmos client")
+		}
+
+		if err := cosmosClient.StartCometbftClient(); err != nil {
+			return nil, errors.Wrapf(err, "failed to start cometbft client")
+		}
+
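+		// Each provider below is registered only when it is both configured and
+		// listed in the registry's selected providers; the chain provider also
+		// needs the cosmos/cometbft clients connected above to query the node.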
+ provider := chain.NewProvider(cosmosClient, cfg.UpgradeRegistry.Network, cfg.UpgradeRegistry.Provider.Chain.DefaultPriority) + providers[provider.Type()] = provider + } + + if cfg.UpgradeRegistry.Provider.Database != nil && slices.Contains( + cfg.UpgradeRegistry.SelectedProviders, urproto.ProviderType_name[int32(urproto.ProviderType_DATABASE)], + ) { + provider, err := database.NewDatabaseProvider( + cfg.UpgradeRegistry.Provider.Database, + cfg.UpgradeRegistry.Network, + ) + if err != nil { + return nil, errors.Wrapf(err, "failed to create database provider") + } + providers[provider.Type()] = provider + } + + if cfg.UpgradeRegistry.Provider.Local != nil && slices.Contains( + cfg.UpgradeRegistry.SelectedProviders, urproto.ProviderType_name[int32(urproto.ProviderType_LOCAL)], + ) { + provider, err := local.NewProvider( + cfg.UpgradeRegistry.Provider.Local.ConfigPath, + cfg.UpgradeRegistry.Network, + cfg.UpgradeRegistry.Provider.Local.DefaultPriority, + ) + if err != nil { + return nil, errors.Wrapf(err, "failed to create local provider") + } + providers[provider.Type()] = provider + } + + versionProviders := make([]urproto.ProviderType, 0) + if cfg.UpgradeRegistry.VersionResolvers != nil { + for _, providerName := range cfg.UpgradeRegistry.VersionResolvers.Providers { + providerType := urproto.ProviderType(urproto.ProviderType_value[providerName]) + if _, ok := providers[providerType].(provider.VersionResolver); !ok { + return nil, fmt.Errorf("version resolver provider %s does not implement VersionResolver interface", providerName) + } + versionProviders = append(versionProviders, providerType) + } + } + + // handle state machine storage provider + var stateMachine *state_machine.StateMachine + if cfg.UpgradeRegistry.StateMachine.Provider != "" { + if cfg.UpgradeRegistry.StateMachine.Provider != urproto.ProviderType_name[int32(urproto.ProviderType_LOCAL)] { + return nil, fmt.Errorf("state machine storage provider %s is not supported (only 'local' is supported now)", cfg.UpgradeRegistry.StateMachine.Provider) + } + + providerType := urproto.ProviderType(urproto.ProviderType_value[cfg.UpgradeRegistry.StateMachine.Provider]) + localProvider := providers[providerType].(*local.Provider) + stateMachine = state_machine.NewStateMachine(localProvider) + } + + // state machine without storage provider is okay, everything will be stored in memory + if stateMachine == nil { + stateMachine = state_machine.NewStateMachine(nil) + } + + // TODO: context in constructor aint great + err := stateMachine.Restore(context.Background()) + if err != nil { + return nil, errors.Wrapf(err, "failed to restore state machine") + } + + return NewUpgradeRegistry(providers, versionProviders, stateMachine, cfg.UpgradeRegistry.Network), nil +} + +func (ur *UpgradeRegistry) GetStateMachine() *state_machine.StateMachine { + return ur.stateMachine +} + +func (ur *UpgradeRegistry) GetAllUpgradesWithCache() map[int64]*urproto.Upgrade { + ur.lock.RLock() + defer ur.lock.RUnlock() + + return copyMap(ur.upgrades) +} + +func (ur *UpgradeRegistry) GetAllUpgrades(ctx context.Context, useCache bool) (map[int64]*urproto.Upgrade, error) { + if useCache { + return ur.GetAllUpgradesWithCache(), nil + } + + _, _, resolvedUpgrades, _, err := ur.Update(ctx, 0, false) + if err != nil { + return nil, err + } + + return resolvedUpgrades, nil +} + +func (ur *UpgradeRegistry) GetOverriddenUpgradesWithCache() map[int64][]*urproto.Upgrade { + ur.lock.RLock() + defer ur.lock.RUnlock() + + return copyMapList(ur.overriddenUpgrades) +} + +func (ur 
*UpgradeRegistry) GetOverriddenUpgrades(ctx context.Context, useCache bool) (map[int64][]*urproto.Upgrade, error) { + if useCache { + return ur.GetOverriddenUpgradesWithCache(), nil + } + + _, _, _, overriddenUpgrades, err := ur.Update(ctx, 0, false) + if err != nil { + return nil, err + } + + return overriddenUpgrades, nil +} + +func (ur *UpgradeRegistry) GetUpcomingUpgradesWithCache(height int64, allowedStatus ...urproto.UpgradeStatus) []*urproto.Upgrade { + ur.lock.RLock() + defer ur.lock.RUnlock() + + upcomingUpgrades := sortAndfilterUpgradesByStatus(ur.upgrades, ur.stateMachine, height, allowedStatus...) + + return copyList(upcomingUpgrades) +} + +func (ur *UpgradeRegistry) GetUpcomingUpgrades(ctx context.Context, useCache bool, height int64, allowedStatus ...urproto.UpgradeStatus) ( + []*urproto.Upgrade, + error, +) { + if useCache { + return ur.GetUpcomingUpgradesWithCache(height, allowedStatus...), nil + } + + _, _, resolvedUpgrades, _, err := ur.Update(ctx, 0, false) + if err != nil { + return nil, err + } + + return sortAndfilterUpgradesByStatus(resolvedUpgrades, ur.stateMachine, height, allowedStatus...), nil +} + +func (ur *UpgradeRegistry) GetUpgradeWithCache(height int64) *urproto.Upgrade { + ur.lock.RLock() + defer ur.lock.RUnlock() + + upgrade := filterUpgradesByHeight(ur.upgrades, height) + if upgrade != nil { + return proto.Clone(upgrade).(*urproto.Upgrade) + } + return nil +} + +func (ur *UpgradeRegistry) GetUpgrade(ctx context.Context, useCache bool, height int64) (*urproto.Upgrade, error) { + if useCache { + return ur.GetUpgradeWithCache(height), nil + } + + _, _, resolvedUpgrades, _, err := ur.Update(ctx, 0, false) + if err != nil { + return nil, err + } + + return filterUpgradesByHeight(resolvedUpgrades, height), nil +} + +func (ur *UpgradeRegistry) Update(ctx context.Context, currentHeight int64, commit bool) ( + map[int64]*vrproto.Version, + map[int64][]*vrproto.Version, + map[int64]*urproto.Upgrade, + map[int64][]*urproto.Upgrade, + error, +) { + resolvedVersions, overriddenVersions, err := ur.UpdateVersions(ctx, commit) + if err != nil { + return nil, nil, nil, nil, errors.Wrapf(err, "failed to update versions") + } + + resolvedUpgrades, overriddenUpgrades, err := ur.UpdateUpgrades(ctx, currentHeight, resolvedVersions, commit) + if err != nil { + return nil, nil, nil, nil, errors.Wrapf(err, "failed to update upgrades") + } + + ur.lock.Lock() + defer ur.lock.Unlock() + + ur.syncInfo = SyncInfo{ + LastBlockHeight: currentHeight, + LastUpdateTime: time.Now(), + } + + return resolvedVersions, overriddenVersions, resolvedUpgrades, overriddenUpgrades, nil +} + +func (ur *UpgradeRegistry) GetAllVersionsWithCache() map[int64]*vrproto.Version { + ur.lock.RLock() + defer ur.lock.RUnlock() + + return copyMap(ur.versions) +} + +func (ur *UpgradeRegistry) GetAllVersions(ctx context.Context, useCache bool) (map[int64]*vrproto.Version, error) { + if useCache { + return ur.GetAllVersionsWithCache(), nil + } + + resolvedVersions, _, _, _, err := ur.Update(ctx, 0, false) + if err != nil { + return nil, err + } + + return resolvedVersions, nil +} + +func (ur *UpgradeRegistry) GetOverriddenVersionsWithCache() map[int64][]*vrproto.Version { + ur.lock.RLock() + defer ur.lock.RUnlock() + + return copyMapList(ur.overriddenVersions) +} + +func (ur *UpgradeRegistry) GetOverriddenVersions(ctx context.Context, useCache bool) (map[int64][]*vrproto.Version, error) { + if useCache { + return ur.GetOverriddenVersionsWithCache(), nil + } + + _, overriddenVersions, _, _, err := ur.Update(ctx, 0, 
false) + if err != nil { + return nil, err + } + + return overriddenVersions, nil +} + +func (ur *UpgradeRegistry) GetVersionWithCache(height int64) *vrproto.Version { + ur.lock.RLock() + defer ur.lock.RUnlock() + + version := filterVersionsByHeight(ur.versions, height) + if version != nil { + return proto.Clone(version).(*vrproto.Version) + } + return nil +} + +func (ur *UpgradeRegistry) GetVersion(ctx context.Context, useCache bool, height int64) (*vrproto.Version, error) { + if useCache { + return ur.GetVersionWithCache(height), nil + } + + resolvedVersions, _, _, _, err := ur.Update(ctx, 0, false) + if err != nil { + return nil, err + } + + return filterVersionsByHeight(resolvedVersions, height), nil +} + +func (ur *UpgradeRegistry) UpdateVersions(ctx context.Context, commit bool) (map[int64]*vrproto.Version, map[int64][]*vrproto.Version, error) { + g, ctx := errgroup.WithContext(ctx) + results := make([][]*vrproto.Version, len(ur.versionProviders)) + + for i, providerName := range ur.versionProviders { + // from go 1.22 the copy of the loop variable is not needed anymore + // https://tip.golang.org/doc/go1.22#language + + g.Go(func() error { + if provider, ok := ur.providers[providerName].(provider.VersionResolver); ok { + versions, err := provider.GetVersions(ctx) + if err != nil { + return errors.Wrapf(err, "%s provider failed to fetch versions", providerName) + } + + if err := checkDuplicates(versions, providerName); err != nil { + return errors.Wrapf(err, "%s version provider returned duplicate versions", providerName) + } + + results[i] = versions + } + return nil + }) + } + + if err := g.Wait(); err != nil { + return nil, nil, err + } + + allVersions := make([]*vrproto.Version, 0) + for _, versions := range results { + allVersions = append(allVersions, versions...) + } + + resolvedVersions, overriddenVersions := resolvePriorities(allVersions) + + if commit { + ur.lock.Lock() + defer ur.lock.Unlock() + + ur.versions = resolvedVersions + ur.overriddenVersions = overriddenVersions + } + + return resolvedVersions, overriddenVersions, nil +} + +func (ur *UpgradeRegistry) UpdateUpgrades(ctx context.Context, currentHeight int64, versions map[int64]*vrproto.Version, commit bool) (map[int64]*urproto.Upgrade, map[int64][]*urproto.Upgrade, error) { + g, ctx := errgroup.WithContext(ctx) + results := make([][]*urproto.Upgrade, len(ur.providers)) + + i := 0 + for _, provider := range ur.providers { + // from go 1.22 the copy of the loop variable (provider) is not needed anymore + // https://tip.golang.org/doc/go1.22#language + // + // but the copy of the global variable (i) is still needed + // https://golang.org/doc/faq#closures_and_goroutines + ii := i + + g.Go(func() error { + upgrades, err := provider.GetUpgrades(ctx) + if err != nil { + return errors.Wrapf(err, "%s provider failed to fetch upgrades", provider.Type()) + } + + if err := checkDuplicates(upgrades, provider.Type()); err != nil { + return errors.Wrapf(err, "%s provider returned duplicate upgrades", provider.Type()) + } + + results[ii] = upgrades + return nil + }) + + i++ + } + + if err := g.Wait(); err != nil { + return nil, nil, err + } + + allUpgrades := make([]*urproto.Upgrade, 0) + for _, upgrades := range results { + allUpgrades = append(allUpgrades, upgrades...) 
+	}
+
+	resolvedUpgrades, overriddenUpgrades := resolvePriorities(allUpgrades)
+
+	// lock just in case the versions map is a reference to ur.versions
+	ur.lock.RLock()
+	for _, upgrade := range resolvedUpgrades {
+		// try to resolve a version for the upgrade
+		if upgrade.Tag == "" {
+			if version, ok := versions[upgrade.Height]; ok {
+				upgrade.Tag = version.Tag
+			}
+			// else {
+			//	TODO: try to resolve version using different methods, RPC, regexes etc
+			// }
+		}
+	}
+	ur.lock.RUnlock()
+
+	if commit {
+		ur.lock.Lock()
+		defer ur.lock.Unlock()
+		ur.upgrades = resolvedUpgrades
+		ur.overriddenUpgrades = overriddenUpgrades
+
+		// update statuses of all resolved upgrades
+		ur.stateMachine.UpdateStatus(currentHeight, ur.upgrades)
+	}
+
+	return resolvedUpgrades, overriddenUpgrades, nil
+}
+
+func (ur *UpgradeRegistry) RegisterVersion(ctx context.Context, version *vrproto.Version, overwrite bool) error {
+	ur.lock.RLock()
+	defer ur.lock.RUnlock()
+
+	switch version.Source {
+	case urproto.ProviderType_CHAIN:
+		return errors.New("add upgrade is not supported for chain provider")
+
+	case urproto.ProviderType_DATABASE:
+		if p, ok := ur.providers[urproto.ProviderType_DATABASE]; ok {
+			return p.(provider.VersionResolver).RegisterVersion(ctx, version, overwrite)
+		} else {
+			return errors.New("database provider is not configured")
+		}
+
+	case urproto.ProviderType_LOCAL:
+		if p, ok := ur.providers[urproto.ProviderType_LOCAL]; ok {
+			return p.(provider.VersionResolver).RegisterVersion(ctx, version, overwrite)
+		} else {
+			return errors.New("local provider is not configured")
+		}
+	}
+
+	return fmt.Errorf("unknown upgrade source %s", version.GetSource().String())
+}
+
+func (ur *UpgradeRegistry) CancelUpgrade(ctx context.Context, height int64, source urproto.ProviderType, network string, force bool) error {
+	if force {
+		if network != ur.network {
+			return fmt.Errorf("the network %s does not match the registry network %s", network, ur.network)
+		}
+		// the provider doesn't matter here, but the user should be aware that a forced
+		// cancellation applies only to this blazar node and is not propagated globally
+		if source != urproto.ProviderType_LOCAL {
+			return fmt.Errorf("force cancel is only supported for local provider")
+		}
+		return ur.stateMachine.SetStatus(height, urproto.UpgradeStatus_CANCELLED)
+	}
+
+	switch source {
+	// cancel only on this blazar instance
+	case urproto.ProviderType_LOCAL:
+		if p, ok := ur.providers[urproto.ProviderType_LOCAL]; ok {
+			return p.CancelUpgrade(ctx, height, network)
+		} else {
+			return errors.New("local provider is not configured")
+		}
+
+	// cancel on all blazar instances
+	case urproto.ProviderType_DATABASE:
+		if p, ok := ur.providers[urproto.ProviderType_DATABASE]; ok {
+			return p.CancelUpgrade(ctx, height, network)
+		} else {
+			return errors.New("database provider is not configured")
+		}
+
+	default:
+		return fmt.Errorf("can't cancel upgrade with source %s", source.String())
+	}
+}
+
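For reference, a hedged sketch of the three cancellation paths from a caller's perspective (the height and network name are made up; the calls are the ones defined above):

```go
package main

import (
	"context"

	urproto "blazar/internal/pkg/proto/upgrades_registry"
	upgrades_registry "blazar/internal/pkg/upgrades_registry"
)

func cancelExamples(ctx context.Context, ur *upgrades_registry.UpgradeRegistry) error {
	// Cancel on this instance only: the local provider records the cancellation.
	if err := ur.CancelUpgrade(ctx, 1000, urproto.ProviderType_LOCAL, "testnet-1", false); err != nil {
		return err
	}

	// Cancel for every instance sharing the same database provider.
	if err := ur.CancelUpgrade(ctx, 1000, urproto.ProviderType_DATABASE, "testnet-1", false); err != nil {
		return err
	}

	// Force-cancel: skips the providers and flips this node's state machine
	// directly; only accepted for the LOCAL source and a matching network.
	return ur.CancelUpgrade(ctx, 1000, urproto.ProviderType_LOCAL, "testnet-1", true)
}
```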
+func (ur *UpgradeRegistry) AddUpgrade(ctx context.Context, upgrade *urproto.Upgrade, overwrite bool) error {
+	ur.lock.RLock()
+	defer ur.lock.RUnlock()
+
+	// The use case for the cancelled status is for the user to create an upgrade with a higher priority that cancels the existing upgrade
+	if upgrade.Status != urproto.UpgradeStatus_UNKNOWN && upgrade.Status != urproto.UpgradeStatus_CANCELLED {
+		return errors.New("status is not allowed to be set manually")
+	}
+
+	if upgrade.Step != urproto.UpgradeStep_NONE {
+		return errors.New("step is not allowed to be set manually")
+	}
+
+	switch upgrade.Source {
+	case urproto.ProviderType_CHAIN:
+		return errors.New("add upgrade is not supported for chain provider")
+
+	case urproto.ProviderType_DATABASE:
+		if provider, ok := ur.providers[urproto.ProviderType_DATABASE]; ok {
+			return provider.AddUpgrade(ctx, upgrade, overwrite)
+		} else {
+			return errors.New("database provider is not configured")
+		}
+
+	case urproto.ProviderType_LOCAL:
+		if provider, ok := ur.providers[urproto.ProviderType_LOCAL]; ok {
+			return provider.AddUpgrade(ctx, upgrade, overwrite)
+		} else {
+			return errors.New("local provider is not configured")
+		}
+	}
+
+	return fmt.Errorf("unknown upgrade source %s", upgrade.Source.String())
+}
+
+func (ur *UpgradeRegistry) SyncInfo() SyncInfo {
+	ur.lock.RLock()
+	defer ur.lock.RUnlock()
+
+	return ur.syncInfo
+}
+
+func (ur *UpgradeRegistry) Network() string {
+	return ur.network
+}
+
+func resolvePriorities[T interface {
+	GetPriority() int32
+	GetHeight() int64
+}](objects []T) (map[int64]T, map[int64][]T) {
+	groupedByHeight := make(map[int64][]T)
+	for _, object := range objects {
+		groupedByHeight[object.GetHeight()] = append(groupedByHeight[object.GetHeight()], object)
+	}
+
+	resolvedObjects := make(map[int64]T, 0)
+	overriddenObjects := make(map[int64][]T, 0)
+	for height, objects := range groupedByHeight {
+		if len(objects) > 1 {
+			sort.Slice(objects, func(i, j int) bool {
+				if objects[i].GetPriority() == objects[j].GetPriority() {
+					panic(fmt.Errorf("found objects with the same height=%d and priority=%d", objects[i].GetHeight(), objects[i].GetPriority()))
+				}
+				return objects[i].GetPriority() > objects[j].GetPriority()
+			})
+			overriddenObjects[height] = objects[1:]
+		}
+
+		resolvedObjects[objects[0].GetHeight()] = objects[0]
+	}
+
+	return resolvedObjects, overriddenObjects
+}
+
+// check for duplicate upgrades or versions with the same height and priority
+func checkDuplicates[T interface {
+	GetPriority() int32
+	GetHeight() int64
+}](versions []T, providerName urproto.ProviderType) error {
+	set := make(map[int64][]T, len(versions))
+
+	for _, version := range versions {
+		if _, ok := set[version.GetHeight()]; ok {
+			for _, v := range set[version.GetHeight()] {
+				if version.GetPriority() == v.GetPriority() {
+					return fmt.Errorf("found versions with the same height (%d) and priority (%d) from the same source (%s)", version.GetHeight(), version.GetPriority(), providerName)
+				}
+			}
+		}
+		set[version.GetHeight()] = append(set[version.GetHeight()], version)
+	}
+
+	return nil
+}
+
+func sortAndfilterUpgradesByStatus(upgrades map[int64]*urproto.Upgrade, sm *state_machine.StateMachine, height int64, allowedStatus ...urproto.UpgradeStatus) []*urproto.Upgrade {
+	upcomingUpgrades := make([]*urproto.Upgrade, 0)
+	for _, upgrade := range upgrades {
+		currentStatus := sm.GetStatus(upgrade.Height)
+		if upgrade.Height >= height && (len(allowedStatus) == 0 || slices.Contains(allowedStatus, currentStatus)) {
+			upcomingUpgrades = append(upcomingUpgrades, upgrade)
+		}
+	}
+
+	sort.Slice(upcomingUpgrades, func(i, j int) bool {
+		return upcomingUpgrades[i].Height < upcomingUpgrades[j].Height
+	})
+
+	return upcomingUpgrades
+}
+
+func filterUpgradesByHeight(upgrades map[int64]*urproto.Upgrade, height int64) *urproto.Upgrade {
+	if upgrade, ok := upgrades[height]; ok {
+		return upgrade
+	}
+	return nil
+}
+
+func filterVersionsByHeight(versions map[int64]*vrproto.Version, height int64) *vrproto.Version {
+	if version, ok := versions[height]; ok {
+		return version
+	}
+	return nil
+}
+
+func copyMap[T proto.Message](m map[int64]T)
map[int64]T { + newMap := make(map[int64]T, len(m)) + for k, v := range m { + newMap[k] = proto.Clone(v).(T) + } + + return newMap +} + +func copyMapList[T proto.Message](m map[int64][]T) map[int64][]T { + newMap := make(map[int64][]T, len(m)) + for k, v := range m { + newMap[k] = make([]T, len(v)) + for n, vv := range v { + newMap[k][n] = proto.Clone(vv).(T) + } + } + + return newMap +} + +func copyList[T proto.Message](m []T) []T { + newMap := make([]T, len(m)) + for n, v := range m { + newMap[n] = proto.Clone(v).(T) + } + + return newMap +} diff --git a/internal/pkg/upgrades_registry/upgrades_registry_test.go b/internal/pkg/upgrades_registry/upgrades_registry_test.go new file mode 100644 index 0000000..b4eb9c4 --- /dev/null +++ b/internal/pkg/upgrades_registry/upgrades_registry_test.go @@ -0,0 +1,348 @@ +package upgrades_registry + +import ( + "cmp" + "context" + "slices" + "sync" + "testing" + + "blazar/internal/pkg/errors" + urproto "blazar/internal/pkg/proto/upgrades_registry" + vrproto "blazar/internal/pkg/proto/version_resolver" + "blazar/internal/pkg/provider" + "blazar/internal/pkg/provider/database" + "blazar/internal/pkg/provider/local" + "blazar/internal/pkg/testutils" + + sm "blazar/internal/pkg/state_machine" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gorm.io/driver/sqlite" + "gorm.io/gorm" +) + +func addDummyLocalProvider(t *testing.T, ur *UpgradeRegistry) { + _, blazarDir := testutils.NewChainHomeDir(t) + var err error + ur.providers[urproto.ProviderType_LOCAL], err = local.NewProvider( + blazarDir+"/local.db.json", + "test", + 1, + ) + if err != nil { + t.Fatalf("failed to create local provider: %v", err) + } +} + +func prepareMockDatabaseProvider() (*database.Provider, error) { + db, err := gorm.Open(sqlite.Open("file::memory:"), &gorm.Config{}) + if err != nil { + return nil, errors.Wrapf(err, "failed to connect database") + } + err = db.AutoMigrate(&urproto.Upgrade{}) + if err != nil { + return nil, errors.Wrapf(err, "database migration failed for upgrades table") + } + + err = db.AutoMigrate(&vrproto.Version{}) + if err != nil { + return nil, errors.Wrapf(err, "database migration failed for versions table") + } + return database.NewDatabaseProviderWithDB(db, "test", 1), nil +} + +func addDummyDatabaseProvider(t *testing.T, ur *UpgradeRegistry) { + var err error + ur.providers[urproto.ProviderType_DATABASE], err = prepareMockDatabaseProvider() + if err != nil { + t.Fatalf("failed to create database provider: %v", err) + } +} + +func resetProviders(t *testing.T, ur *UpgradeRegistry) { + if _, ok := ur.providers[urproto.ProviderType_LOCAL]; ok { + addDummyLocalProvider(t, ur) + } + if _, ok := ur.providers[urproto.ProviderType_DATABASE]; ok { + addDummyDatabaseProvider(t, ur) + } +} + +func testProviders(t *testing.T, ur *UpgradeRegistry, source urproto.ProviderType) { + tests := []struct { + name string + upgrades []*urproto.Upgrade + testFn func(*testing.T, *UpgradeRegistry) + }{ + { + name: "assert the GetUpcomingUpgrades returns the correct upgrades", + upgrades: []*urproto.Upgrade{ + { + Height: 100, + Tag: "v1.0.0", + Network: "test", + Name: "valid_upcoming_upgrade", + Type: urproto.UpgradeType_NON_GOVERNANCE_COORDINATED, + Status: urproto.UpgradeStatus_UNKNOWN, + Source: source, + }, + { + Height: 101, + Tag: "", + Network: "test", + // NOTE: the upgrade without a tag should be ignored or fail + // however this decision is left to the caller. 
Lack of version tag doesn't + // mean the upgrade doesn't exist, it just means it's not ready to be applied + Name: "valid_upgrade_without_tag", + Type: urproto.UpgradeType_NON_GOVERNANCE_COORDINATED, + Status: urproto.UpgradeStatus_UNKNOWN, + Source: source, + }, + { + Height: 102, + Tag: "v1.0.0", + Network: "test", + Name: "invalid_upcoming_upgrade_due_to_cancelled_status", + Type: urproto.UpgradeType_NON_GOVERNANCE_COORDINATED, + Status: urproto.UpgradeStatus_CANCELLED, + Source: source, + }, + { + Height: 10, + Tag: "v1.0.0", + Network: "test", + Name: "invalid_upcoming_upgrade_due_to_passed_height", + Type: urproto.UpgradeType_NON_GOVERNANCE_COORDINATED, + Status: urproto.UpgradeStatus_UNKNOWN, + Source: source, + }, + }, + testFn: func(t *testing.T, ur *UpgradeRegistry) { + currentHeight := int64(50) + upgrades, err := ur.GetUpcomingUpgrades(context.Background(), false, currentHeight, urproto.UpgradeStatus_ACTIVE) + require.NoError(t, err) + + slices.SortFunc(upgrades, func(i, j *urproto.Upgrade) int { + return cmp.Compare(i.Height, j.Height) + }) + + assert.Len(t, upgrades, 2) + assert.Equal(t, int64(100), upgrades[0].Height) + assert.Equal(t, int64(101), upgrades[1].Height) + + // return all 3 three upcoming upgrades regardless of status + upgradesNoFilter, err := ur.GetUpcomingUpgrades(context.Background(), false, currentHeight) + require.NoError(t, err) + assert.Len(t, upgradesNoFilter, 3) + }, + }, + { + name: "adding duplicate upgrade should fail", + upgrades: []*urproto.Upgrade{ + { + Height: 100, + Tag: "v1.0.0", + Network: "test", + Name: "valid_upcoming_upgrade", + Type: urproto.UpgradeType_NON_GOVERNANCE_COORDINATED, + Status: urproto.UpgradeStatus_UNKNOWN, + Source: source, + }, + }, + testFn: func(t *testing.T, ur *UpgradeRegistry) { + err := ur.AddUpgrade(context.Background(), &urproto.Upgrade{ + Height: 100, + Tag: "different-tag", + Network: "test", + Name: "valid_upcoming_upgrade", + Type: urproto.UpgradeType_NON_GOVERNANCE_COORDINATED, + Status: urproto.UpgradeStatus_UNKNOWN, + Source: source, + }, false) + assert.Error(t, err) + }, + }, + { + name: "cancel upgrade check", + upgrades: []*urproto.Upgrade{ + { + Height: 100, + Tag: "v1.0.0", + Network: "test", + Name: "valid_upcoming_upgrade", + Type: urproto.UpgradeType_NON_GOVERNANCE_COORDINATED, + Status: urproto.UpgradeStatus_UNKNOWN, + Source: source, + }, + }, + testFn: func(t *testing.T, ur *UpgradeRegistry) { + err := ur.CancelUpgrade(context.Background(), 100, source, "test", false) + require.NoError(t, err) + upgrade, err := ur.GetUpgrade(context.Background(), false, 100) + require.NoError(t, err) + assert.Equal(t, urproto.UpgradeStatus_CANCELLED, upgrade.Status) + // non existent upgrade should also not fail + err = ur.CancelUpgrade(context.Background(), 1000000, source, "test", false) + require.NoError(t, err) + upgrade, err = ur.GetUpgrade(context.Background(), false, 1000000) + require.NoError(t, err) + assert.Equal(t, urproto.UpgradeStatus_CANCELLED, upgrade.Status) + }, + }, + { + name: "override upgrade check", + upgrades: []*urproto.Upgrade{ + { + Height: 100, + Tag: "v1.0.0", + Network: "test", + Name: "valid_upcoming_upgrade", + Type: urproto.UpgradeType_NON_GOVERNANCE_COORDINATED, + Status: urproto.UpgradeStatus_UNKNOWN, + Source: source, + }, + }, + testFn: func(t *testing.T, ur *UpgradeRegistry) { + err := ur.AddUpgrade(context.Background(), &urproto.Upgrade{ + Height: 100, + Tag: "different-tag", + Network: "test", + Name: "valid_upcoming_upgrade", + Type: 
urproto.UpgradeType_NON_GOVERNANCE_COORDINATED, + Status: urproto.UpgradeStatus_UNKNOWN, + Source: source, + Priority: 99, + }, false) + require.NoError(t, err) + upgrade, err := ur.GetUpgrade(context.Background(), false, 100) + require.NoError(t, err) + assert.Equal(t, "different-tag", upgrade.Tag) + err = ur.AddUpgrade(context.Background(), &urproto.Upgrade{ + Height: 100, + Tag: "another-tag", + Network: "test", + Name: "valid_upcoming_upgrade", + Type: urproto.UpgradeType_NON_GOVERNANCE_COORDINATED, + Status: urproto.UpgradeStatus_UNKNOWN, + Source: source, + Priority: 98, + }, false) + require.NoError(t, err) + upgrade, err = ur.GetUpgrade(context.Background(), false, 100) + require.NoError(t, err) + assert.Equal(t, "different-tag", upgrade.Tag) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + resetProviders(t, ur) + for _, upgrade := range tt.upgrades { + err := ur.AddUpgrade(context.Background(), upgrade, false) + if err != nil { + t.Fatalf("failed to add upgrade: %v", err) + } + } + + _, _, _, _, err := ur.Update(context.Background(), 50, true) + require.NoError(t, err) + + tt.testFn(t, ur) + }) + } +} + +func TestProviders(t *testing.T) { + ur1 := &UpgradeRegistry{ + providers: make(map[urproto.ProviderType]provider.UpgradeProvider), + upgrades: make(map[int64]*urproto.Upgrade), + network: "test", + lock: &sync.RWMutex{}, + stateMachine: sm.NewStateMachine(nil), + } + addDummyLocalProvider(t, ur1) + t.Run("TestLocal", func(t *testing.T) { + testProviders(t, ur1, urproto.ProviderType_LOCAL) + }) + + ur2 := &UpgradeRegistry{ + providers: make(map[urproto.ProviderType]provider.UpgradeProvider), + upgrades: make(map[int64]*urproto.Upgrade), + network: "test", + lock: &sync.RWMutex{}, + stateMachine: sm.NewStateMachine(nil), + } + addDummyDatabaseProvider(t, ur2) + t.Run("TestDatabase", func(t *testing.T) { + testProviders(t, ur2, urproto.ProviderType_DATABASE) + }) +} + +func TestSimultaneousProviders(t *testing.T) { + ur := &UpgradeRegistry{ + providers: make(map[urproto.ProviderType]provider.UpgradeProvider), + upgrades: make(map[int64]*urproto.Upgrade), + network: "test", + lock: &sync.RWMutex{}, + stateMachine: sm.NewStateMachine(nil), + } + addDummyLocalProvider(t, ur) + addDummyDatabaseProvider(t, ur) + tests := []struct { + name string + upgrades []*urproto.Upgrade + testFn func(*testing.T, *UpgradeRegistry) + }{ + { + name: "test same priority height different provider", + upgrades: []*urproto.Upgrade{ + { + Height: 100, + Tag: "v1.0.0", + Network: "test", + Name: "valid_upcoming_upgrade", + Type: urproto.UpgradeType_NON_GOVERNANCE_COORDINATED, + Status: urproto.UpgradeStatus_UNKNOWN, + Source: urproto.ProviderType_DATABASE, + Priority: 1, + }, + }, + testFn: func(t *testing.T, ur *UpgradeRegistry) { + err := ur.AddUpgrade(context.Background(), &urproto.Upgrade{ + Height: 100, + Tag: "different-tag", + Network: "test", + Name: "different_name", + Type: urproto.UpgradeType_NON_GOVERNANCE_COORDINATED, + Status: urproto.UpgradeStatus_UNKNOWN, + Source: urproto.ProviderType_LOCAL, + Priority: 1, + }, false) + // TODO: this should ideally error + require.NoError(t, err) + assert.PanicsWithError(t, "found objects with the same height=100 and priority=1", func() { + _, _ = ur.GetAllUpgrades(context.Background(), false) + }) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + resetProviders(t, ur) + for _, upgrade := range tt.upgrades { + err := ur.AddUpgrade(context.Background(), upgrade, false) + if err != nil { + 
t.Fatalf("failed to add upgrade: %v", err) + } + } + + _, _, _, _, err := ur.Update(context.Background(), 50, true) + require.NoError(t, err) + + tt.testFn(t, ur) + }) + } +} diff --git a/main.go b/main.go new file mode 100644 index 0000000..1fe0b38 --- /dev/null +++ b/main.go @@ -0,0 +1,9 @@ +package main + +import ( + "blazar/cmd" +) + +func main() { + cmd.Execute() +} diff --git a/migrations/00_init_database.psql b/migrations/00_init_database.psql new file mode 100644 index 0000000..0489c35 --- /dev/null +++ b/migrations/00_init_database.psql @@ -0,0 +1,36 @@ +-- Note, run this as a database superuser. +-- This file has extension .psql instead of .sql, so that it does not get +-- executed by `sqlx`. `sqlx` needs to connect as `blazar_setup` user, but this +-- is the migration that creates that user, so we treat it separately. Also, +-- this file has a `psql` directive in it, so it must be executed by `psql`. + +-- Role used by the application. +CREATE ROLE blazar_app LOGIN PASSWORD 'blazar_app'; + +-- Role used for migrations and administration. +CREATE ROLE blazar_setup LOGIN PASSWORD 'blazar_setup'; + +-- Role used for querying. +CREATE ROLE blazar_readonly LOGIN PASSWORD 'blazar_readonly'; + +CREATE DATABASE blazar OWNER blazar_setup; + +\connect blazar; + +-- By default, PostgreSQL creates a schema "public" that can be modified +-- by any user ("PUBLIC"). We want more selective permissions. +REVOKE ALL PRIVILEGES ON SCHEMA public FROM PUBLIC; +GRANT USAGE ON SCHEMA public TO blazar_app; +GRANT USAGE ON SCHEMA public TO blazar_readonly; +GRANT USAGE, CREATE ON SCHEMA public TO blazar_setup; + +-- The user "blazar_setup" will create new tables. When it creates them, +-- "blazar_app" and "blazar_readonly" should be given access. So we need to +-- modify the default privileges of "blazar_setup" to grant to them. 
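+-- Note: default privileges only affect objects created after this point; any +-- pre-existing tables would need explicit grants, e.g.: +-- GRANT SELECT ON ALL TABLES IN SCHEMA public TO blazar_readonly;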
+ALTER DEFAULT PRIVILEGES FOR ROLE blazar_setup +GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES +TO blazar_app; + +ALTER DEFAULT PRIVILEGES FOR ROLE blazar_setup +GRANT SELECT ON TABLES +TO blazar_readonly; diff --git a/migrations/01_init_tables.psql b/migrations/01_init_tables.psql new file mode 100644 index 0000000..f0dbc00 --- /dev/null +++ b/migrations/01_init_tables.psql @@ -0,0 +1,24 @@ +CREATE TABLE "upgrades" + ( + "height" BIGINT NOT NULL, + "tag" TEXT NOT NULL, + "network" TEXT NOT NULL, + "name" TEXT NOT NULL, + "type" INTEGER NOT NULL, + "status" INTEGER NOT NULL DEFAULT 0, + "step" INTEGER NOT NULL DEFAULT 0, + "priority" INTEGER, + "source" INTEGER NOT NULL, + "proposal_id" BIGINT, + PRIMARY KEY ("height", "network", "priority") + ); + +CREATE TABLE "versions" + ( + "height" BIGINT NOT NULL, + "network" TEXT NOT NULL, + "tag" TEXT, + "source" INTEGER NOT NULL, + "priority" INTEGER, + PRIMARY KEY ("height", "network", "priority") + ); diff --git a/proto/blazar.proto b/proto/blazar.proto new file mode 100644 index 0000000..0f4614e --- /dev/null +++ b/proto/blazar.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; + +option go_package = "internal/pkg/proto/blazar"; + +service Blazar { + rpc GetLatestHeight (GetLatestHeightRequest) returns (GetLatestHeightResponse) {} +} + +message GetLatestHeightRequest {} + +message GetLatestHeightResponse { + int64 height = 1; + string network = 2; +} diff --git a/proto/checks.proto b/proto/checks.proto new file mode 100644 index 0000000..08a453d --- /dev/null +++ b/proto/checks.proto @@ -0,0 +1,32 @@ +syntax = "proto3"; + +option go_package = "internal/pkg/proto/daemon"; + +enum PreCheck { + // Try to fetch the docker image before the upgrade + PULL_DOCKER_IMAGE = 0; + // Set the node's halt-height before non-governance coordinated upgrades + SET_HALT_HEIGHT = 1; +} + +enum PostCheck { + // Check if the gRPC endpoint is reachable + GRPC_RESPONSIVE = 0; + + // Check if the node reached the next block height + CHAIN_HEIGHT_INCREASED = 1; + + // Check if we signed the first block post upgrade + FIRST_BLOCK_VOTED = 2; +} + +enum CheckStatus { + // Check is waiting to be executed + PENDING = 0; + + // Check is currently being executed + RUNNING = 1; + + // Check execution has finished + FINISHED = 2; +} diff --git a/proto/google/api/annotations.proto b/proto/google/api/annotations.proto new file mode 100644 index 0000000..84c4816 --- /dev/null +++ b/proto/google/api/annotations.proto @@ -0,0 +1,31 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.api; + +import "google/api/http.proto"; +import "google/protobuf/descriptor.proto"; + +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; +option java_multiple_files = true; +option java_outer_classname = "AnnotationsProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +extend google.protobuf.MethodOptions { + // See `HttpRule`.
+ HttpRule http = 72295728; +} diff --git a/proto/google/api/http.proto b/proto/google/api/http.proto new file mode 100644 index 0000000..e327037 --- /dev/null +++ b/proto/google/api/http.proto @@ -0,0 +1,371 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.api; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; +option java_multiple_files = true; +option java_outer_classname = "HttpProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +// Defines the HTTP configuration for an API service. It contains a list of +// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method +// to one or more HTTP REST API methods. +message Http { + // A list of HTTP configuration rules that apply to individual API methods. + // + // **NOTE:** All service configuration rules follow "last one wins" order. + repeated HttpRule rules = 1; + + // When set to true, URL path parameters will be fully URI-decoded except in + // cases of single segment matches in reserved expansion, where "%2F" will be + // left encoded. + // + // The default behavior is to not decode RFC 6570 reserved characters in multi + // segment matches. + bool fully_decode_reserved_expansion = 2; +} + +// gRPC Transcoding +// +// gRPC Transcoding is a feature for mapping between a gRPC method and one or +// more HTTP REST endpoints. It allows developers to build a single API service +// that supports both gRPC APIs and REST APIs. Many systems, including [Google +// APIs](https://github.com/googleapis/googleapis), +// [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC +// Gateway](https://github.com/grpc-ecosystem/grpc-gateway), +// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature +// and use it for large scale production services. +// +// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies +// how different portions of the gRPC request message are mapped to the URL +// path, URL query parameters, and HTTP request body. It also controls how the +// gRPC response message is mapped to the HTTP response body. `HttpRule` is +// typically specified as an `google.api.http` annotation on the gRPC method. +// +// Each mapping specifies a URL path template and an HTTP method. The path +// template may refer to one or more fields in the gRPC request message, as long +// as each field is a non-repeated field with a primitive (non-message) type. +// The path template controls how fields of the request message are mapped to +// the URL path. +// +// Example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/{name=messages/*}" +// }; +// } +// } +// message GetMessageRequest { +// string name = 1; // Mapped to URL path. +// } +// message Message { +// string text = 1; // The resource content. 
+// } +// +// This enables an HTTP REST to gRPC mapping as below: +// +// - HTTP: `GET /v1/messages/123456` +// - gRPC: `GetMessage(name: "messages/123456")` +// +// Any fields in the request message which are not bound by the path template +// automatically become HTTP query parameters if there is no HTTP request body. +// For example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get:"/v1/messages/{message_id}" +// }; +// } +// } +// message GetMessageRequest { +// message SubMessage { +// string subfield = 1; +// } +// string message_id = 1; // Mapped to URL path. +// int64 revision = 2; // Mapped to URL query parameter `revision`. +// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. +// } +// +// This enables a HTTP JSON to RPC mapping as below: +// +// - HTTP: `GET /v1/messages/123456?revision=2&sub.subfield=foo` +// - gRPC: `GetMessage(message_id: "123456" revision: 2 sub: +// SubMessage(subfield: "foo"))` +// +// Note that fields which are mapped to URL query parameters must have a +// primitive type or a repeated primitive type or a non-repeated message type. +// In the case of a repeated type, the parameter can be repeated in the URL +// as `...?param=A¶m=B`. In the case of a message type, each field of the +// message is mapped to a separate parameter, such as +// `...?foo.a=A&foo.b=B&foo.c=C`. +// +// For HTTP methods that allow a request body, the `body` field +// specifies the mapping. Consider a REST update method on the +// message resource collection: +// +// service Messaging { +// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { +// option (google.api.http) = { +// patch: "/v1/messages/{message_id}" +// body: "message" +// }; +// } +// } +// message UpdateMessageRequest { +// string message_id = 1; // mapped to the URL +// Message message = 2; // mapped to the body +// } +// +// The following HTTP JSON to RPC mapping is enabled, where the +// representation of the JSON in the request body is determined by +// protos JSON encoding: +// +// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }` +// - gRPC: `UpdateMessage(message_id: "123456" message { text: "Hi!" })` +// +// The special name `*` can be used in the body mapping to define that +// every field not bound by the path template should be mapped to the +// request body. This enables the following alternative definition of +// the update method: +// +// service Messaging { +// rpc UpdateMessage(Message) returns (Message) { +// option (google.api.http) = { +// patch: "/v1/messages/{message_id}" +// body: "*" +// }; +// } +// } +// message Message { +// string message_id = 1; +// string text = 2; +// } +// +// +// The following HTTP JSON to RPC mapping is enabled: +// +// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }` +// - gRPC: `UpdateMessage(message_id: "123456" text: "Hi!")` +// +// Note that when using `*` in the body mapping, it is not possible to +// have HTTP parameters, as all fields not bound by the path end in +// the body. This makes this option more rarely used in practice when +// defining REST APIs. The common usage of `*` is in custom methods +// which don't use the URL at all for transferring data. +// +// It is possible to define multiple HTTP methods for one RPC by using +// the `additional_bindings` option. 
Example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/messages/{message_id}" +// additional_bindings { +// get: "/v1/users/{user_id}/messages/{message_id}" +// } +// }; +// } +// } +// message GetMessageRequest { +// string message_id = 1; +// string user_id = 2; +// } +// +// This enables the following two alternative HTTP JSON to RPC mappings: +// +// - HTTP: `GET /v1/messages/123456` +// - gRPC: `GetMessage(message_id: "123456")` +// +// - HTTP: `GET /v1/users/me/messages/123456` +// - gRPC: `GetMessage(user_id: "me" message_id: "123456")` +// +// Rules for HTTP mapping +// +// 1. Leaf request fields (recursive expansion nested messages in the request +// message) are classified into three categories: +// - Fields referred by the path template. They are passed via the URL path. +// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They +// are passed via the HTTP +// request body. +// - All other fields are passed via the URL query parameters, and the +// parameter name is the field path in the request message. A repeated +// field can be represented as multiple query parameters under the same +// name. +// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL +// query parameter, all fields +// are passed via URL path and HTTP request body. +// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP +// request body, all +// fields are passed via URL path and URL query parameters. +// +// Path template syntax +// +// Template = "/" Segments [ Verb ] ; +// Segments = Segment { "/" Segment } ; +// Segment = "*" | "**" | LITERAL | Variable ; +// Variable = "{" FieldPath [ "=" Segments ] "}" ; +// FieldPath = IDENT { "." IDENT } ; +// Verb = ":" LITERAL ; +// +// The syntax `*` matches a single URL path segment. The syntax `**` matches +// zero or more URL path segments, which must be the last part of the URL path +// except the `Verb`. +// +// The syntax `Variable` matches part of the URL path as specified by its +// template. A variable template must not contain other variables. If a variable +// matches a single path segment, its template may be omitted, e.g. `{var}` +// is equivalent to `{var=*}`. +// +// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` +// contains any reserved character, such characters should be percent-encoded +// before the matching. +// +// If a variable contains exactly one path segment, such as `"{var}"` or +// `"{var=*}"`, when such a variable is expanded into a URL path on the client +// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The +// server side does the reverse decoding. Such variables show up in the +// [Discovery +// Document](https://developers.google.com/discovery/v1/reference/apis) as +// `{var}`. +// +// If a variable contains multiple path segments, such as `"{var=foo/*}"` +// or `"{var=**}"`, when such a variable is expanded into a URL path on the +// client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. +// The server side does the reverse decoding, except "%2F" and "%2f" are left +// unchanged. Such variables show up in the +// [Discovery +// Document](https://developers.google.com/discovery/v1/reference/apis) as +// `{+var}`. +// +// Using gRPC API Service Configuration +// +// gRPC API Service Configuration (service config) is a configuration language +// for configuring a gRPC service to become a user-facing product. 
The +// service config is simply the YAML representation of the `google.api.Service` +// proto message. +// +// As an alternative to annotating your proto file, you can configure gRPC +// transcoding in your service config YAML files. You do this by specifying a +// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same +// effect as the proto annotation. This can be particularly useful if you +// have a proto that is reused in multiple services. Note that any transcoding +// specified in the service config will override any matching transcoding +// configuration in the proto. +// +// The following example selects a gRPC method and applies an `HttpRule` to it: +// +// http: +// rules: +// - selector: example.v1.Messaging.GetMessage +// get: /v1/messages/{message_id}/{sub.subfield} +// +// Special notes +// +// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the +// proto to JSON conversion must follow the [proto3 +// specification](https://developers.google.com/protocol-buffers/docs/proto3#json). +// +// While the single segment variable follows the semantics of +// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String +// Expansion, the multi segment variable **does not** follow RFC 6570 Section +// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion +// does not expand special characters like `?` and `#`, which would lead +// to invalid URLs. As the result, gRPC Transcoding uses a custom encoding +// for multi segment variables. +// +// The path variables **must not** refer to any repeated or mapped field, +// because client libraries are not capable of handling such variable expansion. +// +// The path variables **must not** capture the leading "/" character. The reason +// is that the most common use case "{var}" does not capture the leading "/" +// character. For consistency, all path variables must share the same behavior. +// +// Repeated message fields must not be mapped to URL query parameters, because +// no client library can support such complicated mapping. +// +// If an API needs to use a JSON array for request or response body, it can map +// the request or response body to a repeated field. However, some gRPC +// Transcoding implementations may not support this feature. +message HttpRule { + // Selects a method to which this rule applies. + // + // Refer to [selector][google.api.DocumentationRule.selector] for syntax + // details. + string selector = 1; + + // Determines the URL pattern is matched by this rules. This pattern can be + // used with any of the {get|put|post|delete|patch} methods. A custom method + // can be defined using the 'custom' field. + oneof pattern { + // Maps to HTTP GET. Used for listing and getting information about + // resources. + string get = 2; + + // Maps to HTTP PUT. Used for replacing a resource. + string put = 3; + + // Maps to HTTP POST. Used for creating a resource or performing an action. + string post = 4; + + // Maps to HTTP DELETE. Used for deleting a resource. + string delete = 5; + + // Maps to HTTP PATCH. Used for updating a resource. + string patch = 6; + + // The custom pattern is used for specifying an HTTP method that is not + // included in the `pattern` field, such as HEAD, or "*" to leave the + // HTTP method unspecified for this rule. The wild-card rule is useful + // for services that provide content to Web (HTML) clients. 
+ CustomHttpPattern custom = 8; + } + + // The name of the request field whose value is mapped to the HTTP request + // body, or `*` for mapping all request fields not captured by the path + // pattern to the HTTP body, or omitted for not having any HTTP request body. + // + // NOTE: the referred field must be present at the top-level of the request + // message type. + string body = 7; + + // Optional. The name of the response field whose value is mapped to the HTTP + // response body. When omitted, the entire response message will be used + // as the HTTP response body. + // + // NOTE: The referred field must be present at the top-level of the response + // message type. + string response_body = 12; + + // Additional HTTP bindings for the selector. Nested bindings must + // not contain an `additional_bindings` field themselves (that is, + // the nesting may only be one level deep). + repeated HttpRule additional_bindings = 11; +} + +// A custom pattern is used for defining custom HTTP verb. +message CustomHttpPattern { + // The name of this custom HTTP verb. + string kind = 1; + + // The path matched by this custom verb. + string path = 2; +} diff --git a/proto/upgrades_registry.proto b/proto/upgrades_registry.proto new file mode 100644 index 0000000..d081fab --- /dev/null +++ b/proto/upgrades_registry.proto @@ -0,0 +1,198 @@ +syntax = "proto3"; + +import "google/api/annotations.proto"; + +option go_package = "internal/pkg/proto/upgrades_registry"; + +service UpgradeRegistry { + // register a new upgrade with blazar + rpc AddUpgrade (AddUpgradeRequest) returns (AddUpgradeResponse) { + option (google.api.http) = { post: "/v1/upgrades/add", body: "*" }; + } + + // list upgrades registered with blazar + rpc ListUpgrades (ListUpgradesRequest) returns (ListUpgradesResponse) { + option (google.api.http) = { get: "/v1/upgrades/list" }; + } + + // cancel an upgrade + rpc CancelUpgrade (CancelUpgradeRequest) returns (CancelUpgradeResponse) { + option (google.api.http) = { post: "/v1/upgrades/cancel", body: "*" }; + } + + // force the registry to sync the upgrades from all registered providers + rpc ForceSync (ForceSyncRequest) returns (ForceSyncResponse) { + option (google.api.http) = { post: "/v1/upgrades/force_sync", body: "*" }; + } +} + +enum UpgradeStep { + // NONE is the default step of an upgrade. It means that the upgrade is not being executed + NONE = 0; + + // MONITORING means that blazar sees the upcoming upgrade and is monitoring the chain for the upgrade height + MONITORING = 1; + + // COMPOSE_FILE_UPGRADE indicates that blazar is executing the core part of the upgrade via docker compose + COMPOSE_FILE_UPGRADE = 2; + + // PRE_UPGRADE_CHECK indicates that blazar is executing the pre-upgrade checks + PRE_UPGRADE_CHECK = 3; + + // POST_UPGRADE_CHECK indicates that blazar is executing the post-upgrade checks + POST_UPGRADE_CHECK = 4; +} + +enum UpgradeStatus { + // UNKNOWN is the default status of an upgrade. It means that the status of the upgrade is not known yet + UNKNOWN = 0; + + // SCHEDULED is the initial status of an upgrade. It means that the + // upgrade is registered with the registry but it's not active yet. + // + // An upgrade coming from the chain governance that is still being voted on is marked as scheduled + SCHEDULED = 1; + + // ACTIVE means that the upgrade is acknowledged by network governance or a user and is ready to be executed. + ACTIVE = 2; + + // EXECUTING means that the upgrade is currently being executed. The upgrade height has been reached.
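+ // (an executing upgrade is expected to end up either COMPLETED or FAILED)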
+ EXECUTING = 3; + + // COMPLETED means that the upgrade has been successfully executed. + COMPLETED = 4; + + // FAILED means that the upgrade has failed to execute. + FAILED = 5; + + // CANCELLED means that the upgrade has been cancelled by a user or the network + CANCELLED = 6; + + // EXPIRED means that the upgrade time has passed and blazar did not do anything about it (e.g. a historical upgrade from the chain governance) + EXPIRED = 7; +} + +enum UpgradeType { + // GOVERNANCE is a coordinated upgrade that is initiated by the chain + // governance. The upgrade is expected to be coordinated across all + // validators at a specific height. + // + // Requirements: + // * there is an onchain governance proposal that has passed + GOVERNANCE = 0; + + // NON_GOVERNANCE_COORDINATED means the upgrade is not coming from the chain, + // but rather is initiated by the operators. + // + // Requirements: + // * there should be no onchain governance proposal + // * the upgrade is expected to happen at the same height for all validators (usually it's a state-breaking change) + NON_GOVERNANCE_COORDINATED = 1; + + // NON_GOVERNANCE_UNCOORDINATED means the upgrade is not coming from the chain, + // but rather is initiated by the operators. + // + // Requirements: + // * there should be no onchain governance proposal + // * the upgrade is not expected to happen at any specific height. Validators are free to upgrade at their own pace. (usually non-state-breaking changes) + NON_GOVERNANCE_UNCOORDINATED = 2; +} + +enum ProviderType { + // CHAIN means that the upgrade is coming from onchain governance + CHAIN = 0; + + // LOCAL means that the upgrade is coming from blazar's local storage + LOCAL = 1; + + // DATABASE means that the upgrade is coming from the database (e.g. PostgreSQL) + DATABASE = 2; +} + +message Upgrade { + // the height at which the upgrade is expected to happen + // @gotags: gorm:"primaryKey;not null" + int64 height = 1; + + // docker image tag + // @gotags: gorm:"type:text;not null" + string tag = 2; + + // cosmos network name (e.g. cosmoshub) or chain id (e.g. cosmoshub-4) + // @gotags: gorm:"primaryKey;type:text;not null" + string network = 3; + + // the short title of the upgrade (e.g.
"Coordinated upgrade to v0.42.4 announced on discord channel #announcements") + // @gotags: gorm:"type:text;not null" + string name = 4; + + // type of the upgrade (defines what checks and actions should be taken) + // @gotags: gorm:"not null" + UpgradeType type = 5; + + // status of the upgrade (DONT set this field manually, it's managed by the registry) + // @gotags: gorm:"default:0;not null" + UpgradeStatus status = 6; + + // current execution step (DONT set this field manually, it's managed by the registry) + // @gotags: gorm:"default:0;not null" + UpgradeStep step = 7; + + // priority of the upgrade (highest priority wins) + // @gotags: gorm:"primaryKey" + int32 priority = 8; + + // source of the upgrade + // @gotags: gorm:"not null" + ProviderType source = 9; + + // propoal id associated with the upgrade + optional int64 proposal_id = 10; +} + +// This is the structure of /blazar/upgrades.json +message Upgrades { + repeated Upgrade upgrades = 1; +} + +message AddUpgradeRequest { + // The new upgrade to be registered + Upgrade upgrade = 1; + + // If set to true, the upgrade will be overwritten if it already exists + bool overwrite = 2; +} + +message AddUpgradeResponse {} + +message ListUpgradesRequest { + bool disable_cache = 1; + + optional int64 height = 2; + optional UpgradeType type = 3; + optional ProviderType source = 4; + repeated UpgradeStatus status = 5; + optional int64 limit = 6; +} + +message ListUpgradesResponse { + repeated Upgrade upgrades = 1; +} + +message CancelUpgradeRequest { + int64 height = 1; + ProviderType source = 2; + + // if set to true, the upgrade is cancelled through the state machine, in this case 'source' is ignored + bool force = 3; +} + +message CancelUpgradeResponse {} + +// ForceSyncRequest is used to force the registry to sync the upgrades from all registered providers +message ForceSyncRequest {} + +message ForceSyncResponse { + // the height at which the registry is currently synced + int64 height = 1; +} diff --git a/proto/version_resolver.proto b/proto/version_resolver.proto new file mode 100644 index 0000000..1095def --- /dev/null +++ b/proto/version_resolver.proto @@ -0,0 +1,70 @@ +syntax = "proto3"; + +import "upgrades_registry.proto"; +import "google/api/annotations.proto"; + +option go_package = "internal/pkg/proto/version_resolver"; + +service VersionResolver { + // register a new version tag for a given height and network + rpc AddVersion (RegisterVersionRequest) returns (RegisterVersionResponse) { + option (google.api.http) = { post: "/v1/versions/add", body: "*" }; + } + + // retrieve the version tag for a given height and network + rpc GetVersion (GetVersionRequest) returns (GetVersionResponse) { + option (google.api.http) = { get: "/v1/versions/get" }; + } + + // list all registered versions + rpc ListVersions (ListVersionsRequest) returns (ListVersionsResponse) { + option (google.api.http) = { get: "/v1/versions/list" }; + } +} + +message Version { + // upgrade height the version tag is valid for + // @gotags: gorm:"primaryKey;not null" + int64 height= 1; + + // chain network name + // @gotags: gorm:"primaryKey;type:text;not null" + string network = 2; + + // version tag + string tag = 3; + + // source of the upgrade + // @gotags: gorm:"not null" + ProviderType source = 4; + + // the version priority + // @gotags: gorm:"primaryKey" + int32 priority = 5; +} + +message RegisterVersionRequest { + Version version = 1; + bool overwrite = 2; +} + +message RegisterVersionResponse{} + +message GetVersionRequest { + bool disable_cache = 1; + 
int64 height = 2; +} + +message GetVersionResponse { + optional Version version = 1; +} + +message ListVersionsRequest { + bool disable_cache = 1; + optional int64 height = 2; + optional ProviderType source = 3; +} + +message ListVersionsResponse { + repeated Version versions = 1; +} diff --git a/proxy.sample.toml b/proxy.sample.toml new file mode 100644 index 0000000..cfa9bef --- /dev/null +++ b/proxy.sample.toml @@ -0,0 +1,17 @@ +# The Blazar Proxy binds to the given host and port. +host = "0.0.0.0" +http-port = 1234 + +[[instance]] +name = "localhost" +host = "127.0.0.1" +http-port = 1234 +grpc-port = 5678 +network = "" + +# [[instance]] +# name = "" +# host = "" +# http-port = +# grpc-port = +# network = "" diff --git a/testdata/config/load-webhook-url/webhook b/testdata/config/load-webhook-url/webhook new file mode 100644 index 0000000..4e80d63 --- /dev/null +++ b/testdata/config/load-webhook-url/webhook @@ -0,0 +1,2 @@ +abcd + diff --git a/testdata/config/validate-chain-home/chain-home-dir/data/.gitkeep b/testdata/config/validate-chain-home/chain-home-dir/data/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/testdata/config/validate-chain-home/chain-home-file b/testdata/config/validate-chain-home/chain-home-file new file mode 100644 index 0000000..e69de29 diff --git a/testdata/config/validate-chain-home/chain-home-with-upgrade-json/data/upgrade-info.json b/testdata/config/validate-chain-home/chain-home-with-upgrade-json/data/upgrade-info.json new file mode 100644 index 0000000..e69de29 diff --git a/testdata/config/validate-compose-file/invalid_mode.toml b/testdata/config/validate-compose-file/invalid_mode.toml new file mode 100644 index 0000000..c771774 --- /dev/null +++ b/testdata/config/validate-compose-file/invalid_mode.toml @@ -0,0 +1 @@ +upgrade-mode = "something" diff --git a/testdata/config/validate-compose-file/some-directory/.gitkeep b/testdata/config/validate-compose-file/some-directory/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/testdata/config/validate-compose-file/valid-compose.yaml b/testdata/config/validate-compose-file/valid-compose.yaml new file mode 100644 index 0000000..e69de29 diff --git a/testdata/config/validate-docker-credential-helper/some-directory/.gitkeep b/testdata/config/validate-docker-credential-helper/some-directory/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/testdata/config/validate-docker-credential-helper/valid-docker-credential-helper b/testdata/config/validate-docker-credential-helper/valid-docker-credential-helper new file mode 100755 index 0000000..405de08 --- /dev/null +++ b/testdata/config/validate-docker-credential-helper/valid-docker-credential-helper @@ -0,0 +1,2 @@ +#!/bin/sh +echo "{\"Username\": \"test\", \"Secret\": \"test\"}" diff --git a/testdata/daemon/blazar/.gitkeep b/testdata/daemon/blazar/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/testdata/daemon/chain-home/config/app.toml b/testdata/daemon/chain-home/config/app.toml new file mode 100644 index 0000000..a80f875 --- /dev/null +++ b/testdata/daemon/chain-home/config/app.toml @@ -0,0 +1,242 @@ +# This is a TOML config file. +# For more information, see https://github.com/toml-lang/toml + +############################################################################### +### Base Configuration ### +############################################################################### + +# The minimum gas prices a validator is willing to accept for processing a +# transaction.
A transaction's fees must meet the minimum of any denomination +# specified in this config (e.g. 0.25token1,0.0001token2). +minimum-gas-prices = "0stake" + +# The maximum gas a query coming over rest/grpc may consume. +# If this is set to zero, the query can consume an unbounded amount of gas. +query-gas-limit = "0" + +# default: the last 362880 states are kept, pruning at 10 block intervals +# nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node) +# everything: 2 latest states will be kept; pruning at 10 block intervals. +# custom: allow pruning options to be manually specified through 'pruning-keep-recent', and 'pruning-interval' +pruning = "default" + +# These are applied if and only if the pruning strategy is custom. +pruning-keep-recent = "0" +pruning-interval = "0" + +# HaltHeight contains a non-zero block height at which a node will gracefully +# halt and shutdown that can be used to assist upgrades and testing. +# +# Note: Commitment of state will be attempted on the corresponding block. +halt-height = 0 + +# HaltTime contains a non-zero minimum block time (in Unix seconds) at which +# a node will gracefully halt and shutdown that can be used to assist upgrades +# and testing. +# +# Note: Commitment of state will be attempted on the corresponding block. +halt-time = 0 + +# MinRetainBlocks defines the minimum block height offset from the current +# block being committed, such that all blocks past this offset are pruned +# from CometBFT. It is used as part of the process of determining the +# ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates +# that no blocks should be pruned. +# +# This configuration value is only responsible for pruning CometBFT blocks. +# It has no bearing on application state pruning which is determined by the +# "pruning-*" configurations. +# +# Note: CometBFT block pruning is dependent on this parameter in conjunction +# with the unbonding (safety threshold) period, state pruning and state sync +# snapshot parameters to determine the correct minimum value of +# ResponseCommit.RetainHeight. +min-retain-blocks = 0 + +# InterBlockCache enables inter-block caching. +inter-block-cache = true + +# IndexEvents defines the set of events in the form {eventType}.{attributeKey}, +# which informs CometBFT what to index. If empty, all events will be indexed. +# +# Example: +# ["message.sender", "message.recipient"] +index-events = [] + +# IavlCacheSize set the size of the iavl tree cache (in number of nodes). +iavl-cache-size = 781250 + +# IAVLDisableFastNode enables or disables the fast node feature of IAVL. +# Default is false. +iavl-disable-fastnode = false + +# AppDBBackend defines the database backend type to use for the application and snapshots DBs. +# An empty string indicates that a fallback will be used. +# The fallback is the db_backend value set in CometBFT's config.toml. +app-db-backend = "" + +############################################################################### +### Telemetry Configuration ### +############################################################################### + +[telemetry] + +# Prefixed with keys to separate services. +service-name = "" + +# Enabled enables the application telemetry functionality. When enabled, +# an in-memory sink is also enabled by default. Operators may also enable +# other sinks such as Prometheus. +enabled = false + +# Enable prefixing gauge values with hostname. +enable-hostname = false + +# Enable adding hostname to labels.
+enable-hostname-label = false + +# Enable adding service to labels. +enable-service-label = false + +# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink. +prometheus-retention-time = 0 + +# GlobalLabels defines a global set of name/value label tuples applied to all +# metrics emitted using the wrapper functions defined in telemetry package. +# +# Example: +# [["chain_id", "cosmoshub-1"]] +global-labels = [] + +# MetricsSink defines the type of metrics sink to use. +metrics-sink = "" + +# StatsdAddr defines the address of a statsd server to send metrics to. +# Only utilized if MetricsSink is set to "statsd" or "dogstatsd". +statsd-addr = "" + +# DatadogHostname defines the hostname to use when emitting metrics to +# Datadog. Only utilized if MetricsSink is set to "dogstatsd". +datadog-hostname = "" + +############################################################################### +### API Configuration ### +############################################################################### + +[api] + +# Enable defines if the API server should be enabled. +enable = true + +# Swagger defines if swagger documentation should automatically be registered. +swagger = false + +# Address defines the API server to listen on. +address = "tcp://0.0.0.0:1317" + +# MaxOpenConnections defines the number of maximum open connections. +max-open-connections = 1000 + +# RPCReadTimeout defines the CometBFT RPC read timeout (in seconds). +rpc-read-timeout = 10 + +# RPCWriteTimeout defines the CometBFT RPC write timeout (in seconds). +rpc-write-timeout = 0 + +# RPCMaxBodyBytes defines the CometBFT maximum request body (in bytes). +rpc-max-body-bytes = 1000000 + +# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). +enabled-unsafe-cors = false + +############################################################################### +### gRPC Configuration ### +############################################################################### + +[grpc] + +# Enable defines if the gRPC server should be enabled. +enable = true + +# Address defines the gRPC server address to bind to. +address = "0.0.0.0:9090" + +# MaxRecvMsgSize defines the max message size in bytes the server can receive. +# The default value is 10MB. +max-recv-msg-size = "10485760" + +# MaxSendMsgSize defines the max message size in bytes the server can send. +# The default value is math.MaxInt32. +max-send-msg-size = "2147483647" + +############################################################################### +### gRPC Web Configuration ### +############################################################################### + +[grpc-web] + +# GRPCWebEnable defines if the gRPC-web should be enabled. +# NOTE: gRPC must also be enabled, otherwise, this configuration is a no-op. +# NOTE: gRPC-Web uses the same address as the API server. +enable = true + +############################################################################### +### State Sync Configuration ### +############################################################################### + +# State sync snapshots allow other nodes to rapidly join the network without replaying historical +# blocks, instead downloading and applying a snapshot of the application state at a given height. +[state-sync] + +# snapshot-interval specifies the block interval at which local state sync snapshots are +# taken (0 to disable). +snapshot-interval = 0 + +# snapshot-keep-recent specifies the number of recent snapshots to keep and serve (0 to keep all). 
+snapshot-keep-recent = 2 + +############################################################################### +### State Streaming ### +############################################################################### + +# Streaming allows nodes to stream state to external systems. +[streaming] + +# streaming.abci specifies the configuration for the ABCI Listener streaming service. +[streaming.abci] + +# List of kv store keys to stream out via gRPC. +# The store key names MUST match the module's StoreKey name. +# +# Example: +# ["acc", "bank", "gov", "staking", "mint"[,...]] +# ["*"] to expose all keys. +keys = [] + +# The plugin name used for streaming via gRPC. +# Streaming is only enabled if this is set. +# Supported plugins: abci +plugin = "" + +# stop-node-on-err specifies whether to stop the node on message delivery error. +stop-node-on-err = true + +############################################################################### +### Mempool ### +############################################################################### + +[mempool] + +# Setting max-txs to 0 will allow for an unbounded amount of transactions in the mempool. +# Setting max_txs to negative 1 (-1) will disable transactions from being inserted into the mempool (no-op mempool). +# Setting max_txs to a positive number (> 0) will limit the number of transactions in the mempool, by the specified amount. +# +# Note, this configuration only applies to SDK built-in app-side mempool +# implementations. +max-txs = -1 + +[custom] + +# That field will be parsed by server.InterceptConfigsPreRunHandler and held by viper. +# Do not forget to add quotes around the value if it is a string. +custom-field = "anything" diff --git a/testdata/daemon/chain-home/config/client.toml b/testdata/daemon/chain-home/config/client.toml new file mode 100644 index 0000000..05f8ad9 --- /dev/null +++ b/testdata/daemon/chain-home/config/client.toml @@ -0,0 +1,21 @@ +# This is a TOML config file. +# For more information, see https://github.com/toml-lang/toml + +############################################################################### +### Client Configuration ### +############################################################################### + +# The network chain ID +chain-id = "demo" + +# The keyring's backend, where the keys are stored (os|file|kwallet|pass|test|memory) +keyring-backend = "test" + +# CLI output format (text|json) +output = "text" + +# <host>:<port> to CometBFT RPC interface for this chain +node = "tcp://localhost:26657" + +# Transaction broadcasting mode (sync|async) +broadcast-mode = "sync" diff --git a/testdata/daemon/chain-home/config/config.toml b/testdata/daemon/chain-home/config/config.toml new file mode 100644 index 0000000..52c7948 --- /dev/null +++ b/testdata/daemon/chain-home/config/config.toml @@ -0,0 +1,498 @@ +# This is a TOML config file. +# For more information, see https://github.com/toml-lang/toml + +# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or +# relative to the home directory (e.g. "data"). The home directory is +# "$HOME/.cometbft" by default, but could be changed via $CMTHOME env variable +# or --home cmd flag. + +# The version of the CometBFT binary that created or +# last modified the config file. Do not modify this.
+version = "0.38.12" + +####################################################################### +### Main Base Config Options ### +####################################################################### + +# TCP or UNIX socket address of the ABCI application, +# or the name of an ABCI application compiled in with the CometBFT binary +proxy_app = "tcp://127.0.0.1:26658" + +# A custom human readable name for this node +moniker = "test" + +# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb +# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) +# - pure go +# - stable +# * cleveldb (uses levigo wrapper) +# - fast +# - requires gcc +# - use cleveldb build tag (go build -tags cleveldb) +# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) +# - EXPERIMENTAL +# - may be faster is some use-cases (random reads - indexer) +# - use boltdb build tag (go build -tags boltdb) +# * rocksdb (uses github.com/tecbot/gorocksdb) +# - EXPERIMENTAL +# - requires gcc +# - use rocksdb build tag (go build -tags rocksdb) +# * badgerdb (uses github.com/dgraph-io/badger) +# - EXPERIMENTAL +# - use badgerdb build tag (go build -tags badgerdb) +db_backend = "goleveldb" + +# Database directory +db_dir = "data" + +# Output level for logging, including package level options +log_level = "info" + +# Output format: 'plain' (colored text) or 'json' +log_format = "plain" + +##### additional base config options ##### + +# Path to the JSON file containing the initial validator set and other meta data +genesis_file = "config/genesis.json" + +# Path to the JSON file containing the private key to use as a validator in the consensus protocol +priv_validator_key_file = "config/priv_validator_key.json" + +# Path to the JSON file containing the last sign state of a validator +priv_validator_state_file = "data/priv_validator_state.json" + +# TCP or UNIX socket address for CometBFT to listen on for +# connections from an external PrivValidator process +priv_validator_laddr = "" + +# Path to the JSON file containing the private key to use for node authentication in the p2p protocol +node_key_file = "config/node_key.json" + +# Mechanism to connect to the ABCI application: socket | grpc +abci = "socket" + +# If true, query the ABCI app on connecting to a new peer +# so the app can decide if we should keep the connection or not +filter_peers = false + + +####################################################################### +### Advanced Configuration Options ### +####################################################################### + +####################################################### +### RPC Server Configuration Options ### +####################################################### +[rpc] + +# TCP or UNIX socket address for the RPC server to listen on +laddr = "tcp://0.0.0.0:26657" + +# A list of origins a cross-domain request can be executed from +# Default value '[]' disables cors support +# Use '["*"]' to allow any origin +cors_allowed_origins = [] + +# A list of methods the client is allowed to use with cross-domain requests +cors_allowed_methods = ["HEAD", "GET", "POST", ] + +# A list of non simple headers the client is allowed to use with cross-domain requests +cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] + +# TCP or UNIX socket address for the gRPC server to listen on +# NOTE: This server only supports /broadcast_tx_commit +grpc_laddr = "" + +# Maximum number of simultaneous connections. 
+# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 +grpc_max_open_connections = 900 + +# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool +unsafe = false + +# Maximum number of simultaneous connections (including WebSocket). +# Does not include gRPC connections. See grpc_max_open_connections +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 +max_open_connections = 900 + +# Maximum number of unique clientIDs that can /subscribe +# If you're using /broadcast_tx_commit, set to the estimated maximum number +# of broadcast_tx_commit calls per block. +max_subscription_clients = 100 + +# Maximum number of unique queries a given client can /subscribe to +# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to +# the estimated # maximum number of broadcast_tx_commit calls per block. +max_subscriptions_per_client = 5 + +# Experimental parameter to specify the maximum number of events a node will +# buffer, per subscription, before returning an error and closing the +# subscription. Must be set to at least 100, but higher values will accommodate +# higher event throughput rates (and will use more memory). +experimental_subscription_buffer_size = 200 + +# Experimental parameter to specify the maximum number of RPC responses that +# can be buffered per WebSocket client. If clients cannot read from the +# WebSocket endpoint fast enough, they will be disconnected, so increasing this +# parameter may reduce the chances of them being disconnected (but will cause +# the node to use more memory). +# +# Must be at least the same as "experimental_subscription_buffer_size", +# otherwise connections could be dropped unnecessarily. This value should +# ideally be somewhat higher than "experimental_subscription_buffer_size" to +# accommodate non-subscription-related RPC responses. +experimental_websocket_write_buffer_size = 200 + +# If a WebSocket client cannot read fast enough, at present we may +# silently drop events instead of generating an error or disconnecting the +# client. +# +# Enabling this experimental parameter will cause the WebSocket connection to +# be closed instead if it cannot read fast enough, allowing for greater +# predictability in subscription behavior. +experimental_close_on_slow_client = false + +# How long to wait for a tx to be committed during /broadcast_tx_commit. +# WARNING: Using a value larger than 10s will result in increasing the +# global HTTP write timeout, which applies to all connections and endpoints. +# See https://github.com/tendermint/tendermint/issues/3435 +timeout_broadcast_tx_commit = "10s" + +# Maximum number of requests that can be sent in a batch +# If the value is set to '0' (zero-value), then no maximum batch size will be +# enforced for a JSON-RPC batch request. 
+max_request_batch_size = 10 + +# Maximum size of request body, in bytes +max_body_bytes = 1000000 + +# Maximum size of request header, in bytes +max_header_bytes = 1048576 + +# The path to a file containing certificate that is used to create the HTTPS server. +# Might be either absolute path or path related to CometBFT's config directory. +# If the certificate is signed by a certificate authority, +# the certFile should be the concatenation of the server's certificate, any intermediates, +# and the CA's certificate. +# NOTE: both tls_cert_file and tls_key_file must be present for CometBFT to create HTTPS server. +# Otherwise, HTTP server is run. +tls_cert_file = "" + +# The path to a file containing matching private key that is used to create the HTTPS server. +# Might be either absolute path or path related to CometBFT's config directory. +# NOTE: both tls-cert-file and tls-key-file must be present for CometBFT to create HTTPS server. +# Otherwise, HTTP server is run. +tls_key_file = "" + +# pprof listen address (https://golang.org/pkg/net/http/pprof) +pprof_laddr = "localhost:6060" + +####################################################### +### P2P Configuration Options ### +####################################################### +[p2p] + +# Address to listen for incoming connections +laddr = "tcp://0.0.0.0:26656" + +# Address to advertise to peers for them to dial. If empty, will use the same +# port as the laddr, and will introspect on the listener to figure out the +# address. IP and port are required. Example: 159.89.10.97:26656 +external_address = "" + +# Comma separated list of seed nodes to connect to +seeds = "" + +# Comma separated list of nodes to keep persistent connections to +persistent_peers = "" + +# Path to address book +addr_book_file = "config/addrbook.json" + +# Set true for strict address routability rules +# Set false for private or local networks +addr_book_strict = true + +# Maximum number of inbound peers +max_num_inbound_peers = 40 + +# Maximum number of outbound peers to connect to, excluding persistent peers +max_num_outbound_peers = 10 + +# List of node IDs, to which a connection will be (re)established ignoring any existing limits +unconditional_peer_ids = "" + +# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) +persistent_peers_max_dial_period = "0s" + +# Time to wait before flushing messages out on the connection +flush_throttle_timeout = "100ms" + +# Maximum size of a message packet payload, in bytes +max_packet_msg_payload_size = 1024 + +# Rate at which packets can be sent, in bytes/second +send_rate = 5120000 + +# Rate at which packets can be received, in bytes/second +recv_rate = 5120000 + +# Set true to enable the peer-exchange reactor +pex = true + +# Seed mode, in which node constantly crawls the network and looks for +# peers. If another node asks it for addresses, it responds and disconnects. +# +# Does not work if the peer-exchange reactor is disabled. +seed_mode = false + +# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) +private_peer_ids = "" + +# Toggle to disable guard against peers connecting from the same ip. +allow_duplicate_ip = false + +# Peer connection configuration. +handshake_timeout = "20s" +dial_timeout = "3s" + +####################################################### +### Mempool Configuration Option ### +####################################################### +[mempool] + +# The type of mempool for this node to use. 
+# +# Possible types: +# - "flood" : concurrent linked list mempool with flooding gossip protocol +# (default) +# - "nop" : nop-mempool (short for no operation; the ABCI app is responsible +# for storing, disseminating and proposing txs). "create_empty_blocks=false" is +# not supported. +type = "flood" + +# Recheck (default: true) defines whether CometBFT should recheck the +# validity for all remaining transaction in the mempool after a block. +# Since a block affects the application state, some transactions in the +# mempool may become invalid. If this does not apply to your application, +# you can disable rechecking. +recheck = true + +# recheck_timeout is the time the application has during the rechecking process +# to return CheckTx responses, once all requests have been sent. Responses that +# arrive after the timeout expires are discarded. It only applies to +# non-local ABCI clients and when recheck is enabled. +# +# The ideal value will strongly depend on the application. It could roughly be estimated as the +# average size of the mempool multiplied by the average time it takes the application to validate one +# transaction. We consider that the ABCI application runs in the same location as the CometBFT binary +# so that the recheck duration is not affected by network delays when making requests and receiving responses. +recheck_timeout = "1s" + +# Broadcast (default: true) defines whether the mempool should relay +# transactions to other peers. Setting this to false will stop the mempool +# from relaying transactions to other peers until they are included in a +# block. In other words, if Broadcast is disabled, only the peer you send +# the tx to will see it until it is included in a block. +broadcast = true + +# WalPath (default: "") configures the location of the Write Ahead Log +# (WAL) for the mempool. The WAL is disabled by default. To enable, set +# WalPath to where you want the WAL to be written (e.g. +# "data/mempool.wal"). +wal_dir = "" + +# Maximum number of transactions in the mempool +size = 5000 + +# Limit the total size of all txs in the mempool. +# This only accounts for raw transactions (e.g. given 1MB transactions and +# max_txs_bytes=5MB, mempool will only accept 5 transactions). +max_txs_bytes = 1073741824 + +# Size of the cache (used to filter transactions we saw earlier) in transactions +cache_size = 10000 + +# Do not remove invalid transactions from the cache (default: false) +# Set to true if it's not possible for any invalid transaction to become valid +# again in the future. +keep-invalid-txs-in-cache = false + +# Maximum size of a single transaction. +# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. +max_tx_bytes = 1048576 + +# Maximum size of a batch of transactions to send to a peer +# Including space needed by encoding (one varint per transaction). +# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 +max_batch_bytes = 0 + +# Experimental parameters to limit gossiping txs to up to the specified number of peers. +# We use two independent upper values for persistent and non-persistent peers. +# Unconditional peers are not affected by this feature. +# If we are connected to more than the specified number of persistent peers, only send txs to +# ExperimentalMaxGossipConnectionsToPersistentPeers of them. If one of those +# persistent peers disconnects, activate another persistent peer. +# Similarly for non-persistent peers, with an upper limit of +# ExperimentalMaxGossipConnectionsToNonPersistentPeers. 
+# If set to 0, the feature is disabled for the corresponding group of peers, that is, the +# number of active connections to that group of peers is not bounded. +# For non-persistent peers, if enabled, a value of 10 is recommended based on experimental +# performance results using the default P2P configuration. +experimental_max_gossip_connections_to_persistent_peers = 0 +experimental_max_gossip_connections_to_non_persistent_peers = 0 + +####################################################### +### State Sync Configuration Options ### +####################################################### +[statesync] +# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine +# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in +# the network to take and serve state machine snapshots. State sync is not attempted if the node +# has any local state (LastBlockHeight > 0). The node will have a truncated block history, +# starting from the height of the snapshot. +enable = false + +# RPC servers (comma-separated) for light client verification of the synced state machine and +# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding +# header hash obtained from a trusted source, and a period during which validators can be trusted. +# +# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 +# weeks) during which they can be financially punished (slashed) for misbehavior. +rpc_servers = "" +trust_height = 0 +trust_hash = "" +trust_period = "168h0m0s" + +# Time to spend discovering snapshots before initiating a restore. +discovery_time = "15s" + +# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). +# Will create a new, randomly named directory within, and remove it when done. +temp_dir = "" + +# The timeout duration before re-requesting a chunk, possibly from a different +# peer (default: 1 minute). +chunk_request_timeout = "10s" + +# The number of concurrent chunk fetchers to run (default: 1). +chunk_fetchers = "4" + +####################################################### +### Block Sync Configuration Options ### +####################################################### +[blocksync] + +# Block Sync version to use: +# +# In v0.37, v1 and v2 of the block sync protocols were deprecated. +# Please use v0 instead. +# +# 1) "v0" - the default block sync implementation +version = "v0" + +####################################################### +### Consensus Configuration Options ### +####################################################### +[consensus] + +wal_file = "data/cs.wal/wal" + +# How long we wait for a proposal block before prevoting nil +timeout_propose = "3s" +# How much timeout_propose increases with each round +timeout_propose_delta = "500ms" +# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) +timeout_prevote = "1s" +# How much the timeout_prevote increases with each round +timeout_prevote_delta = "500ms" +# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) +timeout_precommit = "1s" +# How much the timeout_precommit increases with each round +timeout_precommit_delta = "500ms" +# How long we wait after committing a block, before starting on the new +# height (this gives us a chance to receive some more precommits, even +# though we already have +2/3). 
+timeout_commit = "2s"
+
+# How many blocks to look back to check existence of the node's consensus votes before joining consensus
+# When non-zero, the node will panic upon restart
+# if the same consensus key was used to sign {double_sign_check_height} last blocks.
+# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic.
+double_sign_check_height = 0
+
+# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
+skip_timeout_commit = false
+
+# EmptyBlocks mode and possible interval between empty blocks
+create_empty_blocks = true
+create_empty_blocks_interval = "0s"
+
+# Reactor sleep duration parameters
+peer_gossip_sleep_duration = "100ms"
+peer_query_maj23_sleep_duration = "2s"
+
+#######################################################
+###         Storage Configuration Options           ###
+#######################################################
+[storage]
+
+# Set to true to discard ABCI responses from the state store, which can save a
+# considerable amount of disk space. Set to false to ensure ABCI responses are
+# persisted. ABCI responses are required for /block_results RPC queries, and to
+# reindex events in the command-line tool.
+discard_abci_responses = false
+
+#######################################################
+###   Transaction Indexer Configuration Options     ###
+#######################################################
+[tx_index]
+
+# What indexer to use for transactions
+#
+# The application will set which txs to index. In some cases a node operator will be able
+# to decide which txs to index based on configuration set in the application.
+#
+# Options:
+#   1) "null"
+#   2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
+#     - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed.
+#   3) "psql" - the indexer services backed by PostgreSQL.
+# When "kv" or "psql" is chosen "tx.height" and "tx.hash" will always be indexed.
+indexer = "kv"
+
+# The PostgreSQL connection configuration, the connection format:
+#   postgresql://<user>:<password>@<host>:<port>/<db>?<opts>
+psql-conn = ""
+
+#######################################################
+###       Instrumentation Configuration Options     ###
+#######################################################
+[instrumentation]
+
+# When true, Prometheus metrics are served under /metrics on
+# PrometheusListenAddr.
+# Check out the documentation for the list of available metrics.
+prometheus = false
+
+# Address to listen for Prometheus collector(s) connections
+prometheus_listen_addr = ":26660"
+
+# Maximum number of simultaneous connections.
+# If you want to accept a larger number than the default, make sure
+# you increase your OS limits.
+# 0 - unlimited.
+max_open_connections = 3 + +# Instrumentation namespace +namespace = "cometbft" diff --git a/testdata/daemon/chain-home/config/genesis.json b/testdata/daemon/chain-home/config/genesis.json new file mode 100644 index 0000000..ef6b0e9 --- /dev/null +++ b/testdata/daemon/chain-home/config/genesis.json @@ -0,0 +1,296 @@ +{ + "app_name": "simd", + "app_version": "0.50.10", + "genesis_time": "2024-10-20T21:03:44.393891685Z", + "chain_id": "demo", + "initial_height": 1, + "app_hash": null, + "app_state": { + "auth": { + "params": { + "max_memo_characters": "256", + "tx_sig_limit": "7", + "tx_size_cost_per_byte": "10", + "sig_verify_cost_ed25519": "590", + "sig_verify_cost_secp256k1": "1000" + }, + "accounts": [ + { + "@type": "/cosmos.auth.v1beta1.BaseAccount", + "address": "cosmos1p972yd2ygx2ffrx8uyzc366z8r54qpq6pcaaey", + "pub_key": null, + "account_number": "0", + "sequence": "0" + }, + { + "@type": "/cosmos.auth.v1beta1.BaseAccount", + "address": "cosmos1qjcnvvuk5tsx8mlgwqj6n66n59y57v2sqyq8tj", + "pub_key": null, + "account_number": "1", + "sequence": "0" + } + ] + }, + "authz": { + "authorization": [] + }, + "bank": { + "params": { + "send_enabled": [], + "default_send_enabled": true + }, + "balances": [ + { + "address": "cosmos1qjcnvvuk5tsx8mlgwqj6n66n59y57v2sqyq8tj", + "coins": [ + { + "denom": "stake", + "amount": "5000000000" + } + ] + }, + { + "address": "cosmos1p972yd2ygx2ffrx8uyzc366z8r54qpq6pcaaey", + "coins": [ + { + "denom": "stake", + "amount": "5000000000" + } + ] + } + ], + "supply": [ + { + "denom": "stake", + "amount": "10000000000" + } + ], + "denom_metadata": [], + "send_enabled": [] + }, + "circuit": { + "account_permissions": [], + "disabled_type_urls": [] + }, + "consensus": null, + "crisis": { + "constant_fee": { + "denom": "stake", + "amount": "1000" + } + }, + "distribution": { + "params": { + "community_tax": "0.020000000000000000", + "base_proposer_reward": "0.000000000000000000", + "bonus_proposer_reward": "0.000000000000000000", + "withdraw_addr_enabled": true + }, + "fee_pool": { + "community_pool": [] + }, + "delegator_withdraw_infos": [], + "previous_proposer": "", + "outstanding_rewards": [], + "validator_accumulated_commissions": [], + "validator_historical_rewards": [], + "validator_current_rewards": [], + "delegator_starting_infos": [], + "validator_slash_events": [] + }, + "evidence": { + "evidence": [] + }, + "feegrant": { + "allowances": [] + }, + "genutil": { + "gen_txs": [ + { + "body": { + "messages": [ + { + "@type": "/cosmos.staking.v1beta1.MsgCreateValidator", + "description": { + "moniker": "test", + "identity": "", + "website": "", + "security_contact": "", + "details": "" + }, + "commission": { + "rate": "0.100000000000000000", + "max_rate": "0.200000000000000000", + "max_change_rate": "0.010000000000000000" + }, + "min_self_delegation": "1", + "delegator_address": "", + "validator_address": "cosmosvaloper1p972yd2ygx2ffrx8uyzc366z8r54qpq6yvfg4h", + "pubkey": { + "@type": "/cosmos.crypto.ed25519.PubKey", + "key": "o2e9nKo4olR+XcPKeWeZYKYa4dSHeeRsKx6WJwhGZLg=" + }, + "value": { + "denom": "stake", + "amount": "1000000" + } + } + ], + "memo": "fe9b522e38d3b02a85d3b89da5ed5ef8af183148@192.168.1.36:26656", + "timeout_height": "0", + "extension_options": [], + "non_critical_extension_options": [] + }, + "auth_info": { + "signer_infos": [ + { + "public_key": { + "@type": "/cosmos.crypto.secp256k1.PubKey", + "key": "AtLu/W3vulnYQN3Vux1dRlRFpCT3iNCwsFp8bAmvKZIm" + }, + "mode_info": { + "single": { + "mode": "SIGN_MODE_DIRECT" + } + }, + "sequence": "0" + 
} + ], + "fee": { + "amount": [], + "gas_limit": "200000", + "payer": "", + "granter": "" + }, + "tip": null + }, + "signatures": [ + "mr1DMvJF7OUVk0AeUfIMYPFI2FlREaK/hCZayP2toDkkmSo+AXsjmpQi8uisXJLXfHpwr3mBsyqzBmb+0ZG0zA==" + ] + } + ] + }, + "gov": { + "starting_proposal_id": "1", + "deposits": [], + "votes": [], + "proposals": [], + "deposit_params": null, + "voting_params": null, + "tally_params": null, + "params": { + "min_deposit": [ + { + "denom": "stake", + "amount": "10000000" + } + ], + "max_deposit_period": "172800s", + "voting_period": "10s", + "quorum": "0.334000000000000000", + "threshold": "0.500000000000000000", + "veto_threshold": "0.334000000000000000", + "min_initial_deposit_ratio": "0.000000000000000000", + "proposal_cancel_ratio": "0.500000000000000000", + "proposal_cancel_dest": "", + "expedited_voting_period": "8s", + "expedited_threshold": "0.667000000000000000", + "expedited_min_deposit": [ + { + "denom": "stake", + "amount": "50000000" + } + ], + "burn_vote_quorum": false, + "burn_proposal_deposit_prevote": false, + "burn_vote_veto": true, + "min_deposit_ratio": "0.010000000000000000" + }, + "constitution": "" + }, + "group": { + "group_seq": "0", + "groups": [], + "group_members": [], + "group_policy_seq": "0", + "group_policies": [], + "proposal_seq": "0", + "proposals": [], + "votes": [] + }, + "mint": { + "minter": { + "inflation": "0.130000000000000000", + "annual_provisions": "0.000000000000000000" + }, + "params": { + "mint_denom": "stake", + "inflation_rate_change": "0.130000000000000000", + "inflation_max": "0.200000000000000000", + "inflation_min": "0.070000000000000000", + "goal_bonded": "0.670000000000000000", + "blocks_per_year": "6311520" + } + }, + "nft": { + "classes": [], + "entries": [] + }, + "params": null, + "runtime": null, + "slashing": { + "params": { + "signed_blocks_window": "100", + "min_signed_per_window": "0.500000000000000000", + "downtime_jail_duration": "600s", + "slash_fraction_double_sign": "0.050000000000000000", + "slash_fraction_downtime": "0.010000000000000000" + }, + "signing_infos": [], + "missed_blocks": [] + }, + "staking": { + "params": { + "unbonding_time": "1814400s", + "max_validators": 100, + "max_entries": 7, + "historical_entries": 10000, + "bond_denom": "stake", + "min_commission_rate": "0.000000000000000000" + }, + "last_total_power": "0", + "last_validator_powers": [], + "validators": [], + "delegations": [], + "unbonding_delegations": [], + "redelegations": [], + "exported": false + }, + "upgrade": {}, + "vesting": {} + }, + "consensus": { + "params": { + "block": { + "max_bytes": "22020096", + "max_gas": "-1" + }, + "evidence": { + "max_age_num_blocks": "100000", + "max_age_duration": "172800000000000", + "max_bytes": "1048576" + }, + "validator": { + "pub_key_types": [ + "ed25519" + ] + }, + "version": { + "app": "0" + }, + "abci": { + "vote_extensions_enable_height": "0" + } + } + } +} \ No newline at end of file diff --git a/testdata/daemon/chain-home/config/gentx/gentx-fe9b522e38d3b02a85d3b89da5ed5ef8af183148.json b/testdata/daemon/chain-home/config/gentx/gentx-fe9b522e38d3b02a85d3b89da5ed5ef8af183148.json new file mode 100644 index 0000000..a6bc8cd --- /dev/null +++ b/testdata/daemon/chain-home/config/gentx/gentx-fe9b522e38d3b02a85d3b89da5ed5ef8af183148.json @@ -0,0 +1 @@ 
+{"body":{"messages":[{"@type":"/cosmos.staking.v1beta1.MsgCreateValidator","description":{"moniker":"test","identity":"","website":"","security_contact":"","details":""},"commission":{"rate":"0.100000000000000000","max_rate":"0.200000000000000000","max_change_rate":"0.010000000000000000"},"min_self_delegation":"1","delegator_address":"","validator_address":"cosmosvaloper1p972yd2ygx2ffrx8uyzc366z8r54qpq6yvfg4h","pubkey":{"@type":"/cosmos.crypto.ed25519.PubKey","key":"o2e9nKo4olR+XcPKeWeZYKYa4dSHeeRsKx6WJwhGZLg="},"value":{"denom":"stake","amount":"1000000"}}],"memo":"fe9b522e38d3b02a85d3b89da5ed5ef8af183148@192.168.1.36:26656","timeout_height":"0","extension_options":[],"non_critical_extension_options":[]},"auth_info":{"signer_infos":[{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AtLu/W3vulnYQN3Vux1dRlRFpCT3iNCwsFp8bAmvKZIm"},"mode_info":{"single":{"mode":"SIGN_MODE_DIRECT"}},"sequence":"0"}],"fee":{"amount":[],"gas_limit":"200000","payer":"","granter":""},"tip":null},"signatures":["mr1DMvJF7OUVk0AeUfIMYPFI2FlREaK/hCZayP2toDkkmSo+AXsjmpQi8uisXJLXfHpwr3mBsyqzBmb+0ZG0zA=="]} diff --git a/testdata/daemon/chain-home/config/node_key.json b/testdata/daemon/chain-home/config/node_key.json new file mode 100644 index 0000000..0cd65ac --- /dev/null +++ b/testdata/daemon/chain-home/config/node_key.json @@ -0,0 +1 @@ +{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"KV9AInn+C0YEMQXGXiilitt1yB3/rP7HzyUBp/HjvCXaNbP0o2kVX3kTfMm6qB/VsOOcLMJ2ME0gtaG9zbWu5A=="}} \ No newline at end of file diff --git a/testdata/daemon/chain-home/config/priv_validator_key.json b/testdata/daemon/chain-home/config/priv_validator_key.json new file mode 100644 index 0000000..e48b181 --- /dev/null +++ b/testdata/daemon/chain-home/config/priv_validator_key.json @@ -0,0 +1,11 @@ +{ + "address": "DC39BAF596A407984933D6218D82D61A0F58040D", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "o2e9nKo4olR+XcPKeWeZYKYa4dSHeeRsKx6WJwhGZLg=" + }, + "priv_key": { + "type": "tendermint/PrivKeyEd25519", + "value": "BQiAlgmoo2eL+GfRH5IJ7ze6Xwu0NJWTUKMNs5f0GxWjZ72cqjiiVH5dw8p5Z5lgphrh1Id55GwrHpYnCEZkuA==" + } +} \ No newline at end of file diff --git a/testdata/daemon/chain-home/data/priv_validator_state.json b/testdata/daemon/chain-home/data/priv_validator_state.json new file mode 100644 index 0000000..48f3b67 --- /dev/null +++ b/testdata/daemon/chain-home/data/priv_validator_state.json @@ -0,0 +1,5 @@ +{ + "height": "0", + "round": 0, + "step": 0 +} \ No newline at end of file diff --git a/testdata/daemon/chain-home/keyring-test/04b1363396a2e063efe87025a9eb53a1494f3150.address b/testdata/daemon/chain-home/keyring-test/04b1363396a2e063efe87025a9eb53a1494f3150.address new file mode 100644 index 0000000..a1698f7 --- /dev/null +++ b/testdata/daemon/chain-home/keyring-test/04b1363396a2e063efe87025a9eb53a1494f3150.address @@ -0,0 +1 @@ +eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyNC0xMC0yMCAyMzowMzo0NC4yOTkwMDUwMyArMDIwMCBDRVNUIG09KzAuMDc1NDM5MDg5IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoieUZjN3BXVjk1UUhtOU5sWCJ9.KWWffHHQz18A-qt_c3nCwJoOc94dNw1OFdWXyPK4iqn6_4t0IG4gRA.POcFBlxdvkSuuUE6.d5HrVled1gFUjzrziHqO5DjAn_7lCBENHdReDrSSdkorfd0QucuneIdusopAjxwL1jM4VI8S_sjhrf4yLTx9ijctYalstLkK7y9tDebZt7Go_lQO1D4KtBNus72AF2VAu9OKDnK_Dq45BERD5GrVH8RqNEgJ-_gzddL_3UzcACuc5GPDyAC_mpJJR3bFBxgjLvTLSrWSW7-VV4osXhBOSvdmwWlIafqu-nYgxau6VJJG5Q.X93Wgk1FxPQkL-knwD18sQ \ No newline at end of file diff --git a/testdata/daemon/chain-home/keyring-test/097ca235444194948cc7e10588eb4238e950041a.address 
b/testdata/daemon/chain-home/keyring-test/097ca235444194948cc7e10588eb4238e950041a.address new file mode 100644 index 0000000..3867e6d --- /dev/null +++ b/testdata/daemon/chain-home/keyring-test/097ca235444194948cc7e10588eb4238e950041a.address @@ -0,0 +1 @@ +eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyNC0xMC0yMCAyMzowMzo0NC4yMTQ1NTM4MjQgKzAyMDAgQ0VTVCBtPSswLjA2MjU0MzA1MSIsImVuYyI6IkEyNTZHQ00iLCJwMmMiOjgxOTIsInAycyI6IkNDVGZNQjVua1pkN0pyNUYifQ.5Y6w5w_xIwUltEjJf5e-kCIM1xxndyBvSvHkvH8GCFdpbUm39R5drQ.APGriE6goHixnQQq.W-kknX65jJ6AjylfFkc-MYYv6sIO1APHwT7Q9ZTlRPhrFmequw8zI55GDcySiY2cme42MejAHI4t8w7GTB7t0ZyjdlV8-yFNPv2myld4-_MnjcTH8QsDHHf-S1_P_-nuFs1Ayy69C0pSFI-c0Vdy7-etFTdX3kRSTSQUUIEVHrTagiW-A-hGAoQy7DbhqNWMX-6RLCDAQ0dgYx9zggkAWlYfCvZeFjXjfq0agN6jbbf8kkeAemo.dEIb-b3B4WtjkNL5_h3jPA \ No newline at end of file diff --git a/testdata/daemon/chain-home/keyring-test/alice.info b/testdata/daemon/chain-home/keyring-test/alice.info new file mode 100644 index 0000000..5274550 --- /dev/null +++ b/testdata/daemon/chain-home/keyring-test/alice.info @@ -0,0 +1 @@ +eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyNC0xMC0yMCAyMzowMzo0NC4yMTIyNzY4ODcgKzAyMDAgQ0VTVCBtPSswLjA2MDI2NjExMyIsImVuYyI6IkEyNTZHQ00iLCJwMmMiOjgxOTIsInAycyI6ImRRd1pLcDRic2FlSjk1NUsifQ.odpQmUuxDv6l_4awLVSBWjH0fDexZJkOTGcfV6OJDHKo_u04jvYKaQ.Lr9pToGlXXQVHaIJ.Sx1GIobgctKxByL875FUjayWiqRVvNDHVX5md2O1Jo9WsD9FGxsKF0tX0JEQKdBPAN4cfSSL3nct9OfMcU9AQO6q1ifLwCMEqRiJobM203a0hF_oAIyt58-8XDFx4jU02xKg8MFffPukXA7ymfOwEQhkm4vALM2Lw_5cFe6DdWvtM43Yi1F17QOExIagq83cP9fNbLtpDJalZqhQj8es0NQnY7ENC1zvHtjd4cDZnWHX3PWXziefOYVZLjgirV-0sGN-g5pHqRbOKWJhgwBcwQbT8rI3kwwx7SO9wFewY-kiqg8TeUYrPeyWxAlTz1R8X732UfQVGkRc4jPPbjkSzzoo_VgXftJMCDycZ4cq09CiPp0tEyCjsZHGCJTuhWdTDyCUfzh6VtKdgxJfhNQwGFAu8ubQcds31eYQlgd2ZZeew2xzex_e8JT4A2w.bqiXXXqOijCk5BmrluGvyw \ No newline at end of file diff --git a/testdata/daemon/chain-home/keyring-test/bob.info b/testdata/daemon/chain-home/keyring-test/bob.info new file mode 100644 index 0000000..75e5643 --- /dev/null +++ b/testdata/daemon/chain-home/keyring-test/bob.info @@ -0,0 +1 @@ +eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyNC0xMC0yMCAyMzowMzo0NC4yOTY3NDczNTggKzAyMDAgQ0VTVCBtPSswLjA3MzE4MTQzNSIsImVuYyI6IkEyNTZHQ00iLCJwMmMiOjgxOTIsInAycyI6Ikhzc1ZzWE9XUkZoWnI5bmgifQ.sDeLQShYki3J_oXHIsm1UkHfkdqN56tc3_Pl5vkPa7gYsHOAQgNmXQ.U_5Z_89qS5HX-btR.4Cmr7HWnwDtSnx7e8bA7MLaS2aPmo-Ut-bGtka_nQNpF4X_fAWftPfgpTpPcJ0P_9zMqqntY6bz28OO9SMpdLPnlJiDV-on-sUqfRr6fkV5z0dyrWd2zlPRBgaNIpgoj-AtcumSS3JqLKdnJVG2beuD7z-cnJI2CptOVvIOuG47K58R3WWZYc6L9_udm-lR6THPPZ03ZCsf0QwrQ9RUrX6yXBbBli16rIcprWyE9tShlOM-fL76v2j92gOKet6llVk5eCNHNlU5qhbvO2KHyCIExRktzHPmJX-H2o8G8M1wCBFjxsmIb7S8Ewf_69fbxm5is423WaHl3MpMCpFvhcazNjBLFEoNsjB8dsAGwtcD4In9ncQOlS6ElSEhBzWleP3rj6lRdsjd_k7gUkAvcCaQvJftTyKdJHVq7GDX2JUgw79DmUdv99BVx.v_MdAXsM9zu9rmhD1AsPBA \ No newline at end of file diff --git a/testdata/daemon/docker-compose.yml.tmpl b/testdata/daemon/docker-compose.yml.tmpl new file mode 100644 index 0000000..0a76399 --- /dev/null +++ b/testdata/daemon/docker-compose.yml.tmpl @@ -0,0 +1,17 @@ +version: '3' +services: + {{ .ServiceName }}: + image: {{.Image}} + user: ${MY_UID}:${MY_UID} + restart: no + volumes: + - ./chain-home:/.simapp + environment: + - SIMD_HALT_HEIGHT=${HALT_HEIGHT} + logging: + options: + max-size: "10g" + max-file: "3" + ports: + - {{ .GrpcPort }}:9090 + - {{ .CometbftPort }}:26657 diff --git a/testdata/daemon/images/v0.0.1/Dockerfile b/testdata/daemon/images/v0.0.1/Dockerfile new file mode 100644 index 0000000..f2feb88 --- /dev/null +++ 
b/testdata/daemon/images/v0.0.1/Dockerfile @@ -0,0 +1,10 @@ +FROM debian:trixie-slim as os +ENV DEBIAN_FRONTEND=noninteractive + +WORKDIR /simd + +COPY ./simd-1 /usr/bin/simd +COPY ./start_simd_with_upgrade.sh /usr/bin/start_simd_with_upgrade.sh +RUN chmod +x /usr/bin/* + +CMD ["start_simd_with_upgrade.sh", "--home", "/.simapp", "start"] diff --git a/testdata/daemon/images/v0.0.1/simd-1 b/testdata/daemon/images/v0.0.1/simd-1 new file mode 100755 index 0000000..7d79664 Binary files /dev/null and b/testdata/daemon/images/v0.0.1/simd-1 differ diff --git a/testdata/daemon/images/v0.0.1/start_simd_with_upgrade.sh b/testdata/daemon/images/v0.0.1/start_simd_with_upgrade.sh new file mode 100755 index 0000000..23223f4 --- /dev/null +++ b/testdata/daemon/images/v0.0.1/start_simd_with_upgrade.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +set -eu + +SIMD=/usr/bin/simd +if [ ! -f $SIMD ]; then + echo "simd binary not found under $SIMD" + exit 1 +fi + +echo "Starting simd" +$SIMD "$@" & +PID=$! +trap "kill $PID" EXIT + +echo "Waiting for simd to start" +sleep 3 + +echo "Registering upgrade at height 10" +$SIMD tx upgrade software-upgrade test1 --title="Test Proposal" --summary="testing" --deposit="100000000stake" --upgrade-height 10 --upgrade-info '{ "binaries": { "linux/amd64":"https://example.com/simd.zip?checksum=sha256:aec070645fe53ee3b3763059376134f058cc337247c978add178b6ccdfb0019f" } }' --from alice --no-validate -y +sleep 3 + +echo "Vote for upgrade" +$SIMD tx gov vote 1 yes --from alice -y +$SIMD tx gov vote 1 yes --from bob -y + +wait "$PID" diff --git a/testdata/daemon/images/v0.0.2/Dockerfile b/testdata/daemon/images/v0.0.2/Dockerfile new file mode 100644 index 0000000..cfc7d45 --- /dev/null +++ b/testdata/daemon/images/v0.0.2/Dockerfile @@ -0,0 +1,9 @@ +FROM debian:trixie-slim as os +ENV DEBIAN_FRONTEND=noninteractive + +WORKDIR /simd + +COPY ./simd-2 /usr/bin/simd +RUN chmod +x /usr/bin/* + +CMD ["simd", "--home", "/.simapp", "start"] diff --git a/testdata/daemon/images/v0.0.2/simd-2 b/testdata/daemon/images/v0.0.2/simd-2 new file mode 100755 index 0000000..a6c8ef5 Binary files /dev/null and b/testdata/daemon/images/v0.0.2/simd-2 differ diff --git a/testdata/docker/compose-env-echo/docker-compose.yml b/testdata/docker/compose-env-echo/docker-compose.yml new file mode 100644 index 0000000..9335cc4 --- /dev/null +++ b/testdata/docker/compose-env-echo/docker-compose.yml @@ -0,0 +1,9 @@ +version: '3.8' + +services: + s1: + image: "testrepo/env-echo:latest" + ports: + - "127.0.0.1:4444:8080" + environment: + - HALT_HEIGHT=${HALT_HEIGHT} diff --git a/testdata/docker/compose-no-image/docker-compose.yml b/testdata/docker/compose-no-image/docker-compose.yml new file mode 100644 index 0000000..5f325c9 --- /dev/null +++ b/testdata/docker/compose-no-image/docker-compose.yml @@ -0,0 +1,5 @@ +version: '3.8' + +services: + s1: + build: . 
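+# This fixture intentionally defines no image, only a build context, to
+# exercise the no-image case.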
diff --git a/testdata/docker/compose-upgrade-test/docker-compose.yml b/testdata/docker/compose-upgrade-test/docker-compose.yml
new file mode 100644
index 0000000..ea1e1f8
--- /dev/null
+++ b/testdata/docker/compose-upgrade-test/docker-compose.yml
@@ -0,0 +1,5 @@
+version: '3.8'
+
+services:
+  s1:
+    image: "abcd/efgh:ijkl"
diff --git a/testdata/docker/compose-valid-template/docker-compose.yml.tmpl b/testdata/docker/compose-valid-template/docker-compose.yml.tmpl
new file mode 100644
index 0000000..c34c339
--- /dev/null
+++ b/testdata/docker/compose-valid-template/docker-compose.yml.tmpl
@@ -0,0 +1,5 @@
+version: '3.8'
+
+services:
+  s1:
+    image: {{.Image}}
diff --git a/testdata/docker/compose-valid/docker-compose.yml b/testdata/docker/compose-valid/docker-compose.yml
new file mode 100644
index 0000000..e7861e0
--- /dev/null
+++ b/testdata/docker/compose-valid/docker-compose.yml
@@ -0,0 +1,5 @@
+version: '3.8'
+
+services:
+  s1:
+    image: "testrepo/testimage:latest"
diff --git a/testdata/docker/docker-credential-helper/exit-1.sh b/testdata/docker/docker-credential-helper/exit-1.sh
new file mode 100755
index 0000000..ecdbef9
--- /dev/null
+++ b/testdata/docker/docker-credential-helper/exit-1.sh
@@ -0,0 +1,2 @@
+#!/bin/sh
+exit 1
diff --git a/testdata/docker/docker-credential-helper/sleep.sh b/testdata/docker/docker-credential-helper/sleep.sh
new file mode 100755
index 0000000..dd7339a
--- /dev/null
+++ b/testdata/docker/docker-credential-helper/sleep.sh
@@ -0,0 +1,2 @@
+#!/bin/sh
+sleep 100
diff --git a/testdata/docker/docker-credential-helper/valid.sh b/testdata/docker/docker-credential-helper/valid.sh
new file mode 100755
index 0000000..d193252
--- /dev/null
+++ b/testdata/docker/docker-credential-helper/valid.sh
@@ -0,0 +1,2 @@
+#!/bin/sh
+echo "{\"Username\": \"test\", \"Secret\": \"test\"}"
diff --git a/testdata/docker/docker-credential-helper/wrong-json.sh b/testdata/docker/docker-credential-helper/wrong-json.sh
new file mode 100755
index 0000000..4dbc98d
--- /dev/null
+++ b/testdata/docker/docker-credential-helper/wrong-json.sh
@@ -0,0 +1,2 @@
+#!/bin/sh
+echo "{\"abcd\": \"efgh\", \"ijkl\": \"mnop\"}"
diff --git a/testdata/docker/echo-dockerfile/Dockerfile b/testdata/docker/echo-dockerfile/Dockerfile
new file mode 100644
index 0000000..d29af9b
--- /dev/null
+++ b/testdata/docker/echo-dockerfile/Dockerfile
@@ -0,0 +1,6 @@
+FROM golang:1.22-alpine3.19
+ADD http-echo.go /
+ADD go.mod /
+RUN cd / && go build http-echo.go
+CMD ["/http-echo"]
+
diff --git a/testdata/docker/echo-dockerfile/go.mod b/testdata/docker/echo-dockerfile/go.mod
new file mode 100644
index 0000000..5dc375d
--- /dev/null
+++ b/testdata/docker/echo-dockerfile/go.mod
@@ -0,0 +1,3 @@
+module example.com
+
+go 1.22.2
diff --git a/testdata/docker/echo-dockerfile/http-echo.go b/testdata/docker/echo-dockerfile/http-echo.go
new file mode 100644
index 0000000..7b59064
--- /dev/null
+++ b/testdata/docker/echo-dockerfile/http-echo.go
@@ -0,0 +1,21 @@
+package main
+
+import (
+	"fmt"
+	"log"
+	"net/http"
+	"os"
+	"strings"
+)
+
+// http-echo replies to every request with the process environment, one
+// variable per line, so tests can assert which variables a container got.
+func main() {
+	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+		env := os.Environ()
+		w.Header().Set("Content-Type", "text/plain")
+		fmt.Fprint(w, strings.Join(env, "\n"))
+	})
+
+	log.Fatal(http.ListenAndServe(":8080", nil))
+}
diff --git a/testdata/docker/envfile/env b/testdata/docker/envfile/env
new file mode 100644
index 0000000..66e225a
--- /dev/null
+++ b/testdata/docker/envfile/env
@@ -0,0 +1 @@
+VERSION_s1=5.3
diff --git a/testdata/docker/envfile/env-with-multiple-services b/testdata/docker/envfile/env-with-multiple-services
new file mode 100644
index 0000000..70dd4ba
--- /dev/null
+++ b/testdata/docker/envfile/env-with-multiple-services
@@ -0,0 +1,7 @@
+VERSION_s1=5.3
+# case sensitive - s1 != S1
+VERSION_S1=5.4
+# invalid entry
+VERSIONS1=5.5
+# invalid entry
+version_s2=5.6
diff --git a/testdata/docker/sleep-dockerfile/Dockerfile b/testdata/docker/sleep-dockerfile/Dockerfile
new file mode 100644
index 0000000..7b1ca49
--- /dev/null
+++ b/testdata/docker/sleep-dockerfile/Dockerfile
@@ -0,0 +1,2 @@
+FROM debian:buster-slim
+CMD ["sleep", "infinity"]
diff --git a/testdata/provider/local/different-upgrade-network.json b/testdata/provider/local/different-upgrade-network.json
new file mode 100644
index 0000000..1ae9ac5
--- /dev/null
+++ b/testdata/provider/local/different-upgrade-network.json
@@ -0,0 +1,14 @@
+{
+  "upgrades": [
+    {
+      "height": 10,
+      "tag": "v1.0.0",
+      "network": "not-test",
+      "name": "an upgrade",
+      "type": 1,
+      "priority": 1,
+      "source": 1
+    }
+  ],
+  "versions": []
+}
\ No newline at end of file
diff --git a/testdata/provider/local/different-version-network.json b/testdata/provider/local/different-version-network.json
new file mode 100644
index 0000000..f00b6a5
--- /dev/null
+++ b/testdata/provider/local/different-version-network.json
@@ -0,0 +1,12 @@
+{
+  "upgrades": [],
+  "versions": [
+    {
+      "height": 10,
+      "network": "not-test",
+      "tag": "a-tag",
+      "source": 0,
+      "priority": 1
+    }
+  ]
+}
diff --git a/testdata/provider/local/duplicate-upgrade.json b/testdata/provider/local/duplicate-upgrade.json
new file mode 100644
index 0000000..c29fb3a
--- /dev/null
+++ b/testdata/provider/local/duplicate-upgrade.json
@@ -0,0 +1,23 @@
+{
+  "upgrades": [
+    {
+      "height": 10,
+      "tag": "v1.0.0",
+      "network": "test",
+      "name": "an upgrade",
+      "type": 1,
+      "priority": 1,
+      "source": 1
+    },
+    {
+      "height": 10,
+      "tag": "v1.0.0-different",
+      "network": "test",
+      "name": "a different upgrade",
+      "type": 1,
+      "priority": 1,
+      "source": 1
+    }
+  ],
+  "versions": []
+}
diff --git a/testdata/provider/local/duplicate-version.json b/testdata/provider/local/duplicate-version.json
new file mode 100644
index 0000000..71ef761
--- /dev/null
+++ b/testdata/provider/local/duplicate-version.json
@@ -0,0 +1,19 @@
+{
+  "upgrades": [],
+  "versions": [
+    {
+      "height": 10,
+      "network": "test",
+      "tag": "a-tag",
+      "source": 0,
+      "priority": 1
+    },
+    {
+      "height": 10,
+      "network": "test",
+      "tag": "another-tag",
+      "source": 0,
+      "priority": 1
+    }
+  ]
+}
diff --git a/testdata/provider/local/test.json b/testdata/provider/local/test.json
new file mode 100644
index 0000000..c5588ed
--- /dev/null
+++ b/testdata/provider/local/test.json
@@ -0,0 +1,49 @@
+{
+  "upgrades": [
+    {
+      "height": 10,
+      "tag": "v1.0.0",
+      "network": "test",
+      "name": "invalid_upcoming_upgrade_due_to_passed_height",
+      "type": 1,
+      "priority": 1,
+      "source": 1
+    },
+    {
+      "height": 100,
+      "tag": "v1.0.0",
+      "network": "test",
+      "name": "valid_upcoming_upgrade",
+      "type": 1,
+      "priority": 1,
+      "source": 1
+    },
+    {
+      "height": 101,
+      "network": "test",
+      "name": "valid_upgrade_without_tag",
+      "type": 1,
+      "priority": 1,
+      "source": 1
+    },
+    {
+      "height": 102,
+      "tag": "v1.0.0",
+      "network": "test",
+      "name": "invalid_upcoming_upgrade_due_to_cancelled_status",
+      "type": 1,
+      "status": 6,
+      "priority": 1,
+      "source": 1
+    }
+  ],
+  "versions": [
+    {
+      "height": 10,
+      "network": "test",
+      "tag": "a-tag",
+      "source": 0,
+      "priority": 1
+    }
+  ]
+}
\ No newline at end of file
diff --git a/testdata/scripts/build_simapp.sh
b/testdata/scripts/build_simapp.sh new file mode 100755 index 0000000..9d6066e --- /dev/null +++ b/testdata/scripts/build_simapp.sh @@ -0,0 +1,60 @@ +#!/bin/bash + +set -eu + +BLAZAR_DIR=$PWD + +echo "Cloning cosmos-sdk" +rm -rf /tmp/cosmos-sdk || true +git clone https://github.com/cosmos/cosmos-sdk.git /tmp/cosmos-sdk +cd /tmp/cosmos-sdk +git checkout v0.50.10 + +echo "Building cosmos-sdk" +make build + +echo "Copy simapp as simd-1" +cp ./build/simd $BLAZAR_DIR/testdata/daemon/images/v0.0.1/simd-1 + +echo "Build and copy simapp as simd-2" +sed -i 's/const UpgradeName = "v047-to-v050"/const UpgradeName = "test1"/g' simapp/upgrades.go +sed -i simapp/upgrades.go -re "28,43d" +sed -i simapp/upgrades.go -re "6,7d" +make build +cp ./build/simd $BLAZAR_DIR/testdata/daemon/images/v0.0.2/simd-2 + +echo "Initializing simapp" +rm -r ~/.simapp || true +SIMD_BIN="./build/simd" + +# source: cosmos-sdk/scripts/init-simapp.sh +if [ -z "$SIMD_BIN" ]; then echo "SIMD_BIN is not set. Make sure to run make install before"; exit 1; fi +echo "using $SIMD_BIN" +if [ -d "$($SIMD_BIN config home)" ]; then rm -rv $($SIMD_BIN config home); fi +$SIMD_BIN config set client chain-id demo +$SIMD_BIN config set client keyring-backend test +$SIMD_BIN config set app api.enable true +$SIMD_BIN keys add alice +$SIMD_BIN keys add bob +$SIMD_BIN init test --chain-id demo + +echo "Modify voting period in genesis.json" +sed -i 's/"voting_period": "172800s",/"voting_period": "10s",/g' ~/.simapp/config/genesis.json +sed -i 's/"expedited_voting_period": "86400s",/"expedited_voting_period": "8s",/g' ~/.simapp/config/genesis.json + +echo "Adding genesis accounts" +$SIMD_BIN genesis add-genesis-account alice 5000000000stake --keyring-backend test +$SIMD_BIN genesis add-genesis-account bob 5000000000stake --keyring-backend test +$SIMD_BIN genesis gentx alice 1000000stake --chain-id demo +$SIMD_BIN genesis collect-gentxs + +echo "Configure cometbft settings" +sed -i 's/timeout_commit = "5s"/timeout_commit = "2s"/g' ~/.simapp/config/config.toml +sed -i 's/laddr = "tcp:\/\/127.0.0.1:26657"/laddr = "tcp:\/\/0.0.0.0:26657"/g' ~/.simapp/config/config.toml + +sed -i 's/localhost:1317/0.0.0.0:1317/g' ~/.simapp/config/app.toml +sed -i 's/localhost:9090/0.0.0.0:9090/g' ~/.simapp/config/app.toml + +echo "Copy .simapp to testdata/daemon/chain-home" +rm -rf $BLAZAR_DIR/testdata/daemon/chain-home || true +cp -r ~/.simapp $BLAZAR_DIR/testdata/daemon/chain-home diff --git a/testdata/scripts/start_simd_with_upgrade.sh b/testdata/scripts/start_simd_with_upgrade.sh new file mode 100755 index 0000000..23223f4 --- /dev/null +++ b/testdata/scripts/start_simd_with_upgrade.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +set -eu + +SIMD=/usr/bin/simd +if [ ! -f $SIMD ]; then + echo "simd binary not found under $SIMD" + exit 1 +fi + +echo "Starting simd" +$SIMD "$@" & +PID=$! 
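+# Ensure the backgrounded simd process does not outlive this script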
+trap "kill $PID" EXIT + +echo "Waiting for simd to start" +sleep 3 + +echo "Registering upgrade at height 10" +$SIMD tx upgrade software-upgrade test1 --title="Test Proposal" --summary="testing" --deposit="100000000stake" --upgrade-height 10 --upgrade-info '{ "binaries": { "linux/amd64":"https://example.com/simd.zip?checksum=sha256:aec070645fe53ee3b3763059376134f058cc337247c978add178b6ccdfb0019f" } }' --from alice --no-validate -y +sleep 3 + +echo "Vote for upgrade" +$SIMD tx gov vote 1 yes --from alice -y +$SIMD tx gov vote 1 yes --from bob -y + +wait "$PID" diff --git a/testdata/upgrade-files/f1-good.json b/testdata/upgrade-files/f1-good.json new file mode 100644 index 0000000..fd64644 --- /dev/null +++ b/testdata/upgrade-files/f1-good.json @@ -0,0 +1 @@ +{"name": "upgrade1", "info": "some info", "height": 123} diff --git a/testdata/upgrade-files/f2-bad-type-2.json b/testdata/upgrade-files/f2-bad-type-2.json new file mode 100644 index 0000000..8019e2a --- /dev/null +++ b/testdata/upgrade-files/f2-bad-type-2.json @@ -0,0 +1 @@ +{"name": "upgrade1", "heigh": "123"} diff --git a/testdata/upgrade-files/f2-bad-type.json b/testdata/upgrade-files/f2-bad-type.json new file mode 100644 index 0000000..4abd0f7 --- /dev/null +++ b/testdata/upgrade-files/f2-bad-type.json @@ -0,0 +1 @@ +{"name": "upgrade1", "info": 123, "heigh": 123} diff --git a/testdata/upgrade-files/f2-normalized-name.json b/testdata/upgrade-files/f2-normalized-name.json new file mode 100644 index 0000000..af81a0b --- /dev/null +++ b/testdata/upgrade-files/f2-normalized-name.json @@ -0,0 +1 @@ +{"name": "Upgrade2", "info": "some info", "height": 125} diff --git a/testdata/upgrade-files/f3-empty.json b/testdata/upgrade-files/f3-empty.json new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/testdata/upgrade-files/f3-empty.json @@ -0,0 +1 @@ + diff --git a/testdata/upgrade-files/f4-empty-obj.json b/testdata/upgrade-files/f4-empty-obj.json new file mode 100644 index 0000000..0967ef4 --- /dev/null +++ b/testdata/upgrade-files/f4-empty-obj.json @@ -0,0 +1 @@ +{} diff --git a/testdata/upgrade-files/f5-partial-obj-1.json b/testdata/upgrade-files/f5-partial-obj-1.json new file mode 100644 index 0000000..19aecd9 --- /dev/null +++ b/testdata/upgrade-files/f5-partial-obj-1.json @@ -0,0 +1 @@ +{"name": "upgrade2"} diff --git a/testdata/upgrade-files/f5-partial-obj-2.json b/testdata/upgrade-files/f5-partial-obj-2.json new file mode 100644 index 0000000..0f13ee9 --- /dev/null +++ b/testdata/upgrade-files/f5-partial-obj-2.json @@ -0,0 +1 @@ +{"height": 1}